From 843f9e6a123ed47ce139b421c14e7126f2ac685e Mon Sep 17 00:00:00 2001
From: yanavlasov
Date: Wed, 15 Nov 2023 15:38:45 -0500
Subject: [PATCH] Fix crash when idle and request per try timeouts occur within backoff interval

Fix [CVE-2024-23322](https://github.com/envoyproxy/envoy/security/advisories/GHSA-6p83-mfmh-qv38)

Signed-off-by: Yan Avlasov
Signed-off-by: Ryan Northey
Signed-off-by: yanavlasov
---
 changelogs/current.yaml                  |  3 +
 source/common/router/router.cc           |  1 +
 source/common/router/upstream_request.cc |  9 +++
 .../http_timeout_integration_test.cc     | 77 +++++++++++++++++++
 4 files changed, 90 insertions(+)

diff --git a/changelogs/current.yaml b/changelogs/current.yaml
index 1db0c337be44..5443a221532d 100644
--- a/changelogs/current.yaml
+++ b/changelogs/current.yaml
@@ -55,6 +55,9 @@ bug_fixes:
 - area: url matching
   change: |
     Fixed excessive CPU utilization when using regex URL template matcher.
+- area: http
+  change: |
+    Fixed crash when HTTP request idle and per try timeouts occur within the backoff interval.
 
 removed_config_or_runtime:
 # *Normally occurs at the end of the* :ref:`deprecation period <deprecated>`
diff --git a/source/common/router/router.cc b/source/common/router/router.cc
index 37c7da0bc40e..c4172d2d0906 100644
--- a/source/common/router/router.cc
+++ b/source/common/router/router.cc
@@ -1115,6 +1115,7 @@ void Filter::onResponseTimeout() {
 // Called when the per try timeout is hit but we didn't reset the request
 // (hedge_on_per_try_timeout enabled).
 void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) {
+  ASSERT(!upstream_request.retried());
   // Track this as a timeout for outlier detection purposes even though we didn't
   // cancel the request yet and might get a 2xx later.
   updateOutlierDetection(Upstream::Outlier::Result::LocalOriginTimeout, upstream_request,
diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc
index 5d4aef010a91..8496745a80cc 100644
--- a/source/common/router/upstream_request.cc
+++ b/source/common/router/upstream_request.cc
@@ -510,11 +510,20 @@ void UpstreamRequest::setupPerTryTimeout() {
 
 void UpstreamRequest::onPerTryIdleTimeout() {
   ENVOY_STREAM_LOG(debug, "upstream per try idle timeout", *parent_.callbacks());
+  if (per_try_timeout_) {
+    // Disable the per try timeout timer, so it does not trigger further retries.
+    per_try_timeout_->disableTimer();
+  }
   stream_info_.setResponseFlag(StreamInfo::CoreResponseFlag::StreamIdleTimeout);
   parent_.onPerTryIdleTimeout(*this);
 }
 
 void UpstreamRequest::onPerTryTimeout() {
+  if (per_try_idle_timeout_) {
+    // Delete the per try idle timer, so it does not trigger further retries.
+    // The timer has to be deleted to prevent data flow from re-arming it.
+    per_try_idle_timeout_.reset();
+  }
   // If we've sent anything downstream, ignore the per try timeout and let the response continue
   // up to the global timeout
   if (!parent_.downstreamResponseStarted()) {
diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc
index 74830d697d03..880a52acea04 100644
--- a/test/integration/http_timeout_integration_test.cc
+++ b/test/integration/http_timeout_integration_test.cc
@@ -631,4 +631,81 @@ TEST_P(HttpTimeoutIntegrationTest, RequestHeaderTimeout) {
   EXPECT_THAT(response, AllOf(HasSubstr("408"), HasSubstr("header")));
 }
 
+// Validate that Envoy correctly handles per try and per try IDLE timeouts
+// that fire within the backoff interval.
+TEST_P(HttpTimeoutIntegrationTest, OriginalRequestCompletesBeforeBackoffTimer) {
+  auto host = config_helper_.createVirtualHost("example.com", "/test_retry");
+  host.set_include_is_timeout_retry_header(true);
+  config_helper_.addVirtualHost(host);
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) -> void {
+        auto* route_config = hcm.mutable_route_config();
+        auto* virtual_host = route_config->mutable_virtual_hosts(1);
+        auto* route = virtual_host->mutable_routes(0)->mutable_route();
+        auto* retry_policy = route->mutable_retry_policy();
+        retry_policy->mutable_per_try_idle_timeout()->set_seconds(0);
+        // Per try IDLE timeout is 400 ms.
+        retry_policy->mutable_per_try_idle_timeout()->set_nanos(400 * 1000 * 1000);
+      });
+  initialize();
+
+  codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http")));
+  auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{
+      {":method", "POST"},
+      {":path", "/test_retry"},
+      {":scheme", "http"},
+      {":authority", "example.com"},
+      {"x-forwarded-for", "10.0.0.1"},
+      {"x-envoy-retry-on", "5xx"},
+      // Enable hedge_on_per_try_timeout so that the original request is not reset.
+      {"x-envoy-hedge-on-per-try-timeout", "true"},
+      {"x-envoy-upstream-rq-timeout-ms", "500"},
+      // Make the per try timeout the same as the per try idle timeout.
+      // NOTE: it can be a bit longer, within the backoff interval.
+      {"x-envoy-upstream-rq-per-try-timeout-ms", "400"}});
+  auto response = std::move(encoder_decoder.second);
+  request_encoder_ = &encoder_decoder.first;
+
+  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));
+  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));
+  ASSERT_TRUE(upstream_request_->waitForHeadersComplete());
+  codec_client_->sendData(*request_encoder_, 0, true);
+
+  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));
+
+  // Trigger the per try timeout (but not the global timeout). This will actually trigger
+  // both IDLE and request timeouts in the same I/O operation.
+  timeSystem().advanceTimeWait(std::chrono::milliseconds(400));
+
+  // Trigger the retry (there's a 25ms backoff before it's issued).
+  timeSystem().advanceTimeWait(std::chrono::milliseconds(26));
+
+  // Wait for a second request to be sent upstream.
+  FakeStreamPtr upstream_request2;
+
+  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2));
+
+  ASSERT_TRUE(upstream_request2->waitForHeadersComplete());
+
+  // Expect the x-envoy-is-timeout-retry header to be set to indicate to the upstream that
+  // this is a retry initiated by a previous per try timeout.
+  EXPECT_EQ(upstream_request2->headers().getEnvoyIsTimeoutRetryValue(), "true");
+
+  ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_));
+
+  Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
+
+  // Respond to the second request (it does not matter which request gets the response).
+  upstream_request2->encodeHeaders(response_headers, true);
+  ASSERT_TRUE(response->waitForEndStream());
+
+  // The first request should be reset since we used the response from the second request.
+  ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15)));
+
+  codec_client_->close();
+  EXPECT_TRUE(response->complete());
+  EXPECT_EQ("200", response->headers().getStatusValue());
+}
+
 } // namespace Envoy
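
Note on the pattern applied by this patch: the per try idle timer and the per try timeout timer for the same upstream attempt could both fire in one I/O cycle, and the ASSERT added in router.cc indicates the request must not already have been retried when the soft per try timeout is handled. The fix makes whichever timer fires first disable or destroy its sibling, so only one retry can be triggered per attempt. Below is a minimal standalone sketch of that pattern, not Envoy code; FakeTimer, UpstreamAttempt, and scheduleRetry are hypothetical names used only for illustration.

#include <functional>
#include <iostream>
#include <memory>

// Simplified stand-in for an event-loop timer.
class FakeTimer {
public:
  explicit FakeTimer(std::function<void()> cb) : cb_(std::move(cb)) {}
  void enable() { enabled_ = true; }
  void disable() { enabled_ = false; }
  // Simulates the timer firing from the event loop.
  void fire() {
    if (enabled_) {
      enabled_ = false;
      cb_();
    }
  }

private:
  std::function<void()> cb_;
  bool enabled_{false};
};

class UpstreamAttempt {
public:
  UpstreamAttempt() {
    per_try_idle_timer_ = std::make_unique<FakeTimer>([this] { onPerTryIdleTimeout(); });
    per_try_timer_ = std::make_unique<FakeTimer>([this] { onPerTryTimeout(); });
    per_try_idle_timer_->enable();
    per_try_timer_->enable();
  }

  void onPerTryIdleTimeout() {
    // Disable the per try timeout timer so it cannot schedule a second retry for this attempt.
    if (per_try_timer_ != nullptr) {
      per_try_timer_->disable();
    }
    scheduleRetry("per try idle timeout");
  }

  void onPerTryTimeout() {
    // Destroy the per try idle timer so incoming data cannot re-arm it and retry again.
    per_try_idle_timer_.reset();
    scheduleRetry("per try timeout");
  }

  // Both timers firing in the same I/O cycle previously scheduled two retries; with the
  // sibling timer disabled only the first callback schedules one.
  void fireBothTimers() {
    if (per_try_idle_timer_ != nullptr) {
      per_try_idle_timer_->fire();
    }
    if (per_try_timer_ != nullptr) {
      per_try_timer_->fire();
    }
  }

  int retries_scheduled() const { return retries_scheduled_; }

private:
  void scheduleRetry(const char* reason) {
    ++retries_scheduled_;
    std::cout << "retry scheduled due to " << reason << "\n";
  }

  std::unique_ptr<FakeTimer> per_try_idle_timer_;
  std::unique_ptr<FakeTimer> per_try_timer_;
  int retries_scheduled_{0};
};

int main() {
  UpstreamAttempt attempt;
  attempt.fireBothTimers();
  // With the sibling timer disabled, exactly one retry is scheduled instead of two.
  std::cout << "retries scheduled: " << attempt.retries_scheduled() << "\n";
  return 0;
}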