Skip to content
Permalink
Browse files

Track byteSize of HeaderMap internally.

Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.

Signed-off-by: Asra Ali <asraa@google.com>
  • Loading branch information...
asraa authored and htuch committed Oct 7, 2019
1 parent 9c682f8 commit afc39bea36fd436e54262f150c009e8d72db5014
@@ -85,6 +85,10 @@ Version history
* upstream: added :ref:`fail_traffic_on_panic <envoy_api_field_Cluster.CommonLbConfig.ZoneAwareLbConfig.fail_traffic_on_panic>` to allow failing all requests to a cluster during panic state.
* zookeeper: parse responses and emit latency stats.

1.11.2 (October 8, 2019)
========================
* http: fixed CVE-2019-15226 by adding a cached byte size in HeaderMap.

1.11.1 (August 13, 2019)
========================
* http: added mitigation of client initiated attacks that result in flooding of the downstream HTTP/2 connections. Those attacks can be logged at the "warning" level when the runtime feature `http.connection_manager.log_flood_exception` is enabled. The runtime setting defaults to disabled to avoid log spam when under attack.
@@ -78,6 +78,10 @@ class Instance {

/**
* Log a completed request.
* Prior to logging, call refreshByteSize() on HeaderMaps to ensure that an accurate byte size
* count is logged.
* TODO(asraa): Remove the refreshByteSize() requirement when entries in HeaderMap can no longer be
* modified by reference and HeaderMap holds an accurate internal byte size count.
* @param request_headers supplies the incoming request headers after filtering.
* @param response_headers supplies response headers.
* @param response_trailers supplies response trailers.
@@ -457,9 +457,41 @@ class HeaderMap {
virtual void setReferenceKey(const LowerCaseString& key, const std::string& value) PURE;

/**
* HeaderMap contains an internal byte size count, updated as entries are added, removed, or
* modified through the HeaderMap interface. However, HeaderEntries can also be accessed and
* modified by reference, in which case the HeaderMap can no longer accurately maintain the
* internal byte size count.
*
* Calling byteSize before a HeaderEntry is accessed will return the internal byte size count. The
* value is cleared when a HeaderEntry is accessed, and the value is updated and set again when
* refreshByteSize is called.
*
* To guarantee an accurate byte size count, call refreshByteSize.
*
* @return absl::optional<uint64_t> the approximate size of the header map in bytes if the cached
* value is valid, absl::nullopt otherwise.
*/
virtual absl::optional<uint64_t> byteSize() const PURE;

/**
* This returns the sum of the byte sizes of the keys and values in the HeaderMap. This also
* updates and sets the byte size count.
*
* To guarantee an accurate byte size count, call this method. If it is known that HeaderEntries
* have not been manipulated since the last call to refreshByteSize(), it is safe to use byteSize()
* instead.
*
* @return uint64_t the approximate size of the header map in bytes.
*/
virtual uint64_t refreshByteSize() PURE;

/**
* This returns the sum of the byte sizes of the keys and values in the HeaderMap.
*
* This iterates over the HeaderMap to calculate size and should only be called directly when the
* user wants an explicit recalculation of the byte size.
*
* @return uint64_t the approximate size of the header map in bytes.
*/
virtual uint64_t byteSize() const PURE;
virtual uint64_t byteSizeInternal() const PURE;

/**
* Get a header by key.
@@ -506,6 +506,18 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() {
}

connection_manager_.stats_.named_.downstream_rq_active_.dec();
// Refresh byte sizes of the HeaderMaps before logging.
// TODO(asraa): Remove this when entries in HeaderMap can no longer be modified by reference and
// HeaderMap holds an accurate internal byte size count.
if (request_headers_ != nullptr) {
request_headers_->refreshByteSize();
}
if (response_headers_ != nullptr) {
response_headers_->refreshByteSize();
}
if (response_trailers_ != nullptr) {
response_trailers_->refreshByteSize();
}
for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) {
access_log->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(),
stream_info_);
@@ -719,14 +731,6 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers,
}
}

ASSERT(connection_manager_.config_.maxRequestHeadersKb() > 0);
if (request_headers_->byteSize() > (connection_manager_.config_.maxRequestHeadersKb() * 1024)) {
sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_),
Code::RequestHeaderFieldsTooLarge, "", nullptr, is_head_request_, absl::nullopt,
StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge);
return;
}

// Currently we only support relative paths at the application layer. We expect the codec to have
// broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does this
// when the allow_absolute_url flag is enabled on the HCM.
@@ -295,14 +295,17 @@ struct HeaderMapImpl::StaticLookupTable : public TrieLookupTable<EntryCb> {
}
};

void HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data) {
uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data) {
if (data.empty()) {
return;
return 0;
}
uint64_t byte_size = 0;
if (!header.empty()) {
header.append(",", 1);
byte_size += 1;
}
header.append(data.data(), data.size());
return data.size() + byte_size;
}

HeaderMapImpl::HeaderMapImpl() { memset(&inline_headers_, 0, sizeof(inline_headers_)); }
@@ -319,6 +322,20 @@ HeaderMapImpl::HeaderMapImpl(
}
}

// Increases the cached byte size count by `size`, but only while the cache is valid; once the
// cache has been invalidated (by non-const HeaderEntry access) this is a no-op until
// refreshByteSize() repopulates it.
void HeaderMapImpl::addSize(uint64_t size) {
  if (cached_byte_size_) {
    *cached_byte_size_ += size;
  }
}

// Decreases the cached byte size count by `size` while the cache is valid; a no-op otherwise.
// The cached count must never underflow: removed bytes were previously added.
void HeaderMapImpl::subtractSize(uint64_t size) {
  if (cached_byte_size_) {
    ASSERT(*cached_byte_size_ >= size);
    *cached_byte_size_ -= size;
  }
}

void HeaderMapImpl::copyFrom(const HeaderMap& header_map) {
header_map.iterate(
[](const HeaderEntry& header, void* context) -> HeaderMap::Iterate {
@@ -359,10 +376,13 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) {
if (*ref_lookup_response.entry_ == nullptr) {
maybeCreateInline(ref_lookup_response.entry_, *ref_lookup_response.key_, std::move(value));
} else {
appendToHeader((*ref_lookup_response.entry_)->value(), value.getStringView());
const uint64_t added_size =
appendToHeader((*ref_lookup_response.entry_)->value(), value.getStringView());
addSize(added_size);
value.clear();
}
} else {
addSize(key.size() + value.size());
std::list<HeaderEntryImpl>::iterator i = headers_.insert(std::move(key), std::move(value));
i->entry_ = i;
}
@@ -373,7 +393,8 @@ void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) {
// the existing value.
auto* entry = getExistingInline(key.getStringView());
if (entry != nullptr) {
appendToHeader(entry->value(), value.getStringView());
const uint64_t added_size = appendToHeader(entry->value(), value.getStringView());
addSize(added_size);
key.clear();
value.clear();
} else {
@@ -408,7 +429,8 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) {
if (entry != nullptr) {
char buf[32];
StringUtil::itoa(buf, sizeof(buf), value);
appendToHeader(entry->value(), buf);
const uint64_t added_size = appendToHeader(entry->value(), buf);
addSize(added_size);
return;
}
HeaderString new_key;
@@ -423,7 +445,8 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) {
void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value) {
auto* entry = getExistingInline(key.get());
if (entry != nullptr) {
appendToHeader(entry->value(), value);
const uint64_t added_size = appendToHeader(entry->value(), value);
addSize(added_size);
return;
}
HeaderString new_key;
@@ -451,13 +474,24 @@ void HeaderMapImpl::setReferenceKey(const LowerCaseString& key, const std::strin
ASSERT(new_value.empty()); // NOLINT(bugprone-use-after-move)
}

uint64_t HeaderMapImpl::byteSize() const {
absl::optional<uint64_t> HeaderMapImpl::byteSize() const { return cached_byte_size_; }

// Returns an accurate byte size count, recomputing and re-caching it when the cached value has
// been invalidated.
uint64_t HeaderMapImpl::refreshByteSize() {
  if (cached_byte_size_.has_value()) {
    // Cache hit: the stored count is still accurate.
    return *cached_byte_size_;
  }
  // Cache miss: recompute via a full iteration over the HeaderMap, then repopulate the cache.
  const uint64_t recomputed = byteSizeInternal();
  cached_byte_size_ = recomputed;
  return recomputed;
}

// Computes the total byte size of the map from scratch: the sum of key and value lengths over
// every entry. O(n) in the number of headers; does not touch the cache.
uint64_t HeaderMapImpl::byteSizeInternal() const {
  uint64_t total = 0;
  for (const HeaderEntryImpl& entry : headers_) {
    total += entry.key().size() + entry.value().size();
  }
  return total;
}

@@ -474,6 +508,7 @@ const HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const {
HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) {
for (HeaderEntryImpl& header : headers_) {
if (header.key() == key.get().c_str()) {
cached_byte_size_.reset();
return &header;
}
}
@@ -528,6 +563,7 @@ void HeaderMapImpl::remove(const LowerCaseString& key) {
} else {
for (auto i = headers_.begin(); i != headers_.end();) {
if (i->key() == key.get().c_str()) {
subtractSize(i->key().size() + i->value().size());
i = headers_.erase(i);
} else {
++i;
@@ -537,7 +573,7 @@ void HeaderMapImpl::remove(const LowerCaseString& key) {
}

void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) {
headers_.remove_if([&](const HeaderEntryImpl& entry) {
headers_.remove_if([&prefix, this](const HeaderEntryImpl& entry) {
bool to_remove = absl::StartsWith(entry.key().getStringView(), prefix.get());
if (to_remove) {
// If this header should be removed, make sure any references in the
@@ -546,8 +582,13 @@ void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) {
if (cb) {
StaticLookupResponse ref_lookup_response = cb(*this);
if (ref_lookup_response.entry_) {
const uint32_t key_value_size = (*ref_lookup_response.entry_)->key().size() +
(*ref_lookup_response.entry_)->value().size();
subtractSize(key_value_size);
*ref_lookup_response.entry_ = nullptr;
}
} else {
subtractSize(entry.key().size() + entry.value().size());
}
}
return to_remove;
@@ -570,6 +611,7 @@ void HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const {

HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl** entry,
const LowerCaseString& key) {
cached_byte_size_.reset();
if (*entry) {
return **entry;
}
@@ -588,6 +630,7 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl
return **entry;
}

addSize(key.get().size() + value.size());
std::list<HeaderEntryImpl>::iterator i = headers_.insert(key, std::move(value));
i->entry_ = i;
*entry = &(*i);
@@ -609,6 +652,8 @@ void HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) {
}

HeaderEntryImpl* entry = *ptr_to_entry;
const uint64_t size_to_subtract = entry->entry_->key().size() + entry->entry_->value().size();
subtractSize(size_to_subtract);
*ptr_to_entry = nullptr;
headers_.erase(entry->entry_);
}
@@ -16,12 +16,21 @@ namespace Http {

/**
* These are definitions of all of the inline header access functions described inside header_map.h
*
* When a non-const reference or pointer to a HeaderEntry is returned, the internal byte size count
* will be cleared, since HeaderMap will no longer be able to accurately update the size of that
* HeaderEntry.
* TODO(asraa): Remove functions with a non-const HeaderEntry return value.
*/
// Defines the inline-header accessors for header `name`. The non-const accessors invalidate the
// cached byte size count, since the caller may mutate the returned HeaderEntry by reference and
// the HeaderMap could not track the resulting size change.
#define DEFINE_INLINE_HEADER_FUNCS(name)                                                           \
public:                                                                                            \
  const HeaderEntry* name() const override { return inline_headers_.name##_; }                     \
  HeaderEntry* name() override {                                                                   \
    cached_byte_size_.reset();                                                                     \
    return inline_headers_.name##_;                                                               \
  }                                                                                                \
  HeaderEntry& insert##name() override {                                                           \
    cached_byte_size_.reset();                                                                     \
    return maybeCreateInline(&inline_headers_.name##_, Headers::get().name);                       \
  }                                                                                                \
  void remove##name() override { removeInline(&inline_headers_.name##_); }
@@ -43,7 +52,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
* @param header the header to append to.
* @param data to append to the header.
*/
static void appendToHeader(HeaderString& header, absl::string_view data);
static uint64_t appendToHeader(HeaderString& header, absl::string_view data);

HeaderMapImpl();
explicit HeaderMapImpl(
@@ -71,7 +80,9 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
void addCopy(const LowerCaseString& key, const std::string& value) override;
void setReference(const LowerCaseString& key, const std::string& value) override;
void setReferenceKey(const LowerCaseString& key, const std::string& value) override;
uint64_t byteSize() const override;
absl::optional<uint64_t> byteSize() const override;
uint64_t refreshByteSize() override;
uint64_t byteSizeInternal() const override;
const HeaderEntry* get(const LowerCaseString& key) const override;
HeaderEntry* get(const LowerCaseString& key) override;
void iterate(ConstIterateCb cb, void* context) const override;
@@ -195,10 +206,16 @@ class HeaderMapImpl : public HeaderMap, NonCopyable {
HeaderEntryImpl* getExistingInline(absl::string_view key);

void removeInline(HeaderEntryImpl** entry);
void addSize(uint64_t size);
void subtractSize(uint64_t size);

AllInlineHeaders inline_headers_;
HeaderList headers_;

// When present, this holds the internal byte size of the HeaderMap. The value is cleared whenever
// a non-const pointer or reference to a HeaderEntry is handed out, and is recomputed when
// refreshByteSize() is called.
absl::optional<uint64_t> cached_byte_size_ = 0;

ALL_INLINE_HEADERS(DEFINE_INLINE_HEADER_FUNCS)
};

@@ -460,8 +460,10 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) {
header_parsing_state_ = HeaderParsingState::Value;
current_header_value_.append(data, length);

const uint32_t total =
current_header_field_.size() + current_header_value_.size() + current_header_map_->byteSize();
// Verify that the cached value in byte size exists.
ASSERT(current_header_map_->byteSize().has_value());
const uint32_t total = current_header_field_.size() + current_header_value_.size() +
current_header_map_->byteSize().value();
if (total > (max_request_headers_kb_ * 1024)) {
error_code_ = Http::Code::RequestHeaderFieldsTooLarge;
sendProtocolError();
@@ -472,6 +474,10 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) {
int ConnectionImpl::onHeadersCompleteBase() {
ENVOY_CONN_LOG(trace, "headers complete", connection_);
completeLastHeader();
// Validate that the completed HeaderMap's cached byte size exists and is correct.
// This assert iterates over the HeaderMap.
ASSERT(current_header_map_->byteSize().has_value() &&
current_header_map_->byteSize() == current_header_map_->byteSizeInternal());
if (!(parser_.http_major == 1 && parser_.http_minor == 1)) {
// This is not necessarily true, but it's good enough since higher layers only care if this is
// HTTP/1.1 or not.
@@ -509,6 +509,10 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) {

switch (frame->hd.type) {
case NGHTTP2_HEADERS: {
// Verify that the final HeaderMap's byte size is under the limit before decoding headers.
// This assert iterates over the HeaderMap.
ASSERT(stream->headers_->byteSize().has_value() &&
stream->headers_->byteSize().value() == stream->headers_->byteSizeInternal());
stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;
if (!stream->cookies_.empty()) {
HeaderString key(Headers::get().Cookie);
@@ -620,6 +624,12 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {
case NGHTTP2_HEADERS:
case NGHTTP2_DATA: {
StreamImpl* stream = getStream(frame->hd.stream_id);
if (stream->headers_) {
// Verify that the final HeaderMap's byte size is under the limit before sending frames.
// This assert iterates over the HeaderMap.
ASSERT(stream->headers_->byteSize().has_value() &&
stream->headers_->byteSize().value() == stream->headers_->byteSizeInternal());
}
stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM;
break;
}
@@ -808,9 +818,10 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name,
stats_.headers_cb_no_stream_.inc();
return 0;
}

stream->saveHeader(std::move(name), std::move(value));
if (stream->headers_->byteSize() > max_request_headers_kb_ * 1024) {
// Verify that the cached value in byte size exists.
ASSERT(stream->headers_->byteSize().has_value());
if (stream->headers_->byteSize().value() > max_request_headers_kb_ * 1024) {
// This will cause the library to reset/close the stream.
stats_.header_overflow_.inc();
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;

0 comments on commit afc39be

Please sign in to comment.
You can’t perform that action at this time.