diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index 31d0966d0cac..c39e41749699 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -467,7 +467,7 @@ class StatDataAllocator { * @return CounterSharedPtr a counter, or nullptr if allocation failed, in which case * tag_extracted_name and tags are not moved. */ - virtual CounterSharedPtr makeCounter(const std::string& name, std::string&& tag_extracted_name, + virtual CounterSharedPtr makeCounter(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) PURE; /** @@ -477,9 +477,14 @@ class StatDataAllocator { * @return GaugeSharedPtr a gauge, or nullptr if allocation failed, in which case * tag_extracted_name and tags are not moved. */ - virtual GaugeSharedPtr makeGauge(const std::string& name, std::string&& tag_extracted_name, + virtual GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) PURE; + /** + * Determines whether this stats allocator requires bounded stat-name size. + */ + virtual bool requiresBoundedStatNameSize() const PURE; + // TODO(jmarantz): create a parallel mechanism to instantiate histograms. At // the moment, histograms don't fit the same pattern of counters and gaugaes // as they are not actually created in the context of a stats allocator. 
diff --git a/source/common/common/block_memory_hash_set.h b/source/common/common/block_memory_hash_set.h index e28a1c59595e..5724e527067b 100644 --- a/source/common/common/block_memory_hash_set.h +++ b/source/common/common/block_memory_hash_set.h @@ -165,7 +165,7 @@ template class BlockMemoryHashSet : public Logger::LoggabletruncateAndInit(key, stats_options_); + value->initialize(key, stats_options_); ++control_->size; return ValueCreatedPair(value, true); } diff --git a/source/common/stats/stats_impl.cc b/source/common/stats/stats_impl.cc index e0a370ae1672..681bc77d640f 100644 --- a/source/common/stats/stats_impl.cc +++ b/source/common/stats/stats_impl.cc @@ -135,35 +135,35 @@ bool TagExtractorImpl::extractTag(const std::string& stat_name, std::vector return false; } -RawStatData* HeapRawStatDataAllocator::alloc(const std::string& name) { - uint64_t num_bytes_to_allocate = RawStatData::structSize(name.size()); - RawStatData* data = static_cast(::calloc(num_bytes_to_allocate, 1)); - if (data == nullptr) { - throw std::bad_alloc(); - } - data->checkAndInit(name, num_bytes_to_allocate); +HeapStatData::HeapStatData(absl::string_view key) : name_(key.data(), key.size()) {} +HeapStatData* HeapStatDataAllocator::alloc(absl::string_view name) { + // Any expected truncation of name is done at the callsite. No truncation is + // required to use this allocator. + auto data = std::make_unique(name); Thread::ReleasableLockGuard lock(mutex_); - auto ret = stats_.insert(data); - RawStatData* existing_data = *ret.first; + auto ret = stats_.insert(data.get()); + HeapStatData* existing_data = *ret.first; lock.release(); - if (!ret.second) { - ::free(data); - ++existing_data->ref_count_; - return existing_data; - } else { - return data; + if (ret.second) { + return data.release(); } + ++existing_data->ref_count_; + return existing_data; } /** - * Counter implementation that wraps a RawStatData. + * Counter implementation that wraps a StatData. 
StatData must have data members: + * std::atomic value_; + * std::atomic pending_increment_; + * std::atomic flags_; + * std::atomic ref_count_; */ -class CounterImpl : public Counter, public MetricImpl { +template class CounterImpl : public Counter, public MetricImpl { public: - CounterImpl(RawStatData& data, RawStatDataAllocator& alloc, std::string&& tag_extracted_name, - std::vector&& tags) + CounterImpl(StatData& data, StatDataAllocatorImpl& alloc, + std::string&& tag_extracted_name, std::vector&& tags) : MetricImpl(data.name_, std::move(tag_extracted_name), std::move(tags)), data_(data), alloc_(alloc) {} ~CounterImpl() { alloc_.free(data_); } @@ -182,17 +182,17 @@ class CounterImpl : public Counter, public MetricImpl { uint64_t value() const override { return data_.value_; } private: - RawStatData& data_; - RawStatDataAllocator& alloc_; + StatData& data_; + StatDataAllocatorImpl& alloc_; }; /** - * Gauge implementation that wraps a RawStatData. + * Gauge implementation that wraps a StatData. 
*/ -class GaugeImpl : public Gauge, public MetricImpl { +template class GaugeImpl : public Gauge, public MetricImpl { public: - GaugeImpl(RawStatData& data, RawStatDataAllocator& alloc, std::string&& tag_extracted_name, - std::vector&& tags) + GaugeImpl(StatData& data, StatDataAllocatorImpl& alloc, + std::string&& tag_extracted_name, std::vector&& tags) : MetricImpl(data.name_, std::move(tag_extracted_name), std::move(tags)), data_(data), alloc_(alloc) {} ~GaugeImpl() { alloc_.free(data_); } @@ -217,8 +217,8 @@ class GaugeImpl : public Gauge, public MetricImpl { bool used() const override { return data_.flags_ & Flags::Used; } private: - RawStatData& data_; - RawStatDataAllocator& alloc_; + StatData& data_; + StatDataAllocatorImpl& alloc_; }; TagProducerImpl::TagProducerImpl(const envoy::config::metrics::v2::StatsConfig& config) { @@ -319,47 +319,28 @@ TagProducerImpl::addDefaultExtractors(const envoy::config::metrics::v2::StatsCon return names; } -void HeapRawStatDataAllocator::free(RawStatData& data) { +// TODO(jmarantz): move this below HeapStatDataAllocator::alloc. 
+void HeapStatDataAllocator::free(HeapStatData& data) { ASSERT(data.ref_count_ > 0); if (--data.ref_count_ > 0) { return; } - size_t key_removed; { Thread::LockGuard lock(mutex_); - key_removed = stats_.erase(&data); + size_t key_removed = stats_.erase(&data); + ASSERT(key_removed == 1); } - ASSERT(key_removed == 1); - ::free(&data); + delete &data; } -void RawStatData::initialize(absl::string_view key, uint64_t xfer_size) { +void RawStatData::initialize(absl::string_view key, const StatsOptions& stats_options) { ASSERT(!initialized()); + ASSERT(key.size() <= stats_options.maxNameLength()); ref_count_ = 1; - memcpy(name_, key.data(), xfer_size); - name_[xfer_size] = '\0'; -} - -void RawStatData::checkAndInit(absl::string_view key, uint64_t num_bytes_allocated) { - uint64_t xfer_size = key.size(); - ASSERT(structSize(xfer_size) <= num_bytes_allocated); - - initialize(key, xfer_size); -} - -void RawStatData::truncateAndInit(absl::string_view key, const StatsOptions& stats_options) { - if (key.size() > stats_options.maxNameLength()) { - ENVOY_LOG_MISC( - warn, - "Statistic '{}' is too long with {} characters, it will be truncated to {} characters", key, - key.size(), stats_options.maxNameLength()); - } - - // key is not necessarily nul-terminated, but we want to make sure name_ is. 
- uint64_t xfer_size = std::min(stats_options.maxNameLength(), key.size()); - initialize(key, xfer_size); + memcpy(name_, key.data(), key.size()); + name_[key.size()] = '\0'; } HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr) @@ -420,26 +401,32 @@ void SourceImpl::clearCache() { histograms_.reset(); } -CounterSharedPtr RawStatDataAllocator::makeCounter(const std::string& name, - std::string&& tag_extracted_name, - std::vector&& tags) { - RawStatData* data = alloc(name); +template +CounterSharedPtr StatDataAllocatorImpl::makeCounter(absl::string_view name, + std::string&& tag_extracted_name, + std::vector&& tags) { + StatData* data = alloc(name); if (data == nullptr) { return nullptr; } - return std::make_shared(*data, *this, std::move(tag_extracted_name), - std::move(tags)); + return std::make_shared>(*data, *this, std::move(tag_extracted_name), + std::move(tags)); } -GaugeSharedPtr RawStatDataAllocator::makeGauge(const std::string& name, - std::string&& tag_extracted_name, - std::vector&& tags) { - RawStatData* data = alloc(name); +template +GaugeSharedPtr StatDataAllocatorImpl::makeGauge(absl::string_view name, + std::string&& tag_extracted_name, + std::vector&& tags) { + StatData* data = alloc(name); if (data == nullptr) { return nullptr; } - return std::make_shared(*data, *this, std::move(tag_extracted_name), std::move(tags)); + return std::make_shared>(*data, *this, std::move(tag_extracted_name), + std::move(tags)); } +template class StatDataAllocatorImpl; +template class StatDataAllocatorImpl; + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/stats_impl.h b/source/common/stats/stats_impl.h index 70a37d18ba1a..a007d4a2a21d 100644 --- a/source/common/stats/stats_impl.h +++ b/source/common/stats/stats_impl.h @@ -204,8 +204,7 @@ struct RawStatData { /** * Returns the size of this struct, accounting for the length of name_ - * and padding for alignment. 
Required for the HeapRawStatDataAllocator, which does not truncate - * at a maximum stat name length. + * and padding for alignment. */ static uint64_t structSize(uint64_t name_size); @@ -221,14 +220,7 @@ struct RawStatData { * does not expect stat name truncation. We pass in the number of bytes allocated in order to * assert the copy is safe inline. */ - void checkAndInit(absl::string_view key, uint64_t num_bytes_allocated); - - /** - * Initializes this object to have the specified key, - * a refcount of 1, and all other values zero. Required by the BlockMemoryHashSet. StatsOptions is - * used to truncate key inline, if necessary. - */ - void truncateAndInit(absl::string_view key, const StatsOptions& stats_options); + void initialize(absl::string_view key, const StatsOptions& stats_options); /** * Returns a hash of the key. This is required by BlockMemoryHashSet. @@ -251,9 +243,6 @@ struct RawStatData { std::atomic ref_count_; std::atomic unused_; char name_[]; - -private: - void initialize(absl::string_view key, uint64_t num_bytes_allocated); }; /** @@ -283,33 +272,48 @@ class MetricImpl : public virtual Metric { const std::vector tags_; }; -/** - * Implements a StatDataAllocator that uses RawStatData -- capable of deploying - * in a shared memory block without internal pointers. - */ -class RawStatDataAllocator : public StatDataAllocator { +// Partially implements a StatDataAllocator, leaving alloc & free for subclasses. +// We templatize on StatData rather than defining a virtual base StatData class +// for performance reasons; stat increment is on the hot path. +// +// The two production derivations cover using a fixed block of shared-memory for +// hot restart stat continuity, and heap allocation for more efficient RAM usage +// for when hot-restart is not required. +// +// Also note that RawStatData needs to live in a shared memory block, and it's +// possible, but not obvious, that a vptr would be usable across processes. 
In +// any case, RawStatData is allocated from a shared-memory block rather than via +// new, so the usual C++ compiler assistance for setting up vptrs will not be +// available. This could be resolved with placed new, or another nesting level. +template class StatDataAllocatorImpl : public StatDataAllocator { public: // StatDataAllocator - CounterSharedPtr makeCounter(const std::string& name, std::string&& tag_extracted_name, + CounterSharedPtr makeCounter(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) override; - GaugeSharedPtr makeGauge(const std::string& name, std::string&& tag_extracted_name, + GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) override; /** * @param name the full name of the stat. - * @return RawStatData* a raw stat data block for a given stat name or nullptr if there is no - * more memory available for stats. The allocator should return a reference counted - * data location by name if one already exists with the same name. This is used for - * intra-process scope swapping as well as inter-process hot restart. + * @return StatData* a data block for a given stat name or nullptr if there is no more memory + * available for stats. The allocator should return a reference counted data location + * by name if one already exists with the same name. This is used for intra-process + * scope swapping as well as inter-process hot restart. */ - virtual RawStatData* alloc(const std::string& name) PURE; + virtual StatData* alloc(absl::string_view name) PURE; /** * Free a raw stat data block. The allocator should handle reference counting and only truly * free the block if it is no longer needed. * @param data the data returned by alloc(). 
*/ - virtual void free(RawStatData& data) PURE; + virtual void free(StatData& data) PURE; +}; + +class RawStatDataAllocator : public StatDataAllocatorImpl { +public: + // StatDataAllocator + bool requiresBoundedStatNameSize() const override { return true; } }; /** @@ -373,30 +377,58 @@ class SourceImpl : public Source { }; /** - * Implementation of RawStatDataAllocator that uses an unordered set to store - * RawStatData pointers. + * This structure is an alternate backing store for both CounterImpl and GaugeImpl. It is designed + * so that it can be allocated efficiently from the heap on demand. */ -class HeapRawStatDataAllocator : public RawStatDataAllocator { +struct HeapStatData { + explicit HeapStatData(absl::string_view key); + + /** + * @returns absl::string_view the name as a string_view. + */ + absl::string_view key() const { return name_; } + + std::atomic value_{0}; + std::atomic pending_increment_{0}; + std::atomic flags_{0}; + std::atomic ref_count_{1}; + std::string name_; +}; + +/** + * Implementation of StatDataAllocator using a pure heap-based strategy, so that + * Envoy implementations that do not require hot-restart can use less memory. 
+ */ +class HeapStatDataAllocator : public StatDataAllocatorImpl { public: - // RawStatDataAllocator - ~HeapRawStatDataAllocator() { ASSERT(stats_.empty()); } - RawStatData* alloc(const std::string& name) override; - void free(RawStatData& data) override; + HeapStatDataAllocator() {} + ~HeapStatDataAllocator() { ASSERT(stats_.empty()); } + + // StatDataAllocatorImpl + HeapStatData* alloc(absl::string_view name) override; + void free(HeapStatData& data) override; + + // StatDataAllocator + bool requiresBoundedStatNameSize() const override { return false; } private: - struct RawStatDataHash_ { - size_t operator()(const RawStatData* a) const { return HashUtil::xxHash64(a->key()); } + struct HeapStatHash_ { + size_t operator()(const HeapStatData* a) const { return HashUtil::xxHash64(a->key()); } }; - struct RawStatDataCompare_ { - bool operator()(const RawStatData* a, const RawStatData* b) const { + struct HeapStatCompare_ { + bool operator()(const HeapStatData* a, const HeapStatData* b) const { return (a->key() == b->key()); } }; - typedef std::unordered_set StringRawDataSet; - // An unordered set of RawStatData pointers which keys off the key() + // TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/3927 and + // https://github.com/envoyproxy/envoy/issues/3585, which can help reorganize + // the heap stats using a ref-counted symbol table to compress the stat strings. + typedef std::unordered_set StatSet; + + // An unordered set of HeapStatData pointers which keys off the key() // field in each object. This necessitates a custom comparator and hasher. - StringRawDataSet stats_ GUARDED_BY(mutex_); + StatSet stats_ GUARDED_BY(mutex_); // A mutex is needed here to protect the stats_ object from both alloc() and free() operations. // Although alloc() operations are called under existing locking, free() operations are made from // the destructors of the individual stat objects, which are not protected by locks. 
@@ -500,11 +532,11 @@ class IsolatedStoreImpl : public Store { const std::string prefix_; }; - HeapRawStatDataAllocator alloc_; + HeapStatDataAllocator alloc_; IsolatedStatsCache counters_; IsolatedStatsCache gauges_; IsolatedStatsCache histograms_; - const Stats::StatsOptionsImpl stats_options_; + const StatsOptionsImpl stats_options_; }; } // namespace Stats diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 7e6e448e63c8..0467e47f587a 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -156,6 +156,26 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id) { } } +absl::string_view ThreadLocalStoreImpl::truncateStatNameIfNeeded(absl::string_view name) { + // If the main allocator requires stat name truncation, warn and truncate before + // attempting to allocate. + if (alloc_.requiresBoundedStatNameSize()) { + const uint64_t max_length = stats_options_.maxNameLength(); + + // Note that the heap-allocator does not truncate itself; we have to + // truncate here if we are using heap-allocation as a fallback due to an + // exhausted shared-memory block. + if (name.size() > max_length) { + ENVOY_LOG_MISC( + warn, + "Statistic '{}' is too long with {} characters, it will be truncated to {} characters", + name, name.size(), max_length); + name = absl::string_view(name.data(), max_length); + } + } + return name; +} + std::atomic ThreadLocalStoreImpl::ScopeImpl::next_scope_id_; ThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() { parent_.releaseScopeCrossThread(this); } @@ -177,19 +197,17 @@ StatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat( std::shared_ptr& central_ref = central_cache_map[name]; if (!central_ref) { std::vector tags; + + // Tag extraction occurs on the original, untruncated name so the extraction + // can complete properly, even if the tag values are partially truncated. 
std::string tag_extracted_name = parent_.getTagsForName(name, tags); + absl::string_view truncated_name = parent_.truncateStatNameIfNeeded(name); std::shared_ptr stat = - make_stat(parent_.alloc_, name, std::move(tag_extracted_name), std::move(tags)); + make_stat(parent_.alloc_, truncated_name, std::move(tag_extracted_name), std::move(tags)); if (stat == nullptr) { parent_.num_last_resort_stats_.inc(); - // TODO(jbuckland) Performing the fallback from non-heap allocator to heap allocator should be - // the responsibility of the non-heap allocator, not the client of the non-heap allocator. - // This branch will never be used in the non-hot-restart case, since the parent_.alloc_ object - // will throw instead of returning a nullptr; we should remove the assumption that this - // branching case is always available. - stat = - make_stat(parent_.heap_allocator_, name.substr(0, parent_.statsOptions().maxNameLength()), - std::move(tag_extracted_name), std::move(tags)); + stat = make_stat(parent_.heap_allocator_, truncated_name, std::move(tag_extracted_name), + std::move(tags)); ASSERT(stat != nullptr); } central_ref = stat; @@ -219,7 +237,7 @@ Counter& ThreadLocalStoreImpl::ScopeImpl::counter(const std::string& name) { return safeMakeStat( final_name, central_cache_.counters_, - [](StatDataAllocator& allocator, const std::string& name, std::string&& tag_extracted_name, + [](StatDataAllocator& allocator, absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) -> CounterSharedPtr { return allocator.makeCounter(name, std::move(tag_extracted_name), std::move(tags)); }, @@ -253,7 +271,7 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gauge(const std::string& name) { return safeMakeStat( final_name, central_cache_.gauges_, - [](StatDataAllocator& allocator, const std::string& name, std::string&& tag_extracted_name, + [](StatDataAllocator& allocator, absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) -> GaugeSharedPtr { return 
allocator.makeGauge(name, std::move(tag_extracted_name), std::move(tags)); }, diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 69cebaeae32c..cc280613ce66 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -230,7 +230,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo template using MakeStatFn = - std::function(StatDataAllocator&, const std::string& name, + std::function(StatDataAllocator&, absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags)>; @@ -274,6 +274,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo void clearScopeFromCaches(uint64_t scope_id); void releaseScopeCrossThread(ScopeImpl* scope); void mergeInternal(PostMergeCb mergeCb); + absl::string_view truncateStatNameIfNeeded(absl::string_view name); const Stats::StatsOptions& stats_options_; StatDataAllocator& alloc_; @@ -287,7 +288,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::atomic shutting_down_{}; std::atomic merge_in_progress_{}; Counter& num_last_resort_stats_; - HeapRawStatDataAllocator heap_allocator_; + HeapStatDataAllocator heap_allocator_; SourceImpl source_; }; diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index e41db9566a6e..80b4f2531307 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -141,14 +141,14 @@ HotRestartImpl::HotRestartImpl(Options& options) RELEASE_ASSERT(rc != -1, ""); } -Stats::RawStatData* HotRestartImpl::alloc(const std::string& name) { +Stats::RawStatData* HotRestartImpl::alloc(absl::string_view name) { // Try to find the existing slot in shared memory, otherwise allocate a new one. 
Thread::LockGuard lock(stat_lock_); - absl::string_view key = name; - if (key.size() > options_.statsOptions().maxNameLength()) { - key.remove_suffix(key.size() - options_.statsOptions().maxNameLength()); - } - auto value_created = stats_set_->insert(key); + // In production, the name is truncated in ThreadLocalStore before this + // is called. This assertion is a sanity check that the truncation + // actually happened before reaching this shared-memory allocator. + ASSERT(name.length() <= options_.statsOptions().maxNameLength()); + auto value_created = stats_set_->insert(name); Stats::RawStatData* data = value_created.first; if (data == nullptr) { return nullptr; diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index 39f537d55523..c24c07b10904 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -139,7 +139,7 @@ class HotRestartImpl : public HotRestart, static std::string hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len); // RawStatDataAllocator - Stats::RawStatData* alloc(const std::string& name) override; + Stats::RawStatData* alloc(absl::string_view name) override; void free(Stats::RawStatData& data) override; private: diff --git a/source/server/hot_restart_nop_impl.h b/source/server/hot_restart_nop_impl.h index 73568945fdb9..8444dea459a4 100644 --- a/source/server/hot_restart_nop_impl.h +++ b/source/server/hot_restart_nop_impl.h @@ -27,12 +27,12 @@ class HotRestartNopImpl : public Server::HotRestart { std::string version() override { return "disabled"; } Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } - Stats::RawStatDataAllocator& statsAllocator() override { return stats_allocator_; } + Stats::StatDataAllocator& statsAllocator() override { return stats_allocator_; } private: Thread::MutexBasicLockable log_lock_; Thread::MutexBasicLockable access_log_lock_; - 
Stats::HeapRawStatDataAllocator stats_allocator_; + Stats::HeapStatDataAllocator stats_allocator_; }; } // namespace Server diff --git a/test/common/common/block_memory_hash_set_test.cc b/test/common/common/block_memory_hash_set_test.cc index da5477cf8325..f0082da91ad5 100644 --- a/test/common/common/block_memory_hash_set_test.cc +++ b/test/common/common/block_memory_hash_set_test.cc @@ -22,10 +22,10 @@ class BlockMemoryHashSetTest : public testing::Test { // TestValue that doesn't define a hash. struct TestValueBase { absl::string_view key() const { return name; } - void truncateAndInit(absl::string_view key, const Stats::StatsOptions& stats_options) { - uint64_t xfer = std::min(stats_options.maxNameLength(), key.size()); - memcpy(name, key.data(), xfer); - name[xfer] = '\0'; + void initialize(absl::string_view key, const Stats::StatsOptions& stats_options) { + ASSERT(key.size() <= stats_options.maxNameLength()); + memcpy(name, key.data(), key.size()); + name[key.size()] = '\0'; } static uint64_t structSizeWithOptions(const Stats::StatsOptions& stats_options) { UNREFERENCED_PARAMETER(stats_options); diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 863c7b626e10..a157fb6cdad8 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -30,6 +30,7 @@ envoy_cc_test( "//test/mocks/server:server_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/stats/stats_impl_test.cc b/test/common/stats/stats_impl_test.cc index eb25227e5d58..d976a5a5ffdf 100644 --- a/test/common/stats/stats_impl_test.cc +++ b/test/common/stats/stats_impl_test.cc @@ -66,6 +66,16 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(2UL, store.gauges().size()); } +TEST(StatsIsolatedStoreImplTest, LongStatName) { + IsolatedStoreImpl store; + Stats::StatsOptionsImpl stats_options; + const std::string 
long_string(stats_options.maxNameLength() + 1, 'A'); + + ScopePtr scope = store.createScope("scope."); + Counter& counter = scope->counter(long_string); + EXPECT_EQ(absl::StrCat("scope.", long_string), counter.name()); +} + /** * Test stats macros. @see stats_macros.h */ @@ -478,40 +488,26 @@ TEST(TagProducerTest, CheckConstructor) { "No regex specified for tag specifier and no default regex for name: 'test_extractor'"); } -// Check consistency of internal stat representation -TEST(RawStatDataTest, Consistency) { - HeapRawStatDataAllocator alloc; - // Generate a stat, encode it to hex, and take the hash of that hex string. We expect the hash to - // vary only when the internal representation of a stat has been intentionally changed, in which - // case SharedMemory::VERSION should be incremented as well. - uint64_t expected_hash = 1874506077228772558; +// No truncation occurs in the implementation of HeapStatData. +TEST(RawStatDataTest, HeapNoTruncate) { Stats::StatsOptionsImpl stats_options; - uint64_t max_name_length = stats_options.maxNameLength(); - - const std::string name_1(max_name_length, 'A'); - RawStatData* stat_1 = alloc.alloc(name_1); - std::string stat_hex_dump_1 = - Hex::encode(reinterpret_cast(stat_1), sizeof(RawStatData) + max_name_length); - EXPECT_EQ(HashUtil::xxHash64(stat_hex_dump_1), expected_hash); - alloc.free(*stat_1); - - // If a stat name is truncated, we expect that its internal representation is the same as if it - // had been initialized with the already-truncated name. 
- const std::string name_2(max_name_length + 1, 'A'); - RawStatData* stat_2 = alloc.alloc(name_2); - std::string stat_hex_dump_2 = - Hex::encode(reinterpret_cast(stat_2), sizeof(RawStatData) + max_name_length); - EXPECT_EQ(HashUtil::xxHash64(stat_hex_dump_2), expected_hash); - alloc.free(*stat_2); + HeapStatDataAllocator alloc; //(/*stats_options*/); + const std::string long_string(stats_options.maxNameLength() + 1, 'A'); + HeapStatData* stat{}; + EXPECT_NO_LOGS(stat = alloc.alloc(long_string)); + EXPECT_EQ(stat->key(), long_string); + alloc.free(*stat); } +// Note: a similar test using RawStatData* is in test/server/hot_restart_impl_test.cc. TEST(RawStatDataTest, HeapAlloc) { - HeapRawStatDataAllocator alloc; - RawStatData* stat_1 = alloc.alloc("ref_name"); + Stats::StatsOptionsImpl stats_options; + HeapStatDataAllocator alloc; //(stats_options); + HeapStatData* stat_1 = alloc.alloc("ref_name"); ASSERT_NE(stat_1, nullptr); - RawStatData* stat_2 = alloc.alloc("ref_name"); + HeapStatData* stat_2 = alloc.alloc("ref_name"); ASSERT_NE(stat_2, nullptr); - RawStatData* stat_3 = alloc.alloc("not_ref_name"); + HeapStatData* stat_3 = alloc.alloc("not_ref_name"); ASSERT_NE(stat_3, nullptr); EXPECT_EQ(stat_1, stat_2); EXPECT_NE(stat_1, stat_3); @@ -521,19 +517,6 @@ TEST(RawStatDataTest, HeapAlloc) { alloc.free(*stat_3); } -TEST(RawStatDataTest, Truncate) { - // RawStatData::truncateAndInit(absl::string_view key, const StatsOptions& stats_options) will - // truncate and log to ENVOY_LOG_MISC if given a key longer than the allowed - // stats_options.maxNameLength(). This mechanism is also tested in HotRestartImplTest.truncateKey. 
- Stats::StatsOptionsImpl stats_options; - const std::string long_string(stats_options.maxNameLength() + 1, 'A'); - RawStatData* stat = - static_cast(::calloc(RawStatData::structSizeWithOptions(stats_options), 1)); - EXPECT_LOG_CONTAINS("warning", "is too long with", - stat->truncateAndInit(long_string, stats_options)); - ::free(stat); -} - TEST(SourceImplTest, Caching) { NiceMock store; std::vector stored_counters; diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 98de7d312d0a..a32cc1bd4bb5 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -10,6 +10,7 @@ #include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" +#include "test/test_common/logging.h" #include "test/test_common/utility.h" #include "absl/strings/str_split.h" @@ -26,48 +27,34 @@ using testing::_; namespace Envoy { namespace Stats { -class StatsThreadLocalStoreTest : public testing::Test, public RawStatDataAllocator { +class StatsThreadLocalStoreTest : public testing::Test { public: - StatsThreadLocalStoreTest() { - ON_CALL(*this, alloc(_)).WillByDefault(Invoke([this](const std::string& name) -> RawStatData* { - return alloc_.alloc(name); - })); - - ON_CALL(*this, free(_)).WillByDefault(Invoke([this](RawStatData& data) -> void { - return alloc_.free(data); - })); + void SetUp() override { + alloc_ = std::make_unique(options_); + resetStoreWithAlloc(*alloc_); + } - EXPECT_CALL(*this, alloc("stats.overflow")); - store_ = std::make_unique(options_, *this); + void resetStoreWithAlloc(StatDataAllocator& alloc) { + store_ = std::make_unique(options_, alloc); store_->addSink(sink_); } - MOCK_METHOD1(alloc, RawStatData*(const std::string& name)); - MOCK_METHOD1(free, void(RawStatData& data)); - NiceMock main_thread_dispatcher_; NiceMock tls_; - Stats::StatsOptionsImpl options_; - TestAllocator alloc_; + StatsOptionsImpl options_; + 
std::unique_ptr alloc_; MockSink sink_; std::unique_ptr store_; }; -class HistogramTest : public testing::Test, public RawStatDataAllocator { +class HistogramTest : public testing::Test { public: - typedef std::map NameHistogramMap; + typedef std::map NameHistogramMap; - void SetUp() override { - ON_CALL(*this, alloc(_)).WillByDefault(Invoke([this](const std::string& name) -> RawStatData* { - return alloc_.alloc(name); - })); - - ON_CALL(*this, free(_)).WillByDefault(Invoke([this](RawStatData& data) -> void { - return alloc_.free(data); - })); + HistogramTest() : alloc_(options_) {} - EXPECT_CALL(*this, alloc("stats.overflow")); - store_ = std::make_unique(options_, *this); + void SetUp() override { + store_ = std::make_unique(options_, alloc_); store_->addSink(sink_); store_->initializeThreading(main_thread_dispatcher_, tls_); } @@ -76,12 +63,12 @@ class HistogramTest : public testing::Test, public RawStatDataAllocator { store_->shutdownThreading(); tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*this, free(_)); + EXPECT_CALL(alloc_, free(_)); } NameHistogramMap makeHistogramMap(const std::vector& hist_list) { NameHistogramMap name_histogram_map; - for (const Stats::ParentHistogramSharedPtr& histogram : hist_list) { + for (const ParentHistogramSharedPtr& histogram : hist_list) { // Exclude the scope part of the name. 
const std::vector& split_vector = absl::StrSplit(histogram->name(), '.'); name_histogram_map.insert(std::make_pair(split_vector.back(), histogram)); @@ -112,12 +99,12 @@ class HistogramTest : public testing::Test, public RawStatDataAllocator { HistogramStatisticsImpl h2_interval_statistics(hist2_interval); NameHistogramMap name_histogram_map = makeHistogramMap(histogram_list); - const Stats::ParentHistogramSharedPtr& h1 = name_histogram_map["h1"]; + const ParentHistogramSharedPtr& h1 = name_histogram_map["h1"]; EXPECT_EQ(h1->cumulativeStatistics().summary(), h1_cumulative_statistics.summary()); EXPECT_EQ(h1->intervalStatistics().summary(), h1_interval_statistics.summary()); if (histogram_list.size() > 1) { - const Stats::ParentHistogramSharedPtr& h2 = name_histogram_map["h2"]; + const ParentHistogramSharedPtr& h2 = name_histogram_map["h2"]; EXPECT_EQ(h2->cumulativeStatistics().summary(), h2_cumulative_statistics.summary()); EXPECT_EQ(h2->intervalStatistics().summary(), h2_interval_statistics.summary()); } @@ -159,8 +146,8 @@ class HistogramTest : public testing::Test, public RawStatDataAllocator { NiceMock main_thread_dispatcher_; NiceMock tls_; - Stats::StatsOptionsImpl options_; - TestAllocator alloc_; + StatsOptionsImpl options_; + MockedTestAllocator alloc_; MockSink sink_; std::unique_ptr store_; InSequence s; @@ -170,7 +157,7 @@ class HistogramTest : public testing::Test, public RawStatDataAllocator { TEST_F(StatsThreadLocalStoreTest, NoTls) { InSequence s; - EXPECT_CALL(*this, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, alloc(_)).Times(2); Counter& c1 = store_->counter("c1"); EXPECT_EQ(&c1, &store_->counter("c1")); @@ -194,7 +181,7 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { EXPECT_EQ(2L, store_->gauges().front().use_count()); // Includes overflow stat. 
- EXPECT_CALL(*this, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(3); store_->shutdownThreading(); } @@ -203,7 +190,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - EXPECT_CALL(*this, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, alloc(_)).Times(2); Counter& c1 = store_->counter("c1"); EXPECT_EQ(&c1, &store_->counter("c1")); @@ -232,7 +219,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { EXPECT_EQ(2L, store_->gauges().front().use_count()); // Includes overflow stat. - EXPECT_CALL(*this, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(3); } TEST_F(StatsThreadLocalStoreTest, BasicScope) { @@ -240,7 +227,7 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - EXPECT_CALL(*this, alloc(_)).Times(4); + EXPECT_CALL(*alloc_, alloc(_)).Times(4); Counter& c1 = store_->counter("c1"); Counter& c2 = scope1->counter("c2"); EXPECT_EQ("c1", c1.name()); @@ -266,7 +253,7 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*this, free(_)).Times(5); + EXPECT_CALL(*alloc_, free(_)).Times(5); } // Validate that we sanitize away bad characters in the stats prefix. @@ -275,7 +262,7 @@ TEST_F(StatsThreadLocalStoreTest, SanitizePrefix) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope(std::string("scope1:\0:foo.", 13)); - EXPECT_CALL(*this, alloc(_)); + EXPECT_CALL(*alloc_, alloc(_)); Counter& c1 = scope1->counter("c1"); EXPECT_EQ("scope1___foo.c1", c1.name()); @@ -283,7 +270,7 @@ TEST_F(StatsThreadLocalStoreTest, SanitizePrefix) { tls_.shutdownThread(); // Includes overflow stat. 
- EXPECT_CALL(*this, free(_)).Times(2); + EXPECT_CALL(*alloc_, free(_)).Times(2); } TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { @@ -291,7 +278,7 @@ TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - EXPECT_CALL(*this, alloc(_)); + EXPECT_CALL(*alloc_, alloc(_)); scope1->counter("c1"); EXPECT_EQ(2UL, store_->counters().size()); CounterSharedPtr c1 = store_->counters().front(); @@ -306,7 +293,7 @@ TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { store_->source().clearCache(); EXPECT_EQ(1UL, store_->source().cachedCounters().size()); - EXPECT_CALL(*this, free(_)); + EXPECT_CALL(*alloc_, free(_)); EXPECT_EQ(1L, c1.use_count()); c1.reset(); @@ -314,7 +301,7 @@ TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*this, free(_)); + EXPECT_CALL(*alloc_, free(_)); } TEST_F(StatsThreadLocalStoreTest, NestedScopes) { @@ -322,12 +309,12 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - EXPECT_CALL(*this, alloc(_)); + EXPECT_CALL(*alloc_, alloc(_)); Counter& c1 = scope1->counter("foo.bar"); EXPECT_EQ("scope1.foo.bar", c1.name()); ScopePtr scope2 = scope1->createScope("foo."); - EXPECT_CALL(*this, alloc(_)); + EXPECT_CALL(*alloc_, alloc(_)); Counter& c2 = scope2->counter("bar"); EXPECT_NE(&c1, &c2); EXPECT_EQ("scope1.foo.bar", c2.name()); @@ -337,7 +324,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { EXPECT_EQ(1UL, c1.value()); EXPECT_EQ(c1.value(), c2.value()); - EXPECT_CALL(*this, alloc(_)); + EXPECT_CALL(*alloc_, alloc(_)); Gauge& g1 = scope2->gauge("some_gauge"); EXPECT_EQ("scope1.foo.some_gauge", g1.name()); @@ -345,7 +332,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { tls_.shutdownThread(); // Includes overflow stat. 
- EXPECT_CALL(*this, free(_)).Times(4); + EXPECT_CALL(*alloc_, free(_)).Times(4); } TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { @@ -358,7 +345,7 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { ScopePtr scope2 = store_->createScope("scope1."); // We will call alloc twice, but they should point to the same backing storage. - EXPECT_CALL(*this, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, alloc(_)).Times(2); Counter& c1 = scope1->counter("c"); Counter& c2 = scope2->counter("c"); EXPECT_NE(&c1, &c2); @@ -373,7 +360,7 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { EXPECT_EQ(2UL, store_->counters().size()); // Gauges should work the same way. - EXPECT_CALL(*this, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, alloc(_)).Times(2); Gauge& g1 = scope1->gauge("g"); Gauge& g2 = scope2->gauge("g"); EXPECT_NE(&g1, &g2); @@ -386,7 +373,7 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { EXPECT_EQ(1UL, store_->gauges().size()); // Deleting scope 1 will call free but will be reference counted. It still leaves scope 2 valid. - EXPECT_CALL(*this, free(_)).Times(2); + EXPECT_CALL(*alloc_, free(_)).Times(2); scope1.reset(); c2.inc(); EXPECT_EQ(3UL, c2.value()); @@ -399,14 +386,14 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*this, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(3); } TEST_F(StatsThreadLocalStoreTest, AllocFailed) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - EXPECT_CALL(*this, alloc("foo")).WillOnce(Return(nullptr)); + EXPECT_CALL(*alloc_, alloc(absl::string_view("foo"))).WillOnce(Return(nullptr)); Counter& c1 = store_->counter("foo"); EXPECT_EQ(1UL, store_->counter("stats.overflow").value()); @@ -417,14 +404,89 @@ TEST_F(StatsThreadLocalStoreTest, AllocFailed) { tls_.shutdownThread(); // Includes overflow but not the failsafe stat which we allocated from the heap. 
- EXPECT_CALL(*this, free(_));
+ EXPECT_CALL(*alloc_, free(_));
+}
+
+TEST_F(StatsThreadLocalStoreTest, HotRestartTruncation) {
+ InSequence s;
+ store_->initializeThreading(main_thread_dispatcher_, tls_);
+
+ // First, with a successful RawStatData allocation:
+ const uint64_t max_name_length = options_.maxNameLength();
+ const std::string name_1(max_name_length + 1, 'A');
+
+ EXPECT_CALL(*alloc_, alloc(_));
+ EXPECT_LOG_CONTAINS("warning", "is too long with", store_->counter(name_1));
+
+ // The stats did not overflow yet.
+ EXPECT_EQ(0UL, store_->counter("stats.overflow").value());
+
+ // The name will be truncated, so we won't be able to find it with the entire name.
+ EXPECT_EQ(nullptr, TestUtility::findCounter(*store_, name_1).get());
+
+ // But we can find it based on the expected truncation.
+ EXPECT_NE(nullptr, TestUtility::findCounter(*store_, name_1.substr(0, max_name_length)).get());
+
+ // The same should be true with heap allocation, which occurs when the default
+ // allocator fails.
+ const std::string name_2(max_name_length + 1, 'B');
+ EXPECT_CALL(*alloc_, alloc(_)).WillOnce(Return(nullptr));
+ store_->counter(name_2);
+
+ // Same deal: the name will be truncated, so we won't be able to find it with the entire name.
+ EXPECT_EQ(nullptr, TestUtility::findCounter(*store_, name_2).get());
+
+ // But we can find it based on the expected truncation.
+ EXPECT_NE(nullptr, TestUtility::findCounter(*store_, name_2.substr(0, max_name_length)).get());
+
+ // Now the stats have overflowed.
+ EXPECT_EQ(1UL, store_->counter("stats.overflow").value());
+
+ store_->shutdownThreading();
+ tls_.shutdownThread();
+
+ // Includes overflow, and the first raw-allocated stat, but not the failsafe stat which we
+ // allocated from the heap.
+ EXPECT_CALL(*alloc_, free(_)).Times(2);
+}
+
+class HeapStatsThreadLocalStoreTest : public StatsThreadLocalStoreTest {
+public:
+ void SetUp() override {
+ resetStoreWithAlloc(heap_alloc_);
+ // Note: we do not call StatsThreadLocalStoreTest::SetUp here as that
+ // sets up a thread_local_store with raw stat alloc.
+ }
+ void TearDown() override {
+ store_.reset(); // delete before the allocator.
+ }
+
+ HeapStatDataAllocator heap_alloc_;
+};
+
+TEST_F(HeapStatsThreadLocalStoreTest, NonHotRestartNoTruncation) {
+ InSequence s;
+ store_->initializeThreading(main_thread_dispatcher_, tls_);
+
+ // Allocate a stat greater than the max name length.
+ const uint64_t max_name_length = options_.maxNameLength();
+ const std::string name_1(max_name_length + 1, 'A');
+
+ store_->counter(name_1);
+
+ // This works fine, and we can find it by its long name because heap-stats do not
+ // get truncated.
+ EXPECT_NE(nullptr, TestUtility::findCounter(*store_, name_1).get());
+
+ store_->shutdownThreading();
+ tls_.shutdownThread();
}

TEST_F(StatsThreadLocalStoreTest, ShuttingDown) {
 InSequence s;
 store_->initializeThreading(main_thread_dispatcher_, tls_);

- EXPECT_CALL(*this, alloc(_)).Times(4);
+ EXPECT_CALL(*alloc_, alloc(_)).Times(4);
 store_->counter("c1");
 store_->gauge("g1");
 store_->shutdownThreading();
@@ -440,7 +502,7 @@ TEST_F(StatsThreadLocalStoreTest, ShuttingDown) {
 tls_.shutdownThread();

 // Includes overflow stat.
- EXPECT_CALL(*this, free(_)).Times(5);
+ EXPECT_CALL(*alloc_, free(_)).Times(5);
}

TEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) {
@@ -463,7 +525,7 @@ TEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) {
 tls_.shutdownThread();

- EXPECT_CALL(*this, free(_));
+ EXPECT_CALL(*alloc_, free(_));
}

// Histogram tests
@@ -610,7 +672,7 @@ TEST_F(HistogramTest, BasicHistogramUsed) {
 // Merge histograms again and validate that both h1 and h2 are used.
store_->mergeHistograms([]() -> void {}); - for (const Stats::ParentHistogramSharedPtr& histogram : store_->histograms()) { + for (const ParentHistogramSharedPtr& histogram : store_->histograms()) { EXPECT_TRUE(histogram->used()); } } diff --git a/test/integration/server.cc b/test/integration/server.cc index 38e33b148c97..eb84cfe8ad91 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -92,9 +92,9 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers Thread::MutexBasicLockable lock; ThreadLocal::InstanceImpl tls; - Stats::HeapRawStatDataAllocator stats_allocator; - Stats::StatsOptionsImpl options_; - Stats::ThreadLocalStoreImpl stats_store(options_, stats_allocator); + Stats::HeapStatDataAllocator stats_allocator; + Stats::StatsOptionsImpl stats_options; + Stats::ThreadLocalStoreImpl stats_store(stats_options, stats_allocator); stat_store_ = &stats_store; Runtime::RandomGeneratorPtr random_generator; if (deterministic) { diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 324088fbf111..83fbea4dcfbb 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -173,12 +173,12 @@ class MockHotRestart : public HotRestart { MOCK_METHOD0(version, std::string()); MOCK_METHOD0(logLock, Thread::BasicLockable&()); MOCK_METHOD0(accessLogLock, Thread::BasicLockable&()); - MOCK_METHOD0(statsAllocator, Stats::RawStatDataAllocator&()); + MOCK_METHOD0(statsAllocator, Stats::StatDataAllocator&()); private: Thread::MutexBasicLockable log_lock_; Thread::MutexBasicLockable access_log_lock_; - Stats::HeapRawStatDataAllocator stats_allocator_; + Stats::HeapStatDataAllocator stats_allocator_; }; class MockListenerComponentFactory : public ListenerComponentFactory { diff --git a/test/server/BUILD b/test/server/BUILD index 46002908db60..4ceb534c4463 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -73,6 +73,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", 
"//source/server:hot_restart_lib", "//test/mocks/server:server_mocks", + "//test/test_common:logging_lib", "//test/test_common:threadsafe_singleton_injector_lib", ], ) diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index de7a1965d34c..4216a265c8fe 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -1,10 +1,12 @@ #include "common/api/os_sys_calls_impl.h" +#include "common/common/hex.h" #include "common/stats/stats_impl.h" #include "server/hot_restart_impl.h" #include "test/mocks/api/mocks.h" #include "test/mocks/server/mocks.h" +#include "test/test_common/logging.h" #include "test/test_common/threadsafe_singleton_injector.h" #include "absl/strings/match.h" @@ -88,6 +90,44 @@ TEST_F(HotRestartImplTest, versionString) { } } +// Check consistency of internal raw stat representation by comparing hash of +// memory contents against a previously recorded value. +TEST_F(HotRestartImplTest, Consistency) { + setup(); + + // Generate a stat, encode it to hex, and take the hash of that hex string. We + // expect the hash to vary only when the internal representation of a stat has + // been intentionally changed, in which case SharedMemory::VERSION should be + // incremented as well. 
+ const uint64_t expected_hash = 1874506077228772558; + const uint64_t max_name_length = stats_options_.maxNameLength(); + + const std::string name_1(max_name_length, 'A'); + Stats::RawStatData* stat_1 = hot_restart_->alloc(name_1); + const uint64_t stat_size = sizeof(Stats::RawStatData) + max_name_length; + const std::string stat_hex_dump_1 = Hex::encode(reinterpret_cast(stat_1), stat_size); + EXPECT_EQ(HashUtil::xxHash64(stat_hex_dump_1), expected_hash); + EXPECT_EQ(name_1, stat_1->key()); + hot_restart_->free(*stat_1); +} + +TEST_F(HotRestartImplTest, RawAlloc) { + setup(); + + Stats::RawStatData* stat_1 = hot_restart_->alloc("ref_name"); + ASSERT_NE(stat_1, nullptr); + Stats::RawStatData* stat_2 = hot_restart_->alloc("ref_name"); + ASSERT_NE(stat_2, nullptr); + Stats::RawStatData* stat_3 = hot_restart_->alloc("not_ref_name"); + ASSERT_NE(stat_3, nullptr); + EXPECT_EQ(stat_1, stat_2); + EXPECT_NE(stat_1, stat_3); + EXPECT_NE(stat_2, stat_3); + hot_restart_->free(*stat_1); + hot_restart_->free(*stat_2); + hot_restart_->free(*stat_3); +} + TEST_F(HotRestartImplTest, crossAlloc) { setup(); @@ -114,16 +154,6 @@ TEST_F(HotRestartImplTest, crossAlloc) { EXPECT_EQ(stat5, stat5_prime); } -TEST_F(HotRestartImplTest, truncateKey) { - setup(); - - std::string key1(options_.statsOptions().maxNameLength(), 'a'); - Stats::RawStatData* stat1 = hot_restart_->alloc(key1); - std::string key2 = key1 + "a"; - Stats::RawStatData* stat2 = hot_restart_->alloc(key2); - EXPECT_EQ(stat1, stat2); -} - TEST_F(HotRestartImplTest, allocFail) { EXPECT_CALL(options_, maxStats()).WillRepeatedly(Return(2)); setup(); diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 7fd74e7ea69e..66dcf511f742 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -39,21 +39,10 @@ using testing::_; namespace Envoy { namespace Server { -class AdminStatsTest : public testing::TestWithParam, - public Stats::RawStatDataAllocator { +class AdminStatsTest : 
public testing::TestWithParam { public: -public: - AdminStatsTest() { - ON_CALL(*this, alloc(_)) - .WillByDefault(Invoke( - [this](const std::string& name) -> Stats::RawStatData* { return alloc_.alloc(name); })); - - ON_CALL(*this, free(_)).WillByDefault(Invoke([this](Stats::RawStatData& data) -> void { - return alloc_.free(data); - })); - - EXPECT_CALL(*this, alloc("stats.overflow")); - store_ = std::make_unique(options_, *this); + AdminStatsTest() : alloc_(options_) { + store_ = std::make_unique(options_, alloc_); store_->addSink(sink_); } @@ -64,14 +53,11 @@ class AdminStatsTest : public testing::TestWithParam main_thread_dispatcher_; NiceMock tls_; - Stats::TestAllocator alloc_; - Stats::MockSink sink_; Stats::StatsOptionsImpl options_; + Stats::MockedTestAllocator alloc_; + Stats::MockSink sink_; std::unique_ptr store_; }; @@ -121,7 +107,7 @@ TEST_P(AdminStatsTest, StatsAsJson) { store_->mergeHistograms([]() -> void {}); - EXPECT_CALL(*this, free(_)); + EXPECT_CALL(alloc_, free(_)); std::map all_stats; @@ -257,7 +243,7 @@ TEST_P(AdminStatsTest, UsedOnlyStatsAsJson) { store_->mergeHistograms([]() -> void {}); - EXPECT_CALL(*this, free(_)); + EXPECT_CALL(alloc_, free(_)); std::map all_stats; @@ -795,6 +781,7 @@ TEST_P(AdminInstanceTest, PostRequest) { class PrometheusStatsFormatterTest : public testing::Test { protected: + PrometheusStatsFormatterTest() /*: alloc_(stats_options_)*/ {} void addCounter(const std::string& name, std::vector cluster_tags) { std::string tname = std::string(name); counters_.push_back(alloc_.makeCounter(name, std::move(tname), std::move(cluster_tags))); @@ -805,7 +792,8 @@ class PrometheusStatsFormatterTest : public testing::Test { gauges_.push_back(alloc_.makeGauge(name, std::move(tname), std::move(cluster_tags))); } - Stats::HeapRawStatDataAllocator alloc_; + Stats::StatsOptionsImpl stats_options_; + Stats::HeapStatDataAllocator alloc_; std::vector counters_; std::vector gauges_; }; diff --git a/test/test_common/utility.cc 
b/test/test_common/utility.cc index 85508ea5e675..5427a548ea28 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -226,4 +226,24 @@ bool TestHeaderMapImpl::has(const std::string& key) { return get(LowerCaseString bool TestHeaderMapImpl::has(const LowerCaseString& key) { return get(key) != nullptr; } } // namespace Http + +namespace Stats { + +MockedTestAllocator::MockedTestAllocator(const StatsOptions& stats_options) + : alloc_(stats_options) { + ON_CALL(*this, alloc(_)).WillByDefault(Invoke([this](absl::string_view name) -> RawStatData* { + return alloc_.alloc(name); + })); + + ON_CALL(*this, free(_)).WillByDefault(Invoke([this](RawStatData& data) -> void { + return alloc_.free(data); + })); + + EXPECT_CALL(*this, alloc(absl::string_view("stats.overflow"))); +} + +MockedTestAllocator::~MockedTestAllocator() {} + +} // namespace Stats + } // namespace Envoy diff --git a/test/test_common/utility.h b/test/test_common/utility.h index c972c9ad407c..04bca5ca951c 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -26,6 +26,8 @@ using testing::AssertionFailure; using testing::AssertionResult; using testing::AssertionSuccess; +using testing::Invoke; +using testing::_; namespace Envoy { #define EXPECT_THROW_WITH_MESSAGE(statement, expected_exception, message) \ @@ -333,22 +335,22 @@ class TestHeaderMapImpl : public HeaderMapImpl { } // namespace Http namespace Stats { + /** * This is a heap test allocator that works similar to how the shared memory allocator works in * terms of reference counting, etc. 
*/ class TestAllocator : public RawStatDataAllocator { public: + TestAllocator(const StatsOptions& stats_options) : stats_options_(stats_options) {} ~TestAllocator() { EXPECT_TRUE(stats_.empty()); } - RawStatData* alloc(const std::string& name) override { - Stats::StatsOptionsImpl stats_options; - stats_options.max_obj_name_length_ = 127; - CSmartPtr& stat_ref = stats_[name]; + RawStatData* alloc(absl::string_view name) override { + CSmartPtr& stat_ref = stats_[std::string(name)]; if (!stat_ref) { stat_ref.reset(static_cast( - ::calloc(RawStatData::structSizeWithOptions(stats_options), 1))); - stat_ref->truncateAndInit(name, stats_options); + ::calloc(RawStatData::structSizeWithOptions(stats_options_), 1))); + stat_ref->initialize(name, stats_options_); } else { stat_ref->ref_count_++; } @@ -369,6 +371,18 @@ class TestAllocator : public RawStatDataAllocator { private: static void freeAdapter(RawStatData* data) { ::free(data); } std::unordered_map> stats_; + const StatsOptions& stats_options_; +}; + +class MockedTestAllocator : public RawStatDataAllocator { +public: + MockedTestAllocator(const StatsOptions& stats_options); + virtual ~MockedTestAllocator(); + + MOCK_METHOD1(alloc, RawStatData*(absl::string_view name)); + MOCK_METHOD1(free, void(RawStatData& data)); + + TestAllocator alloc_; }; } // namespace Stats