From 87ca41da4c2c2d9f51e8530c4aad30633cffb5de Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 29 Dec 2016 15:39:21 -0600 Subject: [PATCH 01/81] Metric: Add Metric (scaled value) support class. --- lib/ts/Makefile.am | 4 +- lib/ts/Metric.h | 118 ++++++++++++++++++++++++++++++++++++ lib/ts/test_Metric.cc | 136 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 257 insertions(+), 1 deletion(-) create mode 100644 lib/ts/Metric.h create mode 100644 lib/ts/test_Metric.cc diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am index 039db6e4b0b..4d2a17b30c9 100644 --- a/lib/ts/Makefile.am +++ b/lib/ts/Makefile.am @@ -23,7 +23,7 @@ library_includedir=$(includedir)/ts library_include_HEADERS = apidefs.h noinst_PROGRAMS = mkdfa CompileParseRules -check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator +check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator test_Metric TESTS_ENVIRONMENT = LSAN_OPTIONS=suppressions=suppression.txt @@ -243,6 +243,8 @@ test_tsutil_SOURCES = \ test_Regex.cc \ tests.cc +test_Metric_SOURCES = test_Metric.cc + CompileParseRules_SOURCES = CompileParseRules.cc clean-local: diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h new file mode 100644 index 00000000000..42d7545aba1 --- /dev/null +++ b/lib/ts/Metric.h @@ -0,0 +1,118 @@ +/** @file + + Scaled integral values. + + In many situations it is desirable to define scaling factors or base units (a "metric"). This template + enables this to be done in a type and scaling safe manner where the defined factors carry their scaling + information as part of the type. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if !defined(TS_METRIC_H) +#define TS_METRIC_H + +#include +#include + +namespace ApacheTrafficServer +{ + /** A class to hold scaled values. + + Instances of this class have a @a count and a @a scale. The "value" of the instance is @a + count * @a scale. The scale is stored in the compiler in the class symbol table and so only + the count is a run time value. This prevents passing an incorrectly scaled value. Conversions + between scales are explicit using @c metric_round_up and @c metric_round_down. Because the + scales are not the same these conversions can be lossy and the two conversions determine + whether, in such a case, the result should be rounded up or down to the nearest scale value. + + @note This is modeled somewhat on @c std::chrono and serves a similar function for different + and simpler cases (where the ratio is always an integer, never a fraction). + + @see metric_round_up + @see metric_round_down + */ + template < intmax_t N, typename COUNT_TYPE = int > + class Metric + { + typedef Metric self; ///< Self reference type. + + public: + /// Scaling factor for instances. + constexpr static intmax_t SCALE = N; + typedef COUNT_TYPE CountType; ///< Type used to hold the count. + + Metric(); ///< Default contructor. + Metric(CountType n); ///< Contruct from unscaled integer. + + /// The count in terms of the local @c SCALE. + CountType count() const; + /// The absolute count, unscaled. 
+ CountType units() const; + + /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. + /// @internal This is intended for internal use but may be handy for other clients. + template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); + + protected: + CountType _n; ///< Number of scale units. + }; + + template < intmax_t N, typename C > + inline Metric::Metric() : _n() {} + template < intmax_t N, typename C > + inline Metric::Metric(CountType n) : _n(n) {} + template < intmax_t N, typename C > + inline auto Metric::count() const -> CountType { return _n; } + template < intmax_t N, typename C > + inline auto Metric::units() const -> CountType { return _n * SCALE; } + + template < intmax_t N, typename C > + template < intmax_t S, typename I > + intmax_t Metric::round_down(Metric const& src) + { + auto n = src.count(); + // Yes, a bit odd, but this minimizes the risk of integer overflow. + // I need to validate that under -O2 the compiler will only do 1 division to ge + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (n / N) * S + (( n % N ) * S) / N; + } + + template < typename M, intmax_t N, typename C > + M metric_round_up(Metric const& src) + { + if (1 == M::SCALE) { + return M(src.units()); + } else { + typedef std::ratio R; // R::num == M::SCALE / GCD(M::SCALE, N) == GCF(M::SCALE, N) + auto n = src.count(); + // Round down and add 1 unless @a n is an even multiple of the GCF of the two scales. + return M(M::round_down(src) + ((n % R::num) != 0)); + } + } + + template < typename M, intmax_t N, typename C > + M metric_round_down(Metric const& src) + { + return M(1 == M::SCALE ? 
src.units() : M::round_down(src)); + } +} + +#endif // TS_METRIC_H diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc new file mode 100644 index 00000000000..5f066529208 --- /dev/null +++ b/lib/ts/test_Metric.cc @@ -0,0 +1,136 @@ +/** @file + + Intrusive pointer test. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include +#include +#include + +namespace ts { + using namespace ApacheTrafficServer; +} + +struct TestBox { + typedef TestBox self; ///< Self reference type. + + std::string _name; + + static int _count; + static int _fail; + + TestBox(char const* name) : _name(name) {} + TestBox(std::string const& name) : _name(name) {} + bool check(bool result, char const *fmt, ...) __attribute__((format(printf, 3, 4))); + + static void print_summary() + { + printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? "FAIL" : "SUCCESS"); + } +}; + +int TestBox::_count = 0; +int TestBox::_fail = 0; + +bool +TestBox::check(bool result, char const *fmt, ...) +{ + ++_count; + + if (!result) { + static constexpr size_t N = 1 << 16; + size_t n = N; + size_t x; + char* s; + char buffer[N]; // just stack, go big. 
+ + s = buffer; + x = snprintf(s, n, "%s: ", _name.c_str()); + n -= x; + s += x; + + va_list ap; + va_start(ap, fmt); + vsnprintf(s, n, fmt, ap); + va_end(ap); + printf("%s\n", buffer); + ++_fail; + } + return result; +} + +void +Test_1() +{ + constexpr static int SCALE = 4096; + typedef ts::Metric PageSize; + + TestBox test("TS Metric"); + PageSize pg1(1); + + test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); + test.check(pg1.units() == SCALE, "Units wrong, got %d expected %d", pg1.units(), SCALE); +} + +void +Test_2() +{ + constexpr static int SCALE_1 = 8192; + constexpr static int SCALE_2 = 512; + + typedef ts::Metric Size_1; + typedef ts::Metric Size_2; + + TestBox test("TS Metric Conversions"); + Size_2 sz_a(2); + Size_2 sz_b(57); + Size_2 sz_c(SCALE_1 / SCALE_2); + Size_2 sz_d(29 * SCALE_1 / SCALE_2); + + auto sz = ts::metric_round_up(sz_a); + test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::metric_round_down(sz_a); + test.check(sz.count() == 0 , "Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::metric_round_up(sz_b); + test.check(sz.count() == 4 , "Rounding up, got %d expected %d", sz.count(), 4); + sz = ts::metric_round_down(sz_b); + test.check(sz.count() == 3 , "Rounding down, got %d expected %d", sz.count(), 3); + + sz = ts::metric_round_up(sz_c); + test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::metric_round_down(sz_c); + test.check(sz.count() == 1 , "Rounding down, got %d expected %d", sz.count(), 1); + + sz = ts::metric_round_up(sz_d); + test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); + sz = ts::metric_round_down(sz_d); + test.check(sz.count() == 29 , "Rounding down, got %d expected %d", sz.count(), 29); +} +int +main(int, char **) +{ + Test_1(); + Test_2(); + TestBox::print_summary(); + return 0; +} + From 2de357872706fc707277d1faaa002cd767a2b4ac Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Thu, 29 Dec 2016 19:24:44 -0600 Subject: [PATCH 02/81] Metric: Add assignment, copy constructor. --- lib/ts/Metric.h | 35 +++++++++++++++++++++++++++++++++++ lib/ts/test_Metric.cc | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 42d7545aba1..af5e91d53f3 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -61,11 +61,25 @@ namespace ApacheTrafficServer Metric(); ///< Default contructor. Metric(CountType n); ///< Contruct from unscaled integer. + /// Copy constructor. + /// @note This is valid only if the scale does not increase. + template < intmax_t S, typename I > + Metric(Metric const& that); + + /// Direct assignment. + /// The count is set to @a n. + self& operator = (CountType n); + /// The count in terms of the local @c SCALE. CountType count() const; /// The absolute count, unscaled. CountType units() const; + /// Assignment operator. + /// @note This is valid only if the scale does not increase. + template < intmax_t S, typename I > + self& operator = (Metric const& that); + /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. /// @internal This is intended for internal use but may be handy for other clients. 
template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); @@ -82,6 +96,27 @@ namespace ApacheTrafficServer inline auto Metric::count() const -> CountType { return _n; } template < intmax_t N, typename C > inline auto Metric::units() const -> CountType { return _n * SCALE; } + template < intmax_t N, typename C > + inline auto Metric::operator = (CountType n) -> self& { _n = n; return *this; } + + template + template + Metric::Metric(Metric const& that) + { + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + } + + template + template + auto Metric::operator = (Metric const& that) -> self& + { + typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + return *this; + } template < intmax_t N, typename C > template < intmax_t S, typename I > diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index 5f066529208..66495924639 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -124,12 +124,46 @@ Test_2() test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); sz = ts::metric_round_down(sz_d); test.check(sz.count() == 29 , "Rounding down, got %d expected %d", sz.count(), 29); + + sz = 119; + sz_b = sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 + // sz = sz_b; // Should not compile. + test.check(sz_b.count() == 119 * (SCALE_1/SCALE_2) , "Integral conversion, got %d expected %d", sz_b.count(), 119 * (SCALE_1/SCALE_2)); } + +void +Test_3() +{ + TestBox test("TS Metric: relatively prime tests"); + + ts::Metric<9> m_9; + ts::Metric<4> m_4, m_test; + + m_9 = 95; + // m_4 = m_9; // Should fail to compile with static assert. + // m_9 = m_4; // Should fail to compile with static assert. 
+ + m_4 = ts::metric_round_up(m_9); + test.check(m_4.count() == 214 , "Rounding down, got %d expected %d", m_4.count(), 214); + m_4 = ts::metric_round_down(m_9); + test.check(m_4.count() == 213 , "Rounding down, got %d expected %d", m_4.count(), 213); + + m_4 = 213; + m_9 = ts::metric_round_up(m_4); + test.check(m_9.count() == 95 , "Rounding down, got %d expected %d", m_9.count(), 95); + m_9 = ts::metric_round_down(m_4); + test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); + + m_test = m_4; // Verify assignment of identical scale values compiles. + test.check(m_test.count() == 213 , "Assignment got %d expected %d", m_4.count(), 213); +} + int main(int, char **) { Test_1(); Test_2(); + Test_3(); TestBox::print_summary(); return 0; } From e1157094ccd5a573b9aeffd3e0a320f949773889 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 14 Jan 2017 08:08:46 -0600 Subject: [PATCH 03/81] Metric: Cherry pick up to 4104d5cd063116bb95b071caec3ee0c8db87ac30. Working rounding optimizations. --- lib/ts/Metric.h | 228 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 194 insertions(+), 34 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index af5e91d53f3..61b6326b336 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -37,10 +37,14 @@ namespace ApacheTrafficServer Instances of this class have a @a count and a @a scale. The "value" of the instance is @a count * @a scale. The scale is stored in the compiler in the class symbol table and so only - the count is a run time value. This prevents passing an incorrectly scaled value. Conversions - between scales are explicit using @c metric_round_up and @c metric_round_down. Because the - scales are not the same these conversions can be lossy and the two conversions determine - whether, in such a case, the result should be rounded up or down to the nearest scale value. + the count is a run time value. 
An instance with a large scale can be assign to an instance + with a smaller scale and the conversion is done automatically. Conversions from a smaller to + larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents + inadvertent changes in value. Because the scales are not the same these conversions can be + lossy and the two conversions determine whether, in such a case, the result should be rounded + up or down to the nearest scale value. + + @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. @note This is modeled somewhat on @c std::chrono and serves a similar function for different and simpler cases (where the ratio is always an integer, never a fraction). @@ -48,56 +52,73 @@ namespace ApacheTrafficServer @see metric_round_up @see metric_round_down */ - template < intmax_t N, typename COUNT_TYPE = int > + template < intmax_t N, typename T = int > class Metric { typedef Metric self; ///< Self reference type. - + public: /// Scaling factor for instances. + /// Make it externally accessible. constexpr static intmax_t SCALE = N; - typedef COUNT_TYPE CountType; ///< Type used to hold the count. + typedef T Count; ///< Type used to hold the count. - Metric(); ///< Default contructor. - Metric(CountType n); ///< Contruct from unscaled integer. + constexpr Metric(); ///< Default contructor. + ///< Construct to have @a n scaled units. + constexpr Metric(Count n); - /// Copy constructor. - /// @note This is valid only if the scale does not increase. + /// Copy constructor for same scale. + template < typename C > + Metric(Metric const& that); + + /// Copy / conversion constructor. + /// @note Requires that @c S be an integer multiple of @c SCALE. template < intmax_t S, typename I > Metric(Metric const& that); /// Direct assignment. /// The count is set to @a n. - self& operator = (CountType n); - - /// The count in terms of the local @c SCALE. 
- CountType count() const; - /// The absolute count, unscaled. - CountType units() const; + self& operator = (Count n); + + /// The number of scale units. + constexpr Count count() const; + /// The absolute value, scaled up. + constexpr Count units() const; /// Assignment operator. - /// @note This is valid only if the scale does not increase. + /// @note Requires that @c S be an integer multiple of @c SCALE. template < intmax_t S, typename I > self& operator = (Metric const& that); - + /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. - /// @internal This is intended for internal use but may be handy for other clients. + /// @internal This is required for internal use but may be handy for other clients. + /// @internal Variants to optimize special cases. + template < typename I > static intmax_t round_down(Metric const& src); template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); - + static intmax_t round_down(self const& that); + + static intmax_t scale(); + protected: - CountType _n; ///< Number of scale units. + Count _n; ///< Number of scale units. 
}; template < intmax_t N, typename C > - inline Metric::Metric() : _n() {} + constexpr Metric::Metric() : _n() {} template < intmax_t N, typename C > - inline Metric::Metric(CountType n) : _n(n) {} + constexpr Metric::Metric(Count n) : _n(n) {} template < intmax_t N, typename C > - inline auto Metric::count() const -> CountType { return _n; } + constexpr auto Metric::count() const -> Count { return _n; } template < intmax_t N, typename C > - inline auto Metric::units() const -> CountType { return _n * SCALE; } + constexpr auto Metric::units() const -> Count { return _n * SCALE; } template < intmax_t N, typename C > - inline auto Metric::operator = (CountType n) -> self& { _n = n; return *this; } + inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } + + template + template + Metric::Metric(Metric const& that) : _n(static_cast(that._n)) + { + } template template @@ -118,18 +139,56 @@ namespace ApacheTrafficServer return *this; } + // Same type, no rounding needed. + template < intmax_t N, typename C > + intmax_t Metric::round_down(self const& that) + { + return that._n; + } + + // Same scale just with different count type, no rounding. + template < intmax_t N, typename C > + template < typename I > + intmax_t Metric::round_down(Metric const& that) + { + return that._n; + } + + template < intmax_t N, typename C > template < intmax_t S, typename I > intmax_t Metric::round_down(Metric const& src) { - auto n = src.count(); - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to ge - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. 
- return (n / N) * S + (( n % N ) * S) / N; + typedef std::ratio R_NS; + typedef std::ratio R_SN; + + if (R_NS::den == 1) { + return src.count() / R_NS::num; + } else if (R_SN::den ==1) { + return src.count() * R_SN::num; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + auto n = src.count(); + // Yes, a bit odd, but this minimizes the risk of integer overflow. + // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (n / N) * S + (( n % N ) * S) / N; + } } + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. + The resulting count in the return value will be the smallest count that is not smaller than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 586 + @endcode + */ template < typename M, intmax_t N, typename C > M metric_round_up(Metric const& src) { @@ -142,12 +201,113 @@ namespace ApacheTrafficServer return M(M::round_down(src) + ((n % R::num) != 0)); } } - + + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. + The resulting count in the return value will be the largest count that is not larger than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 585 + @endcode + */ template < typename M, intmax_t N, typename C > M metric_round_down(Metric const& src) { return M(1 == M::SCALE ? src.units() : M::round_down(src)); } + + /// Convert a unit value to a scaled count, rounding down. 
+ template < typename M > + M metric_round_down(intmax_t src) + { + return M(src/M::SCALE); // assuming compiler will optimize out dividing by 1 if needed. + } + + /// Convert a unit value to a scaled count, rounding up. + template < typename M > + M metric_round_up(intmax_t src) + { + return M(M::SCALE == 1 ? src : (src/M::SCALE + 0 != src % M::SCALE)); + } + + + // --- Compare operators + + // Try for a bit of performance boost - if the metrics have the same scale + // just comparing the counts is sufficient and scaling conversion is avoided. + template < intmax_t N, typename C1, typename C2 > + bool operator < (Metric const& lhs, Metric const& rhs) + { + return lhs.count() < rhs.count(); + } + + template < intmax_t N, typename C1, typename C2 > + bool operator == (Metric const& lhs, Metric const& rhs) + { + return lhs.count() == rhs.count(); + } + + // Could be derived but if we're optimizing let's avoid the extra negation. + // Or we could check if the compiler can optimize that out anyway. + template < intmax_t N, typename C1, typename C2 > + bool operator <= (Metric const& lhs, Metric const& rhs) + { + return lhs.count() <= rhs.count(); + } + + // General base cases. + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator < (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + // Based on tests with the GNU compiler, the fact that the conditionals are compile time + // constant causes the never taken paths to be dropped so there are no runtime conditional + // checks, even with no optimization at all. 
+ if (R12::den == 1) { return lhs.count() < rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num < rhs.count(); } + else return lhs.units() < rhs.units(); + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator == (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + if (R12::den == 1) { return lhs.count() == rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num == rhs.count(); } + else return lhs.units() == rhs.units(); + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator <= (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + if (R12::den == 1) { return lhs.count() <= rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num <= rhs.count(); } + else return lhs.units() <= rhs.units(); + } + + // Derived compares. No narrowing optimization needed because if the scales + // are the same the nested call with be optimized. + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator > (Metric const& lhs, Metric const& rhs) + { + return rhs < lhs; + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator >= (Metric const& lhs, Metric const& rhs) + { + return rhs <= lhs; + } } #endif // TS_METRIC_H From 71ec482594ad7cf675d1beaeefbb6a73374eb48b Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 14 Jan 2017 10:07:17 -0600 Subject: [PATCH 04/81] Metric: Further optimization tweaks. Removed Metric::round_down. --- lib/ts/Metric.h | 127 ++++++++++++++++++++---------------------------- 1 file changed, 52 insertions(+), 75 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 61b6326b336..a11a922774f 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -86,17 +86,13 @@ namespace ApacheTrafficServer constexpr Count units() const; /// Assignment operator. 
- /// @note Requires that @c S be an integer multiple of @c SCALE. + /// @note Requires the scale of @c S be an integer multiple of the scale of this. template < intmax_t S, typename I > self& operator = (Metric const& that); + /// Assignment from same scale. + self& operator = (self const& that); - /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. - /// @internal This is required for internal use but may be handy for other clients. - /// @internal Variants to optimize special cases. - template < typename I > static intmax_t round_down(Metric const& src); - template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); - static intmax_t round_down(self const& that); - + /// Run time access to the scale of this metric (template arg @a N). static intmax_t scale(); protected: @@ -113,6 +109,8 @@ namespace ApacheTrafficServer constexpr auto Metric::units() const -> Count { return _n * SCALE; } template < intmax_t N, typename C > inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } + template < intmax_t N, typename C > + inline auto Metric::operator = (self const& that) -> self& { _n = that._n; return *this; } template template @@ -139,44 +137,8 @@ namespace ApacheTrafficServer return *this; } - // Same type, no rounding needed. - template < intmax_t N, typename C > - intmax_t Metric::round_down(self const& that) - { - return that._n; - } - - // Same scale just with different count type, no rounding. - template < intmax_t N, typename C > - template < typename I > - intmax_t Metric::round_down(Metric const& that) - { - return that._n; - } - - - template < intmax_t N, typename C > - template < intmax_t S, typename I > - intmax_t Metric::round_down(Metric const& src) - { - typedef std::ratio R_NS; - typedef std::ratio R_SN; - - if (R_NS::den == 1) { - return src.count() / R_NS::num; - } else if (R_SN::den ==1) { - return src.count() * R_SN::num; // N is a multiple of S. 
- } else { - // General case where neither N nor S are a multiple of the other. - auto n = src.count(); - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (n / N) * S + (( n % N ) * S) / N; - } - } - + // -- Free Functions -- + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. The resulting count in the return value will be the smallest count that is not smaller than the unit value of @a src. @@ -189,16 +151,20 @@ namespace ApacheTrafficServer auto size = metric_round_up(src); // size.count() == 586 @endcode */ - template < typename M, intmax_t N, typename C > - M metric_round_up(Metric const& src) + template < typename M, intmax_t S, typename I > + M metric_round_up(Metric const& src) { - if (1 == M::SCALE) { - return M(src.units()); + typedef std::ratio R; + auto c = src.count(); + + if (M::SCALE == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. } else { - typedef std::ratio R; // R::num == M::SCALE / GCD(M::SCALE, N) == GCF(M::SCALE, N) - auto n = src.count(); - // Round down and add 1 unless @a n is an even multiple of the GCF of the two scales. 
- return M(M::round_down(src) + ((n % R::num) != 0)); + return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::den)); } } @@ -214,24 +180,38 @@ namespace ApacheTrafficServer auto size = metric_round_up(src); // size.count() == 585 @endcode */ - template < typename M, intmax_t N, typename C > - M metric_round_down(Metric const& src) + template < typename M, intmax_t S, typename I > + M metric_round_down(Metric const& src) { - return M(1 == M::SCALE ? src.units() : M::round_down(src)); + typedef std::ratio R; + auto c = src.count(); + + if (R::den == 1) { + return c / R::num; // S is a multiple of N. + } else if (R::num ==1) { + return c * R::den; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. + // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num; + } } - /// Convert a unit value to a scaled count, rounding down. + /// Convert a unit value @a n to a Metric, rounding down. template < typename M > - M metric_round_down(intmax_t src) + M metric_round_down(intmax_t n) { - return M(src/M::SCALE); // assuming compiler will optimize out dividing by 1 if needed. + return n/M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. } - /// Convert a unit value to a scaled count, rounding up. + /// Convert a unit value @a n to a Metric, rounding up. template < typename M > - M metric_round_up(intmax_t src) + M metric_round_up(intmax_t n) { - return M(M::SCALE == 1 ? src : (src/M::SCALE + 0 != src % M::SCALE)); + return M::SCALE == 1 ? 
n : (n/M::SCALE + (0 != (n % M::SCALE))); } @@ -264,33 +244,30 @@ namespace ApacheTrafficServer template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator < (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; + typedef std::ratio R; // Based on tests with the GNU compiler, the fact that the conditionals are compile time // constant causes the never taken paths to be dropped so there are no runtime conditional // checks, even with no optimization at all. - if (R12::den == 1) { return lhs.count() < rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num < rhs.count(); } + if (R::den == 1) { return lhs.count() < rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den < rhs.count(); } else return lhs.units() < rhs.units(); } template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator == (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; - if (R12::den == 1) { return lhs.count() == rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num == rhs.count(); } + typedef std::ratio R; + if (R::den == 1) { return lhs.count() == rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den == rhs.count(); } else return lhs.units() == rhs.units(); } template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator <= (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; - if (R12::den == 1) { return lhs.count() <= rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num <= rhs.count(); } + typedef std::ratio R; + if (R::den == 1) { return lhs.count() <= rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den <= rhs.count(); } else return lhs.units() <= rhs.units(); } From 6fa53e51fff9bcef93dec70ad4d498ccd1abae0b Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Sat, 14 Jan 2017 17:31:58 -0600 Subject: [PATCH 05/81] Metric: Bug fixes. --- lib/ts/Metric.h | 4 ++-- lib/ts/test_Metric.cc | 51 +++++++++++++++++++++++++++++++++---------- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index a11a922774f..184eddfd4e6 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -138,7 +138,7 @@ namespace ApacheTrafficServer } // -- Free Functions -- - + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. The resulting count in the return value will be the smallest count that is not smaller than the unit value of @a src. @@ -164,7 +164,7 @@ namespace ApacheTrafficServer } else if (R::num == 1) { return c * R::den; // S is a multiple of N. } else { - return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::den)); + return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::num)); } } diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index 66495924639..54ba5dac6b0 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -31,9 +31,9 @@ namespace ts { struct TestBox { typedef TestBox self; ///< Self reference type. - + std::string _name; - + static int _count; static int _fail; @@ -54,19 +54,19 @@ bool TestBox::check(bool result, char const *fmt, ...) { ++_count; - + if (!result) { static constexpr size_t N = 1 << 16; size_t n = N; size_t x; char* s; char buffer[N]; // just stack, go big. - + s = buffer; x = snprintf(s, n, "%s: ", _name.c_str()); n -= x; s += x; - + va_list ap; va_start(ap, fmt); vsnprintf(s, n, fmt, ap); @@ -77,29 +77,31 @@ TestBox::check(bool result, char const *fmt, ...) return result; } +// Extremely simple test. 
void Test_1() { constexpr static int SCALE = 4096; typedef ts::Metric PageSize; - TestBox test("TS Metric"); + TestBox test("TS Metric basic"); PageSize pg1(1); test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); test.check(pg1.units() == SCALE, "Units wrong, got %d expected %d", pg1.units(), SCALE); } +// Test multiples. void Test_2() { constexpr static int SCALE_1 = 8192; constexpr static int SCALE_2 = 512; - + typedef ts::Metric Size_1; typedef ts::Metric Size_2; - TestBox test("TS Metric Conversions"); + TestBox test("TS Metric Conversion of scales of multiples"); Size_2 sz_a(2); Size_2 sz_b(57); Size_2 sz_c(SCALE_1 / SCALE_2); @@ -114,12 +116,12 @@ Test_2() test.check(sz.count() == 4 , "Rounding up, got %d expected %d", sz.count(), 4); sz = ts::metric_round_down(sz_b); test.check(sz.count() == 3 , "Rounding down, got %d expected %d", sz.count(), 3); - + sz = ts::metric_round_up(sz_c); test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); sz = ts::metric_round_down(sz_c); test.check(sz.count() == 1 , "Rounding down, got %d expected %d", sz.count(), 1); - + sz = ts::metric_round_up(sz_d); test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); sz = ts::metric_round_down(sz_d); @@ -131,11 +133,36 @@ Test_2() test.check(sz_b.count() == 119 * (SCALE_1/SCALE_2) , "Integral conversion, got %d expected %d", sz_b.count(), 119 * (SCALE_1/SCALE_2)); } +// Test common factor. 
void Test_3() +{ + constexpr static int SCALE_1 = 30; + constexpr static int SCALE_2 = 20; + + typedef ts::Metric Size_1; + typedef ts::Metric Size_2; + + TestBox test("TS Metric common factor conversions"); + Size_2 sz_a(2); + Size_2 sz_b(97); + + auto sz = ts::metric_round_up(sz_a); + test.check(sz.count() ==2 , "Rounding up, got %d expected %d", sz.count(), 2); + sz = ts::metric_round_down(sz_a); + test.check(sz.count() == 1 , "Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::metric_round_up(sz_b); + test.check(sz.count() == 65 , "Rounding up, got %d expected %d", sz.count(), 65); + sz = ts::metric_round_down(sz_b); + test.check(sz.count() == 64 , "Rounding down, got %d expected %d", sz.count(), 64); +} + +void +Test_4() { TestBox test("TS Metric: relatively prime tests"); - + ts::Metric<9> m_9; ts::Metric<4> m_4, m_test; @@ -164,7 +191,7 @@ main(int, char **) Test_1(); Test_2(); Test_3(); + Test_4(); TestBox::print_summary(); return 0; } - From 1da4d2c908204f9a9d7fd0a16157ea379cb7fc16 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 2 Dec 2016 14:57:09 -0600 Subject: [PATCH 06/81] Doc: Cache Store docs. 
--- configure.ac | 1 + .../architecture/cache-store.en.rst | 83 ++ lib/ts/Makefile.am | 2 +- lib/ts/MemView.cc | 74 ++ lib/ts/MemView.h | 1105 +++++++++++++++++ lib/ts/Metric.h | 313 +++++ lib/ts/ink_memory.h | 15 +- lib/ts/test_Metric.cc | 170 +++ tools/Makefile.am | 3 + tools/cache_tool/CacheDefs.h | 138 ++ tools/cache_tool/CacheStore.h | 51 + tools/cache_tool/CacheTool.cc | 478 +++++++ tools/cache_tool/Command.cc | 126 ++ tools/cache_tool/Command.h | 116 ++ tools/cache_tool/File.cc | 86 ++ tools/cache_tool/File.h | 156 +++ tools/cache_tool/Makefile.am | 32 + 17 files changed, 2944 insertions(+), 5 deletions(-) create mode 100644 doc/developer-guide/architecture/cache-store.en.rst create mode 100644 lib/ts/MemView.cc create mode 100644 lib/ts/MemView.h create mode 100644 lib/ts/Metric.h create mode 100644 lib/ts/test_Metric.cc create mode 100644 tools/cache_tool/CacheDefs.h create mode 100644 tools/cache_tool/CacheStore.h create mode 100644 tools/cache_tool/CacheTool.cc create mode 100644 tools/cache_tool/Command.cc create mode 100644 tools/cache_tool/Command.h create mode 100644 tools/cache_tool/File.cc create mode 100644 tools/cache_tool/File.h create mode 100644 tools/cache_tool/Makefile.am diff --git a/configure.ac b/configure.ac index e7b29edb5ce..8ba12180a45 100644 --- a/configure.ac +++ b/configure.ac @@ -1945,6 +1945,7 @@ AC_CONFIG_FILES([ tools/Makefile tools/trafficserver.pc tools/tsxs + tools/cache_tool/Makefile ]) # ----------------------------------------------------------------------------- diff --git a/doc/developer-guide/architecture/cache-store.en.rst b/doc/developer-guide/architecture/cache-store.en.rst new file mode 100644 index 00000000000..2c88d2a5983 --- /dev/null +++ b/doc/developer-guide/architecture/cache-store.en.rst @@ -0,0 +1,83 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. 
See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. include:: ../../common.defs + +.. _developer-cache-store: + +.. default-domain:: cpp + +Cache Store +****************** + +Initialization +============== + +:file:`storage.config` is read by :func:`Store::read_config` invoked from :code:`ink_cache_init`. + +Types +===== + +.. var:: size_t STORE_BLOCK_SIZE = 8192 + + The metric for measuring the size of stripe storage allocation. Note this is very different from + :var:`CACHE_BLOCK_SIZE` which is the metric for *object* allocation. + +.. var:: size_t CACHE_BLOCK_SIZE = 512 + + The metric for object storage allocation. The amount of storage allocated for an object in the + cache is a multiple of this value. + +.. class:: span_diskid_t + + Stores a 16 byte ID. + +.. class:: Span + + :class:`Span` models a :term:`cache span`. This is a contiguous span of storage. + + .. member:: int64_t blocks + + Number of storage blocks in the span. See :var:`STORE_BLOCK_SIZE`. + + .. member:: int64_t offset + + Offset (in bytes) to the start of the span. This is used only if the base storage is a file. + + .. member:: span_diskid_t disk_id + + No idea what this is. + +.. class:: Store + + A singleton containing all of the cache storage description. + + .. member:: unsigned n_disks_in_config + + The number of distinct devices in the configuration. + + ..
member:: unsigned n_disks + + The number of valid and distinct devices in the configuration. + + .. member:: Span** disk + + List of spans. + + .. member:: char * read_config() + + Read :file:`storage.config` and initialize the base state of the instance. The return value is :code:`nullptr` on success and a nul-terminated error string on error. diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am index 039db6e4b0b..23546b3f208 100644 --- a/lib/ts/Makefile.am +++ b/lib/ts/Makefile.am @@ -189,6 +189,7 @@ libtsutil_la_SOURCES = \ lockfile.cc \ signals.cc \ signals.h \ + MemView.h MemView.cc \ X509HostnameValidator.cc \ X509HostnameValidator.h @@ -250,4 +251,3 @@ clean-local: tidy-local: $(DIST_SOURCES) $(CXX_Clang_Tidy) - diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc new file mode 100644 index 00000000000..be6f80ae7d8 --- /dev/null +++ b/lib/ts/MemView.cc @@ -0,0 +1,74 @@ +#include +#include +#include + +namespace ApacheTrafficServer +{ + +int +memcmp(MemView const &lhs, MemView const &rhs) +{ + int zret; + size_t n; + + // Seems a bit ugly but size comparisons must be done anyway to get the memcmp args. + if (lhs.size() < rhs.size()) + zret = 1, n = lhs.size(); + else { + n = rhs.size(); + zret = rhs.size() < lhs.size() ? -1 : 0; + } + + int r = ::memcmp(lhs.ptr(), rhs.ptr(), n); + if (0 != r) // If we got a not-equal, override the size based result. + zret = r; + + return zret; +} + +int +strcasecmp(StringView lhs, StringView rhs) +{ + while (lhs && rhs) { + char l = tolower(*lhs); + char r = tolower(*rhs); + if (l < r) + return -1; + else if (r < l) + return 1; + ++lhs, ++rhs; + } + return lhs ? 1 : rhs ? -1 : 0; +} + +// Do the template instantiations.
+template void detail::stream_padding(std::ostream &, std::size_t); +template void detail::aligned_stream_write(std::ostream &, const StringView &); +} + +namespace std +{ + ostream& operator<<(ostream &os, const ApacheTrafficServer::MemView &b) + { + if (os.good()) { + ostringstream out; + out << b.size() << '@' << hex << b.ptr(); + os << out.str(); + } + return os; + } + + ostream& operator<<(ostream &os, const ApacheTrafficServer::StringView &b) + { + if (os.good()) { + const size_t size = b.size(); + const size_t w = static_cast(os.width()); + if (w <= size) + os.write(b.begin(), size); + else + ApacheTrafficServer::detail::aligned_stream_write(os, b); + os.width(0); + } + return os; + } +} diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h new file mode 100644 index 00000000000..ea51e83aa75 --- /dev/null +++ b/lib/ts/MemView.h @@ -0,0 +1,1105 @@ +#if !defined TS_MEM_VIEW +#define TS_MEM_VIEW + +/** @file + + Class for handling "views" of a buffer. Views presume the memory for the buffer is managed + elsewhere and allow efficient access to segments of the buffer without copies. Views are read + only as the view doesn't own the memory. Along with generic buffer methods are specialized + methods to support better string parsing, particularly token based parsing. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + */ + +#include +#include +#include +#include +#include + +/// Apache Traffic Server commons. +namespace ApacheTrafficServer +{ +class MemView; +class StringView; + +int memcmp(MemView const &lhs, MemView const &rhs); + int strcmp(StringView const &lhs, StringView const &rhs); +int strcasecmp(StringView lhs, StringView rhs); + +/** A read only view of contiguous piece of memory. + + A @c MemView does not own the memory to which it refers, it is simply a view of part of some + (presumably) larger memory object. The purpose is to allow working in a read only way a specific + part of the memory. This can avoid copying or allocation by allocating all needed memory at once + and then working with it via instances of this class. + + MemView is based on an earlier class ConstBuffer and influenced by Boost.string_ref. Neither + of these were adequate for how use of @c ConstBuffer evolved and so @c MemView is @c + ConstBuffer with some additional stylistic changes based on Boost.string_ref. + + This class is closely integrated with @c StringView. These classes have the same underlying + implementation and are differentiated only because of the return types and a few string oriented + methods. + */ +class MemView +{ + typedef MemView self; ///< Self reference type. + +protected: + const void *_ptr = nullptr; ///< Pointer to base of memory chunk. + size_t _size = 0; ///< Size of memory chunk. + +public: + /// Default constructor (empty buffer). + constexpr MemView(); + + /** Construct explicitly with a pointer and size. + */ + constexpr MemView(const void *ptr, ///< Pointer to buffer. + size_t n ///< Size of buffer. + ); + + /** Construct from a half open range of two pointers. + @note The byte at @start is in the view but the byte at @a end is not. + */ + constexpr MemView(const void *start, ///< First byte in the view. + const void *end ///< First byte not in the view. 
+ ); + + /** Construct from nullptr. + This implicitly makes the length 0. + */ + constexpr MemView(std::nullptr_t); + + /// Convert from StringView. + constexpr MemView(StringView const& that); + + /** Equality. + + This is effectively a pointer comparison, buffer contents are not compared. + + @return @c true if @a that refers to the same view as @a this, + @c false otherwise. + */ + bool operator==(self const &that) const; + + /** Inequality. + @return @c true if @a that does not refer to the same view as @a this, + @c false otherwise. + */ + bool operator!=(self const &that) const; + + /// Assignment - the view is copied, not the content. + self &operator=(self const &that); + + /** Shift the view to discard the first byte. + @return @a this. + */ + self &operator++(); + + /** Shift the view to discard the leading @a n bytes. + @return @a this + */ + self &operator+=(size_t n); + + /// Check for empty view. + /// @return @c true if the view has a zero pointer @b or size. + bool operator!() const; + + /// Check for non-empty view. + /// @return @c true if the view refers to a non-empty range of bytes. + explicit operator bool() const; + + /// Check for empty view (no content). + /// @see operator bool + bool is_empty() const; + + /// @name Accessors. + //@{ + /// Pointer to the first byte in the view. + const void *begin() const; + /// Pointer to first byte not in the view. + const void *end() const; + /// Number of bytes in the view. + constexpr size_t size() const; + /// Memory pointer. + /// @note This is equivalent to @c begin currently but it's probably good to have separation. + constexpr const void *ptr() const; + //@} + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const void* ptr, ///< Buffer address. + size_t n = 0 ///< Buffer size. + ); + + /// Set the view. 
+ /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const void *start, ///< First valid character. + const void *end ///< First invalid character. + ); + + /// Clear the view (become an empty view). + self &clear(); + + /// @return @c true if the byte at @a *p is in the view. + bool contains(const void *p) const; + + /** Find a value. + The memory is searched as if it were an array of the value type @a T. + + @return A pointer to the first occurrence of @a v in @a this + or @c nullptr if @a v is not found. + */ + template < typename V > + const V *find(V v) const; + + /** Find a value. + The memory is searched as if it were an array of the value type @a V. + + @return A pointer to the first value for which @a pred is @c true otherwise + @c nullptr. + */ + template + const V *find(std::function const &pred); + + /** Get the initial segment of the view before @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view + is returned. + + @return A buffer that contains all data before @a p. + */ + self prefix(const void *p) const; + + /** Split the view at @a p. + + The view is split in to two parts at @a p and the prefix is returned. The view is updated to + contain the bytes not returned in the prefix. The prefix will not contain @a p. + + @note If @a *p refers to a byte that is not in @a this then @a this is not changed and an empty + buffer is returned. Therefore this method can be safely called with the return value of + calling @c find. + + @return A buffer containing data up to but not including @a p. + + @see extractPrefix + */ + self splitPrefix(const void *p); + + /** Extract a prefix delimited by @a p. + + A prefix of @a this is removed from the view and returned. If @a p is not in the view then the + entire view is extracted and returned. + + If @a p points at a byte in the view this is identical to @c splitPrefix. 
If not then the + entire view in @a this will be returned and @a this will become an empty view. + + @return The prefix bounded at @a p or the entire view if @a p is not a byte in the view. + + @see splitPrefix + */ + self extractPrefix(const void *p); + + /** Get the trailing segment of the view after @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view is returned. + + @return A buffer that contains all data after @a p. + */ + self suffix(const void *p) const; + + /** Split the view at @a p. + + The view is split in to two parts and the suffix is returned. The view is updated to contain + the bytes not returned in the suffix. The suffix will not contain @a p. + + @note If @a p does not refer to a byte in the view, an empty view is returned and @a this is + unchanged. + + @return @a this. + */ + self splitSuffix(const void *p); +}; + +/** A read only view of contiguous piece of memory. + + A @c StringView does not own the memory to which it refers, it is simply a view of part of some + (presumably) larger memory object. The purpose is to allow working in a read only way a specific + part of the memory. A classic example for ATS is working with HTTP header fields and values + which need to be accessed independently but preferably without copying. A @c StringView supports this style. + + MemView is based on an earlier class ConstBuffer and influenced by Boost.string_ref. Neither + of these were adequate for how use of @c ConstBuffer evolved and so @c MemView is @c + ConstBuffer with some additional stylistic changes based on Boost.string_ref. + + In particular @c MemView is designed both to support passing via API (to replace the need to + pass two parameters for one real argument) and to aid in parsing input without copying. + + */ +class StringView +{ + typedef StringView self; ///< Self reference type. + +protected: + const char *_ptr = nullptr; ///< Pointer to base of memory chunk. + size_t _size = 0; ///< Size of memory chunk. 
+ +public: + /// Default constructor (empty buffer). + constexpr StringView(); + + /** Construct explicitly with a pointer and size. + */ + constexpr StringView(const char *ptr, ///< Pointer to buffer. + size_t n ///< Size of buffer. + ); + + /** Construct from a half open range of two pointers. + @note The byte at @start is in the view but the byte at @a end is not. + */ + constexpr StringView(const char *start, ///< First byte in the view. + const char *end ///< First byte not in the view. + ); + + /** Construct from nullptr. + This implicitly makes the length 0. + */ + constexpr StringView(std::nullptr_t); + + /** Construct from null terminated string. + @note The terminating null is not included. @c strlen is used to determine the length. + */ + explicit constexpr StringView(const char *s); + + /// Construct from @c MemView to reference the same view. + constexpr StringView(MemView const& that); + + /** Equality. + + This is effectively a pointer comparison, buffer contents are not compared. + + @return @c true if @a that refers to the same view as @a this, + @c false otherwise. + */ + bool operator==(self const &that) const; + + /** Inequality. + @return @c true if @a that does not refer to the same view as @a this, + @c false otherwise. + */ + bool operator!=(self const &that) const; + + /// Assignment - the view is copied, not the content. + self &operator=(self const &that); + + /// @return The first byte in the view. + char operator*() const; + + /// @return the byte at offset @a n. + char operator[](size_t n) const; + + /// @return the byte at offset @a n. + char operator[](int n) const; + + /** Shift the view to discard the first byte. + @return @a this. + */ + self &operator++(); + + /** Shift the view to discard the leading @a n bytes. + @return @a this + */ + self &operator+=(size_t n); + + /// Check for empty view. + /// @return @c true if the view has a zero pointer @b or size. + bool operator!() const; + + /// Check for non-empty view. 
+ /// @return @c true if the view refers to a non-empty range of bytes. + explicit operator bool() const; + + /// Check for empty view (no content). + /// @see operator bool + bool is_empty() const; + + /// @name Accessors. + //@{ + /// Pointer to the first byte in the view. + const char *begin() const; + /// Pointer to first byte not in the view. + const char *end() const; + /// Number of bytes in the view. + constexpr size_t size() const; + /// Memory pointer. + /// @note This is equivalent to @c begin currently but it's probably good to have separation. + constexpr const char *ptr() const; + //@} + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const char* ptr, ///< Buffer address. + size_t n = 0 ///< Buffer size. + ); + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const char *start, ///< First valid character. + const char *end ///< First invalid character. + ); + + /// Clear the view (become an empty view). + self &clear(); + + /// @return @c true if the byte at @a *p is in the view. + bool contains(const char *p) const; + + /** Find a byte. + @return A pointer to the first occurrence of @a c in @a this + or @c nullptr if @a c is not found. + */ + const char *find(char c) const; + + /** Find a byte. + @return A pointer to the first occurence of any of @a delimiters in @a + this or @c nullptr if not found. + */ + const char *find(self delimiters) const; + + /** Find a byte. + @return A pointer to the first byte for which @a pred is @c true otherwise + @c nullptr. + */ + const char *find(std::function const &pred) const; + + /** Remove bytes that match @a c from the start of the view. + */ + self <rim(char c); + + /** Remove bytes from the start of the view that are in @a delimiters. 
+ */ + self <rim(self delimiters); + + /** Remove bytes from the start of the view for which @a pred is @c true. + */ + self <rim(std::function const &pred); + + /** Get the initial segment of the view before @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view + is returned. + + @return A buffer that contains all data before @a p. + */ + self prefix(const char *p) const; + /// Convenience overload, split on delimiter set. + self prefix(self delimiters) const; + /// Convenience overload, split on predicate. + self prefix(std::function const &pred) const; + + + /** Split the view on the character at @a p. + + The view is split in to two parts and the byte at @a p is discarded. @a this retains all data + @b after @a p (equivalent to MemView(p+1, this->end()). A new view containing the + initial bytes up to but not including @a p is returned, (equivalent to + MemView(this->begin(), p)). + + This is convenient when tokenizing and @a p points at a delimiter. + + @note If @a *p refers to a byte that is not in @a this then @a this is not changed and an empty + buffer is returned. Therefore this method can be safely called with the return value of + calling @c find. + + @code + void f(MemView& text) { + MemView token = text.splitPrefix(text.find(delimiter)); + if (token) { // ... process token } + @endcode + + @return A buffer containing data up to but not including @a p. + + @see extractPrefix + */ + self splitPrefix(const char *p); + + /// Convenience overload, split on character. + self splitPrefix(char c); + /// Convenience overload, split on delimiter set. + self splitPrefix(self delimiters); + /// Convenience overload, split on predicate. + self splitPrefix(std::function const &pred); + + /** Extract a prefix delimited by @a p. + + A prefix of @a this is removed from the view and returned. If @a p is not in the view then the + entire view is extracted and returned.
+ + If @a p points at a byte in the view this is identical to @c splitPrefix. If not then the + entire view in @a this will be returned and @a this will become an empty view. This is easier + to use when repeated extracting tokens. The source view will become empty after extracting the + last token. + + @code + MemView text; + while (text) { + MemView token = text.extractPrefix(text.find(delimiter)); + // .. process token which will always be non-empty because text was not empty. + } + @endcode + + @return The prefix bounded at @a p or the entire view if @a p is not a byte in the view. + + @see splitPrefix + */ + self extractPrefix(const char *p); + + /// Convenience overload, extract on delimiter set. + self extractPrefix(char c); + /// Convenience overload, extract on delimiter set. + self extractPrefix(self delimiters); + /// Convenience overload, extract on predicate. + self extractPrefix(std::function const &pred); + + + /** Get the trailing segment of the view after @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view is returned. + + @return A buffer that contains all data after @a p. + */ + self suffix(const char *p) const; + + /** Split the view on the character at @a p. + + The view is split in to two parts and the byte at @a p is discarded. @a this retains all data + @b before @a p (equivalent to MemView(this->begin(), p)). A new view containing + the trailing bytes after @a p is returned, (equivalent to MemView(p+1, + this->end())). + + @note If @a p does not refer to a byte in the view, an empty view is returned and @a this is + unchanged. + + @return @a this. + */ + self splitSuffix(const char *p); + + // Functors for using this class in STL containers. + /// Ordering functor, lexicographic comparison. + struct LessThan { + bool + operator()(MemView const &lhs, MemView const &rhs) + { + return -1 == strcmp(lhs, rhs); + } + }; + /// Ordering functor, case ignoring lexicographic comparison. 
+ struct LessThanNoCase { + bool + operator()(MemView const &lhs, MemView const &rhs) + { + return -1 == strcasecmp(lhs, rhs); + } + }; +}; +// ---------------------------------------------------------- +// Inline implementations. + +inline constexpr MemView::MemView() +{ +} +inline constexpr MemView::MemView(void const *ptr, size_t n) : _ptr(ptr), _size(n) +{ +} +inline constexpr MemView::MemView(void const *start, void const *end) : _ptr(start), _size(static_cast(end) - static_cast(start)) +{ +} +inline constexpr MemView::MemView(std::nullptr_t) : _ptr(nullptr), _size(0) +{ +} +inline constexpr MemView::MemView(StringView const& that) : _ptr(that.ptr()), _size(that.size()) +{ +} + +inline MemView & +MemView::setView(const void *ptr, size_t n) +{ + _ptr = ptr; + _size = n; + return *this; +} + +inline MemView & +MemView::setView(const void *ptr, const void *limit) +{ + _ptr = ptr; + _size = static_cast(limit) - static_cast(ptr); + return *this; +} + +inline MemView & +MemView::clear() +{ + _ptr = 0; + _size = 0; + return *this; +} + +inline bool +MemView::operator==(self const &that) const +{ + return _size == that._size && _ptr == that._ptr; +} + +inline bool +MemView::operator!=(self const &that) const +{ + return !(*this == that); +} + +inline bool MemView::operator!() const +{ + return !(_ptr && _size); +} + +inline MemView::operator bool() const +{ + return _ptr && _size; +} + +inline bool +MemView::is_empty() const +{ + return !(_ptr && _size); +} + +inline MemView &MemView::operator++() +{ + _ptr = static_cast(_ptr) + 1; + --_size; + return *this; +} + +inline MemView & +MemView::operator+=(size_t n) +{ + if (n > _size) { + _ptr = nullptr; + _size = 0; + } else { + _ptr = static_cast(_ptr) + n; + _size -= n; + } + return *this; +} + +inline const void * +MemView::begin() const +{ + return _ptr; +} +inline constexpr const void * +MemView::ptr() const +{ + return _ptr; +} + +inline const void * +MemView::end() const +{ + return static_cast(_ptr) + _size; +} + 
+inline constexpr size_t +MemView::size() const +{ + return _size; +} + +inline MemView & +MemView::operator=(MemView const &that) +{ + _ptr = that._ptr; + _size = that._size; + return *this; +} + +inline bool +MemView::contains(const void *p) const +{ + return _ptr <= this->begin() && p < this->end(); +} + +inline MemView +MemView::prefix(const void *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(_ptr, p); + return zret; +} + +inline MemView +MemView::splitPrefix(const void *p) +{ + self zret; // default to empty return. + if (this->contains(p)) { + zret.setView(_ptr, p); + this->setView(p, this->end()); + } + return zret; +} + +inline MemView +MemView::extractPrefix(const void *p) +{ + self zret{this->splitPrefix(p)}; + + // For extraction if zret is empty, use up all of @a this + if (!zret) { + zret = *this; + this->clear(); + } + + return zret; +} + +inline MemView +MemView::suffix(const void *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(p, this->end()); + return zret; +} + +inline MemView +MemView::splitSuffix(const void *p) +{ + self zret; + if (this->contains(p)) { + zret.setView(p, this->end()); + this->setView(_ptr, p); + } + return zret; +} + +template < typename V > +inline const V * +MemView::find(V v) const +{ + for ( const V* spot = static_cast(_ptr), limit = spot + (_size/sizeof(V)) ; spot < limit ; ++spot ) + if (v == *spot) return spot; + return nullptr; +} + +// Specialize char for performance. 
+template < > +inline const char * +MemView::find(char v) const +{ + return static_cast(memchr(_ptr, v, _size)); +} + +template < typename V > +inline const V * +MemView::find(std::function const &pred) +{ + for (const V *p = static_cast(_ptr), *limit = p + (_size/sizeof(V)) ; p < limit; ++p) + if (pred(*p)) return p; + return nullptr; +} + +// === StringView Implementation === +inline constexpr StringView::StringView() +{ +} +inline constexpr StringView::StringView(const char *ptr, size_t n) : _ptr(ptr), _size(n) +{ +} +inline constexpr StringView::StringView(const char *start, const char *end) : _ptr(start), _size(end - start) +{ +} +inline constexpr StringView::StringView(const char *s) : _ptr(s), _size(strlen(s)) +{ +} +inline constexpr StringView::StringView(std::nullptr_t) : _ptr(nullptr), _size(0) +{ +} +inline constexpr StringView::StringView(MemView const& that) : _ptr(static_cast(that.ptr())), _size(that.size()) +{ +} + +inline StringView & +StringView::setView(const char *ptr, size_t n) +{ + _ptr = ptr; + _size = n; + return *this; +} + +inline StringView & +StringView::setView(const char *ptr, const char *limit) +{ + _ptr = ptr; + _size = limit - ptr; + return *this; +} + +inline StringView & +StringView::clear() +{ + _ptr = 0; + _size = 0; + return *this; +} + +inline bool +StringView::operator==(self const &that) const +{ + return _size == that._size && _ptr == that._ptr; +} + +inline bool +StringView::operator!=(self const &that) const +{ + return !(*this == that); +} + +inline bool StringView::operator!() const +{ + return !(_ptr && _size); +} + +inline StringView::operator bool() const +{ + return _ptr && _size; +} + +inline bool +StringView::is_empty() const +{ + return !(_ptr && _size); +} + +inline char StringView::operator*() const +{ + return *_ptr; +} + +inline StringView &StringView::operator++() +{ + ++_ptr; + --_size; + return *this; +} + +inline StringView & +StringView::operator+=(size_t n) +{ + if (n > _size) { + _ptr = nullptr; + _size 
= 0; + } else { + _ptr += n; + _size -= n; + } + return *this; +} + +inline const char * +StringView::begin() const +{ + return _ptr; +} +inline constexpr const char * +StringView::ptr() const +{ + return _ptr; +} + +inline const char * +StringView::end() const +{ + return _ptr + _size; +} + +inline constexpr size_t +StringView::size() const +{ + return _size; +} + +inline StringView & +StringView::operator=(StringView const &that) +{ + _ptr = that._ptr; + _size = that._size; + return *this; +} + +inline char StringView::operator[](size_t n) const +{ + return _ptr[n]; +} + +inline char StringView::operator[](int n) const +{ + return _ptr[n]; +} + +inline bool +StringView::contains(const char *p) const +{ + return _ptr <= p && p < _ptr + _size; +} + +inline auto +StringView::prefix(const char *p) const -> self +{ + self zret; + if (this->contains(p)) + zret.setView(_ptr, p); + return zret; +} + +inline auto +StringView::prefix(self delimiters) const -> self +{ + return this->prefix(this->find(delimiters)); +} + +inline auto +StringView::prefix(std::function const &pred) const -> self +{ + return this->prefix(this->find(pred)); +} + +inline auto +StringView::splitPrefix(const char *p) -> self +{ + self zret; // default to empty return. 
+ if (this->contains(p)) { + zret.setView(_ptr, p); + this->setView(p + 1, this->end()); + } + return zret; +} + +inline auto +StringView::splitPrefix(char c) -> self +{ + return this->splitPrefix(this->find(c)); +} + +inline auto +StringView::splitPrefix(self delimiters) -> self +{ + return this->splitPrefix(this->find(delimiters)); +} + +inline auto +StringView::splitPrefix(std::function const &pred) -> self +{ + return this->splitPrefix(this->find(pred)); +} + +inline StringView +StringView::extractPrefix(const char *p) +{ + self zret{this->splitPrefix(p)}; + + // For extraction if zret is empty, use up all of @a this + if (!zret) { + zret = *this; + this->clear(); + } + + return zret; +} + +inline auto +StringView::extractPrefix(char c) -> self +{ + return this->extractPrefix(this->find(c)); +} + +inline auto +StringView::extractPrefix(self delimiters) -> self +{ + return this->extractPrefix(this->find(delimiters)); +} + +inline auto +StringView::extractPrefix(std::function const &pred) -> self +{ + return this->extractPrefix(this->find(pred)); +} + +inline StringView +StringView::suffix(const char *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(p + 1, _ptr + _size); + return zret; +} + +inline StringView +StringView::splitSuffix(const char *p) +{ + self zret; + if (this->contains(p)) { + zret.setView(p + 1, this->end()); + this->setView(_ptr, p); + } + return zret; +} + +inline const char * +StringView::find(char c) const +{ + return static_cast(memchr(_ptr, c, _size)); +} + +inline const char * +StringView::find(self delimiters) const +{ + std::bitset<256> valid; + + // Load the bits in the array. This should be faster because this iterates over the delimiters + // exactly once instead of for each byte in @a this. 
+ for (char c : delimiters) + valid[static_cast(c)] = true; + + for (const char *p = this->begin(), *limit = this->end(); p < limit; ++p) + if (valid[static_cast(*p)]) + return p; + + return nullptr; +} + +inline const char * +StringView::find(std::function const &pred) const +{ + const char* p = std::find_if(this->begin(), this->end(), pred); + return p == this->end() ? nullptr : p; +} + +inline StringView & +StringView::ltrim(char c) +{ + while (_size && *_ptr == c) + ++*this; + return *this; +} + +inline StringView & +StringView::ltrim(self delimiters) +{ + std::bitset<256> valid; + + // Load the bits in the array. This should be faster because this iterates over the delimiters + // exactly once instead of for each byte in @a this. + for (char c : delimiters) + valid[static_cast(c)] = true; + + while (*this && valid[static_cast(**this)]) + ++*this; + + return *this; +} + +inline StringView & +StringView::ltrim(std::function const &pred) +{ + while (*this && pred(**this)) + ++*this; + return *this; +} + +inline int +strcmp(StringView const &lhs, StringView const &rhs) +{ + return ApacheTrafficServer::memcmp(lhs, rhs); +} + +namespace detail +{ + // These are templated in order to not require including std::ostream but only std::iosfwd. + // Templates allows the use of specific stream mechanisms to be delayed until use at which point + // the caller will have included the required headers if needed but callers who don't won't need + // to. 
+ + template + void + stream_padding(Stream &os, std::size_t n) + { + static constexpr size_t pad_size = 8; + char padding[pad_size]; + + std::fill_n(padding, pad_size, os.fill()); + for (; n >= pad_size && os.good(); n -= pad_size) + os.write(padding, pad_size); + if (n > 0 && os.good()) + os.write(padding, n); + } + + template + void + aligned_stream_write(Stream &os, const StringView &b) + { + const std::size_t size = b.size(); + const std::size_t alignment_size = static_cast(os.width()) - size; + const bool align_left = (os.flags() & Stream::adjustfield) == Stream::left; + if (!align_left) { + detail::stream_padding(os, alignment_size); + if (os.good()) + os.write(b.begin(), size); + } else { + os.write(b.begin(), size); + if (os.good()) + detail::stream_padding(os, alignment_size); + } + } + + extern template void stream_padding(std::ostream &, std::size_t); + extern template void aligned_stream_write(std::ostream &, const StringView &); + +} // detail + +} // end namespace ApacheTrafficServer + +namespace std +{ + ostream& operator<<(ostream &os, const ApacheTrafficServer::MemView &b); + ostream& operator<<(ostream &os, const ApacheTrafficServer::StringView &b); +} + +#endif // TS_BUFFER_HEADER diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h new file mode 100644 index 00000000000..61b6326b336 --- /dev/null +++ b/lib/ts/Metric.h @@ -0,0 +1,313 @@ +/** @file + + Scaled integral values. + + In many situations it is desirable to define scaling factors or base units (a "metric"). This template + enables this to be done in a type and scaling safe manner where the defined factors carry their scaling + information as part of the type. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if !defined(TS_METRIC_H) +#define TS_METRIC_H + +#include +#include + +namespace ApacheTrafficServer +{ + /** A class to hold scaled values. + + Instances of this class have a @a count and a @a scale. The "value" of the instance is @a + count * @a scale. The scale is stored in the compiler in the class symbol table and so only + the count is a run time value. An instance with a large scale can be assign to an instance + with a smaller scale and the conversion is done automatically. Conversions from a smaller to + larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents + inadvertent changes in value. Because the scales are not the same these conversions can be + lossy and the two conversions determine whether, in such a case, the result should be rounded + up or down to the nearest scale value. + + @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. + + @note This is modeled somewhat on @c std::chrono and serves a similar function for different + and simpler cases (where the ratio is always an integer, never a fraction). + + @see metric_round_up + @see metric_round_down + */ + template < intmax_t N, typename T = int > + class Metric + { + typedef Metric self; ///< Self reference type. + + public: + /// Scaling factor for instances. + /// Make it externally accessible. 
+ constexpr static intmax_t SCALE = N; + typedef T Count; ///< Type used to hold the count. + + constexpr Metric(); ///< Default contructor. + ///< Construct to have @a n scaled units. + constexpr Metric(Count n); + + /// Copy constructor for same scale. + template < typename C > + Metric(Metric const& that); + + /// Copy / conversion constructor. + /// @note Requires that @c S be an integer multiple of @c SCALE. + template < intmax_t S, typename I > + Metric(Metric const& that); + + /// Direct assignment. + /// The count is set to @a n. + self& operator = (Count n); + + /// The number of scale units. + constexpr Count count() const; + /// The absolute value, scaled up. + constexpr Count units() const; + + /// Assignment operator. + /// @note Requires that @c S be an integer multiple of @c SCALE. + template < intmax_t S, typename I > + self& operator = (Metric const& that); + + /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. + /// @internal This is required for internal use but may be handy for other clients. + /// @internal Variants to optimize special cases. + template < typename I > static intmax_t round_down(Metric const& src); + template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); + static intmax_t round_down(self const& that); + + static intmax_t scale(); + + protected: + Count _n; ///< Number of scale units. 
+ }; + + template < intmax_t N, typename C > + constexpr Metric::Metric() : _n() {} + template < intmax_t N, typename C > + constexpr Metric::Metric(Count n) : _n(n) {} + template < intmax_t N, typename C > + constexpr auto Metric::count() const -> Count { return _n; } + template < intmax_t N, typename C > + constexpr auto Metric::units() const -> Count { return _n * SCALE; } + template < intmax_t N, typename C > + inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } + + template + template + Metric::Metric(Metric const& that) : _n(static_cast(that._n)) + { + } + + template + template + Metric::Metric(Metric const& that) + { + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + } + + template + template + auto Metric::operator = (Metric const& that) -> self& + { + typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + return *this; + } + + // Same type, no rounding needed. + template < intmax_t N, typename C > + intmax_t Metric::round_down(self const& that) + { + return that._n; + } + + // Same scale just with different count type, no rounding. + template < intmax_t N, typename C > + template < typename I > + intmax_t Metric::round_down(Metric const& that) + { + return that._n; + } + + + template < intmax_t N, typename C > + template < intmax_t S, typename I > + intmax_t Metric::round_down(Metric const& src) + { + typedef std::ratio R_NS; + typedef std::ratio R_SN; + + if (R_NS::den == 1) { + return src.count() / R_NS::num; + } else if (R_SN::den ==1) { + return src.count() * R_SN::num; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + auto n = src.count(); + // Yes, a bit odd, but this minimizes the risk of integer overflow. 
+ // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (n / N) * S + (( n % N ) * S) / N; + } + } + + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. + The resulting count in the return value will be the smallest count that is not smaller than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 586 + @endcode + */ + template < typename M, intmax_t N, typename C > + M metric_round_up(Metric const& src) + { + if (1 == M::SCALE) { + return M(src.units()); + } else { + typedef std::ratio R; // R::num == M::SCALE / GCD(M::SCALE, N) == GCF(M::SCALE, N) + auto n = src.count(); + // Round down and add 1 unless @a n is an even multiple of the GCF of the two scales. + return M(M::round_down(src) + ((n % R::num) != 0)); + } + } + + /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. + The resulting count in the return value will be the largest count that is not larger than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 585 + @endcode + */ + template < typename M, intmax_t N, typename C > + M metric_round_down(Metric const& src) + { + return M(1 == M::SCALE ? src.units() : M::round_down(src)); + } + + /// Convert a unit value to a scaled count, rounding down. + template < typename M > + M metric_round_down(intmax_t src) + { + return M(src/M::SCALE); // assuming compiler will optimize out dividing by 1 if needed. + } + + /// Convert a unit value to a scaled count, rounding up. 
+ template < typename M > + M metric_round_up(intmax_t src) + { + return M(M::SCALE == 1 ? src : (src/M::SCALE + 0 != src % M::SCALE)); + } + + + // --- Compare operators + + // Try for a bit of performance boost - if the metrics have the same scale + // just comparing the counts is sufficient and scaling conversion is avoided. + template < intmax_t N, typename C1, typename C2 > + bool operator < (Metric const& lhs, Metric const& rhs) + { + return lhs.count() < rhs.count(); + } + + template < intmax_t N, typename C1, typename C2 > + bool operator == (Metric const& lhs, Metric const& rhs) + { + return lhs.count() == rhs.count(); + } + + // Could be derived but if we're optimizing let's avoid the extra negation. + // Or we could check if the compiler can optimize that out anyway. + template < intmax_t N, typename C1, typename C2 > + bool operator <= (Metric const& lhs, Metric const& rhs) + { + return lhs.count() <= rhs.count(); + } + + // General base cases. + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator < (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + // Based on tests with the GNU compiler, the fact that the conditionals are compile time + // constant causes the never taken paths to be dropped so there are no runtime conditional + // checks, even with no optimization at all. 
+ if (R12::den == 1) { return lhs.count() < rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num < rhs.count(); } + else return lhs.units() < rhs.units(); + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator == (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + if (R12::den == 1) { return lhs.count() == rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num == rhs.count(); } + else return lhs.units() == rhs.units(); + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator <= (Metric const& lhs, Metric const& rhs) + { + typedef std::ratio R12; + typedef std::ratio R21; + if (R12::den == 1) { return lhs.count() <= rhs.count() * R12::num; } + else if (R21::den == 1) { return lhs.count() * R21::num <= rhs.count(); } + else return lhs.units() <= rhs.units(); + } + + // Derived compares. No narrowing optimization needed because if the scales + // are the same the nested call with be optimized. + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator > (Metric const& lhs, Metric const& rhs) + { + return rhs < lhs; + } + + template < intmax_t N1, typename C1, intmax_t N2, typename C2 > + bool operator >= (Metric const& lhs, Metric const& rhs) + { + return rhs <= lhs; + } +} + +#endif // TS_METRIC_H diff --git a/lib/ts/ink_memory.h b/lib/ts/ink_memory.h index 2eb08d89de4..2ce9a4eba92 100644 --- a/lib/ts/ink_memory.h +++ b/lib/ts/ink_memory.h @@ -251,6 +251,8 @@ class ats_scoped_resource ats_scoped_resource() : _r(Traits::initValue()) {} /// Construct with contained resource. explicit ats_scoped_resource(value_type rt) : _r(rt) {} + /// rvalue constructor + ats_scoped_resource(self && that) : _r(that.release()) {} /// Destructor. ~ats_scoped_resource() { @@ -328,8 +330,8 @@ class ats_scoped_resource protected: value_type _r; ///< Resource. 
private: - ats_scoped_resource(self const &); ///< Copy constructor not permitted. - self &operator=(self const &); ///< Self assignment not permitted. + ats_scoped_resource(self const &) = delete; ///< Copy constructor not permitted. + self &operator=(self const &) = delete; ///< Self assignment not permitted. }; namespace detail @@ -367,6 +369,8 @@ class ats_scoped_fd : public ats_scoped_resource ats_scoped_fd() : super() {} /// Construct with contained resource. explicit ats_scoped_fd(value_type rt) : super(rt) {} + /// rvalue / move constructor + ats_scoped_fd(self && that) : super(static_cast(that)) {} /** Place a new resource @a rt in the container. Any resource currently contained is destroyed. This object becomes the owner of @a rt. @@ -442,7 +446,9 @@ class ats_scoped_str : public ats_scoped_resource(ats_malloc(n))) {} /// Put string @a s in this container for cleanup. explicit ats_scoped_str(char *s) : super(s) {} - /// Assign a string @a s to this container. + /// rvalue constructor + ats_scoped_str(self && that) : super(static_cast(that)) {} + /// Assign a string @a s to this container.` self & operator=(char *s) { @@ -501,7 +507,7 @@ class ats_scoped_obj : public ats_scoped_resource +#include +#include + +namespace ts { + using namespace ApacheTrafficServer; +} + +struct TestBox { + typedef TestBox self; ///< Self reference type. + + std::string _name; + + static int _count; + static int _fail; + + TestBox(char const* name) : _name(name) {} + TestBox(std::string const& name) : _name(name) {} + bool check(bool result, char const *fmt, ...) __attribute__((format(printf, 3, 4))); + + static void print_summary() + { + printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? "FAIL" : "SUCCESS"); + } +}; + +int TestBox::_count = 0; +int TestBox::_fail = 0; + +bool +TestBox::check(bool result, char const *fmt, ...) 
+{ + ++_count; + + if (!result) { + static constexpr size_t N = 1 << 16; + size_t n = N; + size_t x; + char* s; + char buffer[N]; // just stack, go big. + + s = buffer; + x = snprintf(s, n, "%s: ", _name.c_str()); + n -= x; + s += x; + + va_list ap; + va_start(ap, fmt); + vsnprintf(s, n, fmt, ap); + va_end(ap); + printf("%s\n", buffer); + ++_fail; + } + return result; +} + +void +Test_1() +{ + constexpr static int SCALE = 4096; + typedef ts::Metric PageSize; + + TestBox test("TS Metric"); + PageSize pg1(1); + + test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); + test.check(pg1.units() == SCALE, "Units wrong, got %d expected %d", pg1.units(), SCALE); +} + +void +Test_2() +{ + constexpr static int SCALE_1 = 8192; + constexpr static int SCALE_2 = 512; + + typedef ts::Metric Size_1; + typedef ts::Metric Size_2; + + TestBox test("TS Metric Conversions"); + Size_2 sz_a(2); + Size_2 sz_b(57); + Size_2 sz_c(SCALE_1 / SCALE_2); + Size_2 sz_d(29 * SCALE_1 / SCALE_2); + + auto sz = ts::metric_round_up(sz_a); + test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::metric_round_down(sz_a); + test.check(sz.count() == 0 , "Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::metric_round_up(sz_b); + test.check(sz.count() == 4 , "Rounding up, got %d expected %d", sz.count(), 4); + sz = ts::metric_round_down(sz_b); + test.check(sz.count() == 3 , "Rounding down, got %d expected %d", sz.count(), 3); + + sz = ts::metric_round_up(sz_c); + test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::metric_round_down(sz_c); + test.check(sz.count() == 1 , "Rounding down, got %d expected %d", sz.count(), 1); + + sz = ts::metric_round_up(sz_d); + test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); + sz = ts::metric_round_down(sz_d); + test.check(sz.count() == 29 , "Rounding down, got %d expected %d", sz.count(), 29); + + sz = 119; + sz_b = sz; // 
Should be OK because SCALE_1 is an integer multiple of SCALE_2 + // sz = sz_b; // Should not compile. + test.check(sz_b.count() == 119 * (SCALE_1/SCALE_2) , "Integral conversion, got %d expected %d", sz_b.count(), 119 * (SCALE_1/SCALE_2)); +} + +void +Test_3() +{ + TestBox test("TS Metric: relatively prime tests"); + + ts::Metric<9> m_9; + ts::Metric<4> m_4, m_test; + + m_9 = 95; + // m_4 = m_9; // Should fail to compile with static assert. + // m_9 = m_4; // Should fail to compile with static assert. + + m_4 = ts::metric_round_up(m_9); + test.check(m_4.count() == 214 , "Rounding down, got %d expected %d", m_4.count(), 214); + m_4 = ts::metric_round_down(m_9); + test.check(m_4.count() == 213 , "Rounding down, got %d expected %d", m_4.count(), 213); + + m_4 = 213; + m_9 = ts::metric_round_up(m_4); + test.check(m_9.count() == 95 , "Rounding down, got %d expected %d", m_9.count(), 95); + m_9 = ts::metric_round_down(m_4); + test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); + + m_test = m_4; // Verify assignment of identical scale values compiles. + test.check(m_test.count() == 213 , "Assignment got %d expected %d", m_4.count(), 213); +} + +int +main(int, char **) +{ + Test_1(); + Test_2(); + Test_3(); + TestBox::print_summary(); + return 0; +} + diff --git a/tools/Makefile.am b/tools/Makefile.am index a94647e47a9..7b76f4ef2d2 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -17,6 +17,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ +SUBDIRS = cache_tool + bin_SCRIPTS = tsxs tspush pkgconfigdir = $(libdir)/pkgconfig diff --git a/tools/cache_tool/CacheDefs.h b/tools/cache_tool/CacheDefs.h new file mode 100644 index 00000000000..96d6a34211b --- /dev/null +++ b/tools/cache_tool/CacheDefs.h @@ -0,0 +1,138 @@ +/** @file + + A brief file description + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if !defined(CACHE_DEFS_H) +#define CACHE_DEFS_H +#include +#include + +namespace ApacheTrafficServer +{ + + constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; + + typedef Metric<8192, int64_t> CacheStripeBlocks; + typedef Metric<512, int64_t> CacheContentBlocks; + typedef Metric<1, int64_t> CacheBytes; + + /** A cache span is a representation of raw storage. + It corresponds to a raw disk, disk partition, file, or directory. + */ + class CacheSpan + { + public: + /// Default offset of start of data in a span. + /// @internal I think this is done to avoid collisions with partition tracking mechanisms. + constexpr static CacheStripeBlocks OFFSET { 1 }; + }; + + /** A section of storage in a span, used to contain a stripe. + + @note Serializable. 
+ + @internal nee @c DiskVolBlock + */ + struct CacheStripeDescriptor { + CacheBytes offset; // offset in bytes from the start of the disk + CacheStripeBlocks len; // length of block. + uint32_t vol_idx; ///< If in use, the volume index. + unsigned int type : 3; + unsigned int free : 1; + }; + + /** Header data for a span. + + @internal nee DiskHeader + */ + struct SpanHeader { + static constexpr uint32_t MAGIC = 0xABCD1237; + uint32_t magic; + uint32_t num_volumes; /* number of discrete volumes (DiskVol) */ + uint32_t num_free; /* number of disk volume blocks free */ + uint32_t num_used; /* number of disk volume blocks in use */ + uint32_t num_diskvol_blks; /* number of disk volume blocks */ + uint64_t num_blocks; + /// Serialized stripe descriptors. This is treated as a variable sized array. + CacheStripeDescriptor stripes[1]; + }; + + /** Stripe data, serialized format. + + @internal nee VolHeadFooter + */ + class CacheStripeMeta { + public: + static constexpr uint32_t MAGIC = 0xF1D0F00D; + + uint32_t magic; + VersionNumber version; + time_t create_time; + off_t write_pos; + off_t last_write_pos; + off_t agg_pos; + uint32_t generation; // token generation (vary), this cannot be 0 + uint32_t phase; + uint32_t cycle; + uint32_t sync_serial; + uint32_t write_serial; + uint32_t dirty; + uint32_t sector_size; + uint32_t unused; // pad out to 8 byte boundary + uint16_t freelist[1]; + }; + + class StripeData + { + public: + size_t calc_hdr_len() const; + + int64_t segments; ///< Number of segments. + int64_t buckets; ///< Number of buckets. + off_t skip; ///< Start of stripe data. + off_t start; ///< Start of content data. + off_t len; ///< Total size of stripe (metric?) 
+ }; + + inline size_t StripeData::calc_hdr_len() const { return sizeof(CacheStripeMeta) + sizeof(uint16_t) * (this->segments-1); } + // inline size_t StripeData::calc_dir_len() const { return this->calc_hdr_len() + this->buckets * DIR_DEPTH * this->segments * SIZEOF_DIR + sizeof(CacheStripeMeta); } + + class CacheDirEntry + { + unsigned int offset : 24; + unsigned int big : 2; + unsigned int size : 6; + unsigned int tag : 12; + unsigned int phase : 1; + unsigned int head : 1; + unsigned int pinnned : 1; + unsigned int token : 1; + unsigned int next : 16; + uint16_t offset_high; + }; + + class CacheVolume + { + }; +} + +#endif // CACHE_DEFS_H diff --git a/tools/cache_tool/CacheStore.h b/tools/cache_tool/CacheStore.h new file mode 100644 index 00000000000..faba04389eb --- /dev/null +++ b/tools/cache_tool/CacheStore.h @@ -0,0 +1,51 @@ +/** @file + + Overall cache storage structures. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if !defined(CACHE_STORE_H) +#define CACHE_STORE_H + +#include + +namespace ApacheTrafficServer +{ + class CacheStore + { + /// Configuration data for the cache store. + class Config + { + public: + /// A single item (line) from the configuration. 
+ struct Item + { + StringView + }; + private: + /// Items read from the configuration. + std::vector _items; + /// The raw text of the configuration file. + std::unique_ptr _text; + }; + }; +} + +#endif // CACHE_STORE_H diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc new file mode 100644 index 00000000000..fd0117090ea --- /dev/null +++ b/tools/cache_tool/CacheTool.cc @@ -0,0 +1,478 @@ +/** @file + + Main program file for Cache Tool. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "File.h" +#include "CacheDefs.h" +#include "Command.h" + +// Sigh, a hack for now. We already have "ts" defined as a namespace in various places so for now +// just import the Full Name namespace in to 'ts' rather than direct 'namespace ts = ApachTrafficServer' +namespace ts { + using namespace ApacheTrafficServer; +} + +namespace ApacheTrafficServer { + constexpr CacheStripeBlocks CacheSpan::OFFSET; +} + +namespace { + + ts::FilePath TargetFile; + ts::CommandTable Commands; + // Default this to read only, only enable write if specifically required. 
+ int OPEN_RW_FLAGS = O_RDONLY; + + struct Span + { + Span(ts::FilePath const& path) : _path(path) {} + + void clearPermanently(); + + ts::FilePath _path; + ats_scoped_fd _fd; + std::unique_ptr _header; + }; + + struct Volume + { + struct StripeRef + { + Span* _span; ///< Span with stripe. + int _idx; ///< Stripe index in span. + }; + int _idx; ///< Volume index. + std::vector _stripes; + }; + + // All of these free functions need to be moved to the Cache class. + + bool Validate_Stripe_Meta(ts::CacheStripeMeta const& stripe) + { + return ts::CacheStripeMeta::MAGIC == stripe.magic && + stripe.version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && + stripe.version.ink_minor <= 2 // This may have always been zero, actually. + ; + } + + typedef std::tuple ProbeResult; + + ProbeResult Probe_For_Stripe(ts::StringView& mem) + { + ProbeResult zret{mem.size() >= sizeof(ts::CacheStripeMeta) ? 0 : -1, ts::StringView(nullptr)}; + ts::StringView& test_site = std::get<1>(zret); + + while (mem.size() >= sizeof(ts::CacheStripeMeta)) { + // The meta data is stored aligned on a stripe block boundary, so only need to check there. + test_site = mem; + mem += ts::CacheStripeBlocks::SCALE; // always move this forward to make restarting search easy. + + if (Validate_Stripe_Meta(*reinterpret_cast(test_site.ptr()))) { + std::get<0>(zret) = 1; + break; + } + } + return zret; + } + + void Calc_Stripe_Data(ts::CacheStripeMeta const& header, ts::CacheStripeMeta const& footer, off_t delta, ts::StripeData& data) + { + // Assuming header + free list fits in one cache stripe block, which isn't true for large stripes (>2G or so). + // Need to detect that, presumably by checking that the segment count fits in the stripe block. + ts::CacheStripeBlocks hdr_size { 1 }; + off_t space = delta - hdr_size.units(); + int64_t n_buckets = space / 40; + data.segments = n_buckets / (1<<14); + // This should never be more than one loop, usually none. 
+ while ((n_buckets / data.segments) > 1<<14) + ++(data.segments); + data.buckets = n_buckets / data.segments; + data.start = delta * 2; // this is wrong, need to add in the base block position. + + std::cout << "Stripe is " << data.segments << " segments with " << data.buckets << " buckets per segment for " << data.buckets * data.segments * 4 << " total directory entries taking " << data.buckets * data.segments * 40 << " out of " << space << " bytes." << std::endl; + } + + void Open_Stripe(ats_scoped_fd const& fd, ts::CacheStripeDescriptor const& block) + { + int found; + ts::StringView data; + ts::StringView stripe_mem; + constexpr static int64_t N = 1 << 24; + int64_t n; + off_t pos = block.offset.units(); + ts::CacheStripeMeta stripe_meta[4]; + off_t stripe_pos[4] = { 0,0,0,0 }; + off_t delta; + // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 + // which means at most 10/160 (1/16) of the span can be directory/header. + off_t limit = pos + block.len.units() / 16; + alignas(4096) static char buff[N]; + + // Check the earlier part of the block. Header A must be at the start of the stripe block. + // A full chunk is read in case Footer A is in that range. + n = pread(fd, buff, N, pos); + data.setView(buff, n); + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + + if (found > 0) { + if (stripe_mem.ptr() != buff) { + std::cout << "Header A found at" << pos + stripe_mem.ptr() - buff << " which is not at start of stripe block" << std::endl; + } else { + stripe_pos[0] = pos; + stripe_meta[0] = reinterpret_cast(buff); // copy it out of buffer. + std::cout << "Header A found at " << stripe_pos[0] << std::endl; + // Search for Footer A, skipping false positives. 
+ while (stripe_pos[1] == 0) { + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + while (found == 0 && pos < limit) { + pos += N; + n = pread(fd, buff, N, pos); + data.setView(buff, n); + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + } + if (found > 0) { + // Need to be more thorough in cross checks but this is OK for now. + ts::CacheStripeMeta const& s = *reinterpret_cast(stripe_mem.ptr()); + if (s.version == stripe_meta[0].version) { + stripe_meta[1] = s; + stripe_pos[1] = pos + (stripe_mem.ptr() - buff); + printf("Footer A found at %" PRIu64 "\n", stripe_pos[1]); + if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial) { + printf("Copy A is valid - sync=%d\n", stripe_meta[0].sync_serial); + } + } else { + // false positive, keep looking. + found = 0; + } + } else { + printf("Header A not found, invalid stripe.\n"); + break; + } + } + + // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve + // (as the exact offsets are computable). + if (stripe_pos[1]) { + delta = stripe_pos[1] - stripe_pos[0]; + // Header B should be immediately after Footer A. If at the end of the last read, + // do another read. + if (!data) { + pos += N; + n = pread(fd, buff, ts::CacheStripeBlocks::SCALE, pos); + data.setView(buff, n); + } + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + if (found <= 0) { + printf("Header B not found at expected location.\n"); + } else { + stripe_meta[2] = *reinterpret_cast(stripe_mem.ptr()); + stripe_pos[2] = pos + (stripe_mem.ptr() - buff); + printf("Found Header B at expected location %" PRIu64 ".\n", stripe_pos[2]); + + // Footer B must be at the same relative offset to Header B as Footer A -> Header A. 
+ n = pread(fd, buff, ts::CacheStripeBlocks::SCALE, stripe_pos[2] + delta); + data.setView(buff, n); + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + if (found == 1) { + stripe_pos[3] = stripe_pos[2] + delta; + stripe_meta[3] = *reinterpret_cast(stripe_mem.ptr()); + printf("Footer B found at expected location %" PRIu64 ".\n", stripe_pos[3]); + } else { + printf("Footer B not found at expected location %" PRIu64 ".\n", stripe_pos[2] + delta); + } + } + } + + if (stripe_pos[1]) { + if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial && + (0 == stripe_pos[3] || stripe_meta[2].sync_serial != stripe_meta[3].sync_serial || stripe_meta[0].sync_serial > stripe_meta[2].sync_serial)) { + ts::StripeData sdata; + Calc_Stripe_Data(stripe_meta[0], stripe_meta[1], delta, sdata); + } else if (stripe_pos[3] && stripe_meta[2].sync_serial == stripe_meta[3].sync_serial) { + ts::StripeData sdata; + Calc_Stripe_Data(stripe_meta[2], stripe_meta[3], delta, sdata); + } else { + std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; + } + } else { + std::cout << "Invalid stripe data - no candidates found." 
<< std::endl; + } + } + } else { + printf("Stripe Header A not found in first chunk\n"); + } + } + + + // -------------------- + struct Cache + { + ~Cache(); + + void load(ts::FilePath const& path); + void loadConfig(ts::FilePath const& path); + void loadDevice(ts::FilePath const& path); + + enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; + void dumpSpans(SpanDumpDepth depth); + void dumpVolumes(); + + std::list _spans; + std::map _volumes; + }; + + void + Cache::load(ts::FilePath const& path) + { + if (!path.is_readable()) throw(std::system_error(errno, std::system_category(), static_cast(path))); + else if (path.is_regular_file()) this->loadConfig(path); + else if (path.is_char_device() || path.is_block_device()) this->loadDevice(path); + else printf("Not a valid file type: '%s'\n", static_cast(path)); + } + + void + Cache::loadConfig(ts::FilePath const& path) + { + + static const ts::StringView TAG_ID("id"); + static const ts::StringView TAG_VOL("volume"); + + ts::BulkFile cfile(path); + if (0 == cfile.load()) { + ts::StringView content = cfile.content(); + while (content) { + ts::StringView line = content.splitPrefix('\n'); + line.ltrim(&isspace); + if (!line || '#' == *line) continue; + ts::StringView path = line.extractPrefix(&isspace); + if (path) { + // After this the line is [size] [id=string] [vol=#] + while (line) { + ts::StringView value(line.extractPrefix(&isspace)); + if (value) { + ts::StringView tag(value.splitPrefix('=')); + if (!tag) { + } else if (0 == strcasecmp(tag,TAG_ID)) { + } else if (0 == strcasecmp(tag,TAG_VOL)) { + } + } + } + this->load(ts::FilePath(path)); + } + } + } + } + + void + Cache::loadDevice(ts::FilePath const& path) + { + int flags; + + flags = OPEN_RW_FLAGS +#if defined(O_DIRECT) + | O_DIRECT +#endif +#if defined(O_DSYNC) + | O_DSYNC +#endif + ; + + ats_scoped_fd fd(path.open(flags)); + + if (fd) { + off_t offset = ts::CacheSpan::OFFSET.units(); + alignas(512) char buff[8192]; + int64_t n = pread(fd, buff, 
sizeof(buff), offset); + if (n >= static_cast(sizeof(ts::SpanHeader))) { + ts::SpanHeader& span_hdr = reinterpret_cast(buff); +# if 0 + printf("Span: %s\n : Magic = 0x%x (%s) volumes=%d used=%d free=%d vol_blocks=%d total blocks=%" PRIu64 " \n" + , static_cast(path) + , span_hdr.magic, (span_hdr.magic == ts::SpanHeader::MAGIC ? "match" : "fail") + , span_hdr.num_volumes, span_hdr.num_used, span_hdr.num_free + , span_hdr.num_diskvol_blks, span_hdr.num_blocks + ); +# endif + // See if it looks valid + if (span_hdr.magic == ts::SpanHeader::MAGIC && + span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { + int nspb = span_hdr.num_diskvol_blks; + size_t span_hdr_size = sizeof(ts::SpanHeader) + ( nspb - 1 ) * sizeof(ts::CacheStripeDescriptor); + Span* span = new Span(path); + span->_header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); + if (span_hdr_size <= sizeof(buff)) { + memcpy(span->_header.get(), buff, span_hdr_size); + } else { + // TODO - check the pread return + pread(fd, span->_header.get(), span_hdr_size, offset); + } + span->_fd = fd.release(); + _spans.push_back(span); + for ( auto i = 0 ; i < nspb ; ++i ) { + ts::CacheStripeDescriptor& stripe = span->_header->stripes[i]; +# if 0 + std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() + << " blocks=" << stripe.len.units() << " vol=" << stripe.vol_idx + << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; +# endif + if (stripe.free == 0) { + // Add to volume. 
+ _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef { span, i }); +// if (Examine_Stripes_P) Open_Stripe(fd, stripe); + } + } + } + } else { + printf("Failed to read from '%s' [%d]\n", path.path(), errno); + } + } else { + printf("Unable to open '%s'\n", static_cast(path)); + } + } + + void + Cache::dumpSpans(SpanDumpDepth depth) + { + if (depth >= SpanDumpDepth::SPAN) { + for (auto span : _spans) { + std::cout << "Span: " << span->_path << " " + << span->_header->num_volumes << " Volumes " + << span->_header->num_used << " in use " + << span->_header->num_free << " free " + << span->_header->num_diskvol_blks << " stripes " + << span->_header->num_blocks << " blocks" + << std::endl; + for (unsigned int i = 0 ; i < span->_header->num_diskvol_blks ; ++i ) { + ts::CacheStripeDescriptor& stripe = span->_header->stripes[i]; + std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() + << " blocks=" << stripe.len.units() << " vol=" << stripe.vol_idx + << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; + if (depth >= SpanDumpDepth::STRIPE) { + Open_Stripe(span->_fd, stripe); + } + } + } + } + } + + void + Cache::dumpVolumes() + { + for ( auto const& elt : _volumes ) { + size_t size = 0; + for ( auto const& r : elt.second._stripes ) + size += r._span->_header->stripes[r._idx].len.units(); + + std::cout << "Volume " << elt.first << " has " << elt.second._stripes.size() << " stripes and " + << size << " bytes" + << std::endl; + } + } + + Cache::~Cache() + { + for ( auto* span : _spans) delete span; + } + + void + Span::clearPermanently() + { + alignas(512) static char zero[ts::CacheStripeBlocks::SCALE]; // should be all zero, it's static. 
+ std::cout << "Clearing " << _path << " permanently on disk "; + ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); + if (n == sizeof(zero)) std::cout << "done"; + else { + const char* text = strerror(errno); + std::cout << "failed"; + if (n >= 0) std::cout << " - " << n << " of " << sizeof(zero) << " bytes written"; + std::cout << " - " << text; + } + std::cout << std::endl; + } + + struct option Options[] = { + { "help", true, nullptr, 'h' } + }; + +} + +ts::Rv +List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) +{ + Cache cache; + cache.load(TargetFile); + cache.dumpSpans(depth); + cache.dumpVolumes(); + return true; +} + +ts::Rv +Clear_Spans(int argc, char* argv[]) +{ + Cache cache; + OPEN_RW_FLAGS = O_RDWR; + cache.load(TargetFile); + for ( auto* span : cache._spans) { + span->clearPermanently(); + } + return true; +} + +int main(int argc, char* argv[]) +{ + int opt_idx = 0; + int opt_val; + while (-1 != (opt_val = getopt_long(argc, argv, "h", Options, &opt_idx))) { + switch (opt_val) { + case 'h': + printf("Usage: %s [device_path|config_file]", argv[0]); + return 1; + break; + } + } + + Commands.add(std::string("list"), std::string("List elements of the cache"), [] (int argc, char* argv[]) { return List_Stripes(Cache::SpanDumpDepth::SPAN, argc, argv); } ) + .subCommand(std::string("stripes"), std::string("The stripes"), [] (int argc, char* argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); + Commands.add(std::string("clear"), std::string("Clear spans"), &Clear_Spans); + + if (optind < argc) { + TargetFile = argv[optind]; + argc -= optind+1; + argv += optind+1; + } else { + Commands.helpMessage(argc, argv); + exit(1); + } + ts::Rv result = Commands.invoke(argc, argv); + + return 0; +} diff --git a/tools/cache_tool/Command.cc b/tools/cache_tool/Command.cc new file mode 100644 index 00000000000..fd0aab1dbaf --- /dev/null +++ b/tools/cache_tool/Command.cc @@ -0,0 +1,126 @@ +/** @file + + Nest 
commands (for command line processing). + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#include "Command.h" +#include +#include +#include +#include +#include + +namespace ApacheTrafficServer +{ + + int CommandTable::_opt_idx = 0; + + // Error message functions. 
+ ts::Errata ERR_COMMAND_TAG_NOT_FOUND(char const* tag) { std::ostringstream s; + s << "Command tag " << tag << " not found"; + return ts::Errata(s.str());} + + ts::Errata ERR_SUBCOMMAND_REQUIRED() { return ts::Errata(std::string("Incomplete command, additional keyword required")); } + + + CommandTable::Command::Command() + { + } + + CommandTable::Command::Command(std::string const& name, std::string const& help) : _name(name), _help(help) + { + } + + CommandTable::Command::Command(std::string const& name, std::string const& help, CommandFunction const& f) : _name(name), _help(help), _func(f) + { + } + + auto CommandTable::Command::set(CommandFunction const& f) -> self& + { + _func = f; + return *this; + } + + CommandTable::Command& CommandTable::Command::subCommand(std::string const& name, std::string const& help, CommandFunction const & f) + { + _group.emplace_back(Command(name, help, f)); + return _group.back(); + } + + auto CommandTable::Command::subCommand(std::string const& name, std::string const& help) -> self& + { + _group.emplace_back(Command(name,help)); + return _group.back(); + } + + ts::Rv CommandTable::Command::invoke(int argc, char* argv[]) + { + ts::Rv zret = true; + + if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { + // Tail of command keywords, try to invoke. 
+ if (_func) zret = _func(argc - CommandTable::_opt_idx, argv + CommandTable::_opt_idx); + else zret = false, zret = ERR_SUBCOMMAND_REQUIRED(); + } else { + char const* tag = argv[CommandTable::_opt_idx]; + auto spot = std::find_if(_group.begin(), _group.end(), + [tag](CommandGroup::value_type const& elt) { + return 0 == strcasecmp(tag, elt._name.c_str()); } ); + if (spot != _group.end()) { + ++CommandTable::_opt_idx; + zret = spot->invoke(argc, argv); + } + else { + zret = false; + zret = ERR_COMMAND_TAG_NOT_FOUND(tag); + } + } + return zret; + } + + CommandTable::Command::~Command() { } + + CommandTable::CommandTable() + { + } + + auto CommandTable::add(std::string const& name, std::string const& help) -> Command& + { + return _top.subCommand(name, help); + } + + auto CommandTable::add(std::string const& name, std::string const& help, CommandFunction const& f) -> Command& + { + return _top.subCommand(name, help, f); + } + + ts::Rv CommandTable::invoke(int argc, char* argv[]) + { + _opt_idx = 0; + return _top.invoke(argc, argv); + } + + ts::Rv CommandTable::helpMessage(int argc, char* argv[]) + { + std::cout << "Help message" << std::endl; + return false; + } +} diff --git a/tools/cache_tool/Command.h b/tools/cache_tool/Command.h new file mode 100644 index 00000000000..a321df47635 --- /dev/null +++ b/tools/cache_tool/Command.h @@ -0,0 +1,116 @@ +/** @file + + Command registration. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#include +#include +#include + +#if !defined(CACHE_TOOL_COMMAND_H) +#define CACHE_TOOL_COMMAND_H +namespace ApacheTrafficServer +{ + // Because in C+11 std::max is not constexpr + template < typename I > constexpr inline I maximum(I lhs, I rhs) { return lhs < rhs ? rhs : lhs; } + + /// Top level container for commands. + class CommandTable + { + typedef CommandTable self; ///< Self reference type. + public: + /// Signature for actual command implementation. + typedef std::function (int argc, char* argv[])> CommandFunction; + + CommandTable(); + + /// A command. + /// This is either a leaf (and has a function for an implementation) or it is a group + /// of nested commands. + class Command + { + typedef Command self; ///< Self reference type. + public: + ~Command(); + + /** Add a subcommand to this command. + @return The subcommand object. + */ + Command& subCommand(std::string const& name, std::string const& help); + /** Add a subcommand to this command. + @return The new sub command instance. + */ + Command& subCommand(std::string const& name, std::string const& help, CommandFunction const& f); + /** Add a leaf command. + @return This new sub command instance. + */ + Command& set(CommandFunction const& f); + + /** Invoke a command. + @return The return value of the executed command, or an error value if the command was not found. + */ + ts::Rv invoke(int argc, char* argv[]); + + protected: + typedef std::vector CommandGroup; + + std::string _name; ///< Command name. + std::string _help; ///< Help message. + /// Command to execute if no more keywords. 
+ CommandFunction _func; + /// Next command for current keyword. + CommandGroup _group; + + /// Default constructor, no execution logic. + Command(); + /// Construct with a function for this command. + Command(std::string const& name, std::string const& help); + /// Construct with a function for this command. + Command(std::string const& name, std::string const& help, CommandFunction const& f); + + friend class CommandTable; + }; + + /** Add a direct command. + @return The created @c Command instance. + */ + Command& add(std::string const& name, std::string const& help, CommandFunction const& f); + + /** Add a parent command. + @return The created @c Command instance. + */ + Command& add(std::string const& name, std::string const& help); + + /** Invoke a command. + @return The return value of the executed command, or an error value if the command was not found. + */ + ts::Rv invoke(int argc, char* argv[]); + + ts::Rv helpMessage(int argc, char* argv[]); + + protected: + Command _top; + static int _opt_idx; + + friend class Command; + }; +} +#endif diff --git a/tools/cache_tool/File.cc b/tools/cache_tool/File.cc new file mode 100644 index 00000000000..9dc7c4c4838 --- /dev/null +++ b/tools/cache_tool/File.cc @@ -0,0 +1,86 @@ +/** @file + + File support classes. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + */ + +#include "File.h" +#include +#include + +namespace ApacheTrafficServer { + + FilePath& FilePath::operator = (char const* path) + { + _path = ats_strdup(path); + _stat_p = false; + return *this; + } + + bool FilePath::is_readable() const { return 0 == access(_path, R_OK); } + + FilePath operator / (FilePath const& lhs, FilePath const& rhs) + { + return static_cast(lhs) / rhs; + } + + FilePath operator / (char const* lhs, FilePath const& rhs) + { + ats_scoped_str np; + + // If either path is empty, return the other path. + if (nullptr == lhs || 0 == *lhs) return rhs; + if (!rhs.has_path()) return FilePath(lhs); + + return FilePath(path_join(lhs, static_cast(rhs))); + } + + ats_scoped_fd FilePath::open(int flags) const + { + return ats_scoped_fd(this->has_path() ? ::open(_path, flags) : ats_scoped_fd::Traits::initValue()); + } + + int + BulkFile::load() + { + ats_scoped_fd fd(this->open(O_RDONLY)); + int zret = 0; // return errno if something goes wrong. + struct stat info; + if (0 == fstat(fd, &info)) { + size_t n = info.st_size; + _content = static_cast(ats_malloc(n+2)); + if (0 < (_len = read(fd, _content, n))) { + // Force a trailing linefeed and nul. + memset(_content + _len, 0, 2); + if (_content[n-1] != '\n') { + _content[n] = '\n'; + ++_len; + } + } else zret = errno; + } else zret = errno; + return zret; + } + + StringView + BulkFile::content() const + { + return StringView(_content, _len); + } +} diff --git a/tools/cache_tool/File.h b/tools/cache_tool/File.h new file mode 100644 index 00000000000..19fa254c9ab --- /dev/null +++ b/tools/cache_tool/File.h @@ -0,0 +1,156 @@ +/** @file + + File system support classes. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. 
See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if ! defined(ATS_FILE_HEADER) +#define ATS_FILE_HEADER + +#include +#include +#include + +namespace ApacheTrafficServer +{ + /** A file class for supporting path operations. + */ + class FilePath + { + typedef FilePath self; ///< Self reference type. + public: + FilePath(); + /// Construct from a null terminated string. + explicit FilePath(char const* path); + /// Construct from a string view. + explicit FilePath(StringView const& path); + /// Copy constructor - copies the path. + FilePath(self const& that); + /// Move constructor. + FilePath(self && that); + /// Assign a new path. + self& operator = (char const* path); + /// Combine two paths, making sure there is exactly one separator between them. + self operator / (self const& rhs); + /// Create a new instance by appended @a path. + self operator / (char const* path); + /// Check if there is a path. + bool has_path() const; + /// Check if the path is absolute. + bool is_absolute() const; + /// Check if the path is not absolute. + bool is_relative() const; + /// Check if file is readable. + bool is_readable() const; + /// Access the path as a null terminated string. + operator const char* () const; + /// Access the path explicitly. + char const* path() const; + + /// Get the stat buffer. 
+ /// @return A valid stat buffer or @c nullptr if the system call failed. + struct stat const* stat() const; + + /// Return the file type value. + int file_type() const; + + bool is_char_device() const; + bool is_block_device() const; + bool is_dir() const; + bool is_regular_file() const; + + // Utility methods. + ats_scoped_fd open(int flags) const; + + protected: + ats_scoped_str _path; ///< File path. + mutable struct stat _stat; ///< File information. + mutable bool _stat_p = false; ///< Whether _stat is valid. + }; + + /** A file support class for handling files as bulk content. + + @note This is used primarily for configuration files where the entire file is read every time + and it's rarely (if ever) useful to read it incrementally. The general scheme is the entire file + is read and then @c StringView elements are used to reference the bulk content. + + @internal The design goal of this class is to supplant the free functions later in this header. + + */ + class BulkFile : public FilePath + { + typedef BulkFile self; ///< Self reference type. + typedef FilePath super; ///< Parent type. + public: + // Inherit super class constructors. + using super::super; + ///< Conversion constructor from base class. + BulkFile(super&& that); + /// Read the contents of the file in a local buffer. + /// @return @c errno + int load(); + StringView content() const; + private: + ats_scoped_str _content; ///< The file contents. + size_t _len; ///< Length of file content. 
+ }; + +/* ------------------------------------------------------------------- */ + + inline FilePath::FilePath() {} + inline FilePath::FilePath(char const* path) : _path(ats_strdup(path)) {} + inline FilePath::FilePath(StringView const& path) + { + _path = static_cast(ats_malloc(path.size()+1)); + memcpy(_path, path.ptr(), path.size()); + _path[path.size()] = 0; + } + inline FilePath::FilePath(self const& that) : _path(ats_strdup(static_cast(that))) {} + inline FilePath::FilePath(self&& that) : _path(static_cast(that._path)) {} + inline FilePath::operator const char* () const { return _path; } + inline char const* FilePath::path() const { return _path; } + + inline bool FilePath::has_path() const { return _path && 0 != _path[0]; } + inline bool FilePath::is_absolute() const { return _path && '/' == _path[0]; } + inline bool FilePath::is_relative() const { return !this->is_absolute(); } + + inline struct stat const* FilePath::stat() const + { + if (!_stat_p) _stat_p = ::stat(_path, &_stat) >= 0; + return _stat_p ? &_stat : nullptr; + } + + FilePath operator / (FilePath const& lhs, FilePath const& rhs); + FilePath operator / (char const* lhs, FilePath const& rhs); + + inline int FilePath::file_type() const { return this->stat() ? 
(_stat.st_mode & S_IFMT) : 0; } + + inline bool FilePath::is_dir() const { return this->file_type() == S_IFDIR; } + inline bool FilePath::is_char_device() const { return this->file_type() == S_IFCHR; } + inline bool FilePath::is_block_device() const { return this->file_type() == S_IFBLK; } + inline bool FilePath::is_regular_file() const { return this->file_type() == S_IFREG; } + + inline BulkFile::BulkFile(super&& that) : super(that) {} + +/* ------------------------------------------------------------------- */ +} // namespace +/* ------------------------------------------------------------------- */ + +#endif diff --git a/tools/cache_tool/Makefile.am b/tools/cache_tool/Makefile.am new file mode 100644 index 00000000000..c828bb77743 --- /dev/null +++ b/tools/cache_tool/Makefile.am @@ -0,0 +1,32 @@ +# +# Cache Tool Makefile +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +AM_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ -Wl,--as-needed +AM_CPPFLAGS = -I $(srcdir)/iocore -I $(srcdir)/lib/ts + +noinst_PROGRAMS = cache_tool + +cache_tool_SOURCES = CacheDefs.h CacheTool.cc File.h File.cc Command.h Command.cc +cache_tool_LDADD = \ + $(top_builddir)/lib/ts/.libs/MemView.o \ + $(top_builddir)/lib/ts/.libs/ink_memory.o \ + $(top_builddir)/lib/ts/.libs/ink_error.o \ + $(top_builddir)/lib/tsconfig/.libs/Errata.o + +all-am: Makefile $(PROGRAMS) From 3c368b93124bb79d42af8f952c39ce51dd3132aa Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 17 Jan 2017 17:53:33 -0600 Subject: [PATCH 07/81] CacheTool: Update help message. --- tools/cache_tool/CacheTool.cc | 4 ++-- tools/cache_tool/Makefile.am | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index fd0117090ea..67704c358f2 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -420,7 +420,7 @@ namespace { } struct option Options[] = { - { "help", true, nullptr, 'h' } + { "help", false, nullptr, 'h' } }; } @@ -454,7 +454,7 @@ int main(int argc, char* argv[]) while (-1 != (opt_val = getopt_long(argc, argv, "h", Options, &opt_idx))) { switch (opt_val) { case 'h': - printf("Usage: %s [device_path|config_file]", argv[0]); + printf("Usage: %s [device_path|config_file] [ ...]\n", argv[0]); return 1; break; } diff --git a/tools/cache_tool/Makefile.am b/tools/cache_tool/Makefile.am index c828bb77743..79ddab352d7 100644 --- a/tools/cache_tool/Makefile.am +++ b/tools/cache_tool/Makefile.am @@ -20,10 +20,10 @@ AM_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ -Wl,--as-needed AM_CPPFLAGS = -I $(srcdir)/iocore -I $(srcdir)/lib/ts -noinst_PROGRAMS = cache_tool +noinst_PROGRAMS = traffic_cache_tool -cache_tool_SOURCES = CacheDefs.h CacheTool.cc File.h File.cc Command.h Command.cc -cache_tool_LDADD = \ +traffic_cache_tool_SOURCES = CacheDefs.h CacheTool.cc File.h File.cc Command.h 
Command.cc +traffic_cache_tool_LDADD = \ $(top_builddir)/lib/ts/.libs/MemView.o \ $(top_builddir)/lib/ts/.libs/ink_memory.o \ $(top_builddir)/lib/ts/.libs/ink_error.o \ From 4a2c9ef6d993f58f1105c38f331437f99217b50d Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 17 Jan 2017 19:03:55 -0600 Subject: [PATCH 08/81] Metric: Add scale() method. --- lib/ts/Metric.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 61b6326b336..2a6c253ca6a 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -97,7 +97,7 @@ namespace ApacheTrafficServer template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); static intmax_t round_down(self const& that); - static intmax_t scale(); + constexpr static intmax_t scale(); protected: Count _n; ///< Number of scale units. @@ -113,6 +113,8 @@ namespace ApacheTrafficServer constexpr auto Metric::units() const -> Count { return _n * SCALE; } template < intmax_t N, typename C > inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } + template < intmax_t N, typename C > + inline constexpr intmax_t Metric::scale() { return SCALE; } template template From 01708862cdcc3e43f64f87ee0bc33decaa61b2b4 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 17 Jan 2017 20:48:08 -0600 Subject: [PATCH 09/81] CacheTool: minor tweaks. --- tools/cache_tool/CacheDefs.h | 12 +++++++++--- tools/cache_tool/CacheTool.cc | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/cache_tool/CacheDefs.h b/tools/cache_tool/CacheDefs.h index 96d6a34211b..bff54f6955a 100644 --- a/tools/cache_tool/CacheDefs.h +++ b/tools/cache_tool/CacheDefs.h @@ -31,9 +31,15 @@ namespace ApacheTrafficServer constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; + typedef Metric<1, int64_t> Bytes; + typedef Metric<1024, int64_t> Kilobytes; + typedef Metric<1<<20, int64_t> Megabytes; + + // Size measurement of cache storage. 
+ // Also size of meta data storage units. typedef Metric<8192, int64_t> CacheStripeBlocks; + // Size unit for content stored in cache. typedef Metric<512, int64_t> CacheContentBlocks; - typedef Metric<1, int64_t> CacheBytes; /** A cache span is a representation of raw storage. It corresponds to a raw disk, disk partition, file, or directory. @@ -43,7 +49,7 @@ namespace ApacheTrafficServer public: /// Default offset of start of data in a span. /// @internal I think this is done to avoid collisions with partition tracking mechanisms. - constexpr static CacheStripeBlocks OFFSET { 1 }; + static const Bytes OFFSET; }; /** A section of storage in a span, used to contain a stripe. @@ -53,7 +59,7 @@ namespace ApacheTrafficServer @internal nee @c DiskVolBlock */ struct CacheStripeDescriptor { - CacheBytes offset; // offset in bytes from the start of the disk + Bytes offset; // offset of start of stripe from start of span. CacheStripeBlocks len; // length of block. uint32_t vol_idx; ///< If in use, the volume index. unsigned int type : 3; diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index 67704c358f2..f6855bdf254 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -42,7 +42,7 @@ namespace ts { } namespace ApacheTrafficServer { - constexpr CacheStripeBlocks CacheSpan::OFFSET; + const Bytes CacheSpan::OFFSET{ CacheStripeBlocks::scale() }; } namespace { From a1f8e88af909b641b85277092e4ac3f741aff68e Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 17 Jan 2017 20:55:32 -0600 Subject: [PATCH 10/81] Metric: Add scale() method implementation. --- lib/ts/Metric.h | 129 ++++++++++++++++++++---------------------------- 1 file changed, 53 insertions(+), 76 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 2a6c253ca6a..fe3120b997e 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -86,18 +86,14 @@ namespace ApacheTrafficServer constexpr Count units() const; /// Assignment operator. 
- /// @note Requires that @c S be an integer multiple of @c SCALE. + /// @note Requires the scale of @c S be an integer multiple of the scale of this. template < intmax_t S, typename I > self& operator = (Metric const& that); + /// Assignment from same scale. + self& operator = (self const& that); - /// Convert the count of a differently scaled @c Metric @a src by rounding down if needed. - /// @internal This is required for internal use but may be handy for other clients. - /// @internal Variants to optimize special cases. - template < typename I > static intmax_t round_down(Metric const& src); - template < intmax_t S, typename I > static intmax_t round_down(Metric const& src); - static intmax_t round_down(self const& that); - - constexpr static intmax_t scale(); + /// Run time access to the scale of this metric (template arg @a N). + static constexpr intmax_t scale(); protected: Count _n; ///< Number of scale units. @@ -114,7 +110,9 @@ namespace ApacheTrafficServer template < intmax_t N, typename C > inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } template < intmax_t N, typename C > - inline constexpr intmax_t Metric::scale() { return SCALE; } + inline auto Metric::operator = (self const& that) -> self& { _n = that._n; return *this; } + template < intmax_t N, typename C > + constexpr inline intmax_t Metric::scale() { return SCALE; } template template @@ -141,43 +139,7 @@ namespace ApacheTrafficServer return *this; } - // Same type, no rounding needed. - template < intmax_t N, typename C > - intmax_t Metric::round_down(self const& that) - { - return that._n; - } - - // Same scale just with different count type, no rounding. 
- template < intmax_t N, typename C > - template < typename I > - intmax_t Metric::round_down(Metric const& that) - { - return that._n; - } - - - template < intmax_t N, typename C > - template < intmax_t S, typename I > - intmax_t Metric::round_down(Metric const& src) - { - typedef std::ratio R_NS; - typedef std::ratio R_SN; - - if (R_NS::den == 1) { - return src.count() / R_NS::num; - } else if (R_SN::den ==1) { - return src.count() * R_SN::num; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - auto n = src.count(); - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (n / N) * S + (( n % N ) * S) / N; - } - } + // -- Free Functions -- /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. The resulting count in the return value will be the smallest count that is not smaller than the unit @@ -191,16 +153,20 @@ namespace ApacheTrafficServer auto size = metric_round_up(src); // size.count() == 586 @endcode */ - template < typename M, intmax_t N, typename C > - M metric_round_up(Metric const& src) + template < typename M, intmax_t S, typename I > + M metric_round_up(Metric const& src) { - if (1 == M::SCALE) { - return M(src.units()); + typedef std::ratio R; + auto c = src.count(); + + if (M::SCALE == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. } else { - typedef std::ratio R; // R::num == M::SCALE / GCD(M::SCALE, N) == GCF(M::SCALE, N) - auto n = src.count(); - // Round down and add 1 unless @a n is an even multiple of the GCF of the two scales. 
- return M(M::round_down(src) + ((n % R::num) != 0)); + return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::num)); } } @@ -216,24 +182,38 @@ namespace ApacheTrafficServer auto size = metric_round_up(src); // size.count() == 585 @endcode */ - template < typename M, intmax_t N, typename C > - M metric_round_down(Metric const& src) + template < typename M, intmax_t S, typename I > + M metric_round_down(Metric const& src) { - return M(1 == M::SCALE ? src.units() : M::round_down(src)); + typedef std::ratio R; + auto c = src.count(); + + if (R::den == 1) { + return c / R::num; // S is a multiple of N. + } else if (R::num ==1) { + return c * R::den; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. + // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num; + } } - /// Convert a unit value to a scaled count, rounding down. + /// Convert a unit value @a n to a Metric, rounding down. template < typename M > - M metric_round_down(intmax_t src) + M metric_round_down(intmax_t n) { - return M(src/M::SCALE); // assuming compiler will optimize out dividing by 1 if needed. + return n/M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. } - /// Convert a unit value to a scaled count, rounding up. + /// Convert a unit value @a n to a Metric, rounding up. template < typename M > - M metric_round_up(intmax_t src) + M metric_round_up(intmax_t n) { - return M(M::SCALE == 1 ? src : (src/M::SCALE + 0 != src % M::SCALE)); + return M::SCALE == 1 ? 
n : (n/M::SCALE + (0 != (n % M::SCALE))); } @@ -266,33 +246,30 @@ namespace ApacheTrafficServer template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator < (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; + typedef std::ratio R; // Based on tests with the GNU compiler, the fact that the conditionals are compile time // constant causes the never taken paths to be dropped so there are no runtime conditional // checks, even with no optimization at all. - if (R12::den == 1) { return lhs.count() < rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num < rhs.count(); } + if (R::den == 1) { return lhs.count() < rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den < rhs.count(); } else return lhs.units() < rhs.units(); } template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator == (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; - if (R12::den == 1) { return lhs.count() == rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num == rhs.count(); } + typedef std::ratio R; + if (R::den == 1) { return lhs.count() == rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den == rhs.count(); } else return lhs.units() == rhs.units(); } template < intmax_t N1, typename C1, intmax_t N2, typename C2 > bool operator <= (Metric const& lhs, Metric const& rhs) { - typedef std::ratio R12; - typedef std::ratio R21; - if (R12::den == 1) { return lhs.count() <= rhs.count() * R12::num; } - else if (R21::den == 1) { return lhs.count() * R21::num <= rhs.count(); } + typedef std::ratio R; + if (R::den == 1) { return lhs.count() <= rhs.count() * R::num; } + else if (R::num == 1) { return lhs.count() * R::den <= rhs.count(); } else return lhs.units() <= rhs.units(); } From 97f1625b12b143a033fdf2dc4c59dcdf3f9f9abc Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Tue, 17 Jan 2017 20:57:25 -0600 Subject: [PATCH 11/81] Metric: Add scale() method implementation. --- lib/ts/Metric.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 184eddfd4e6..fe3120b997e 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -93,7 +93,7 @@ namespace ApacheTrafficServer self& operator = (self const& that); /// Run time access to the scale of this metric (template arg @a N). - static intmax_t scale(); + static constexpr intmax_t scale(); protected: Count _n; ///< Number of scale units. @@ -111,6 +111,8 @@ namespace ApacheTrafficServer inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } template < intmax_t N, typename C > inline auto Metric::operator = (self const& that) -> self& { _n = that._n; return *this; } + template < intmax_t N, typename C > + constexpr inline intmax_t Metric::scale() { return SCALE; } template template From c04c3ba4066b07028999e29837ae242971f193e0 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 17 Jan 2017 22:28:39 -0600 Subject: [PATCH 12/81] CacheTool: Checkpoint for command help message. --- tools/cache_tool/CacheDefs.h | 10 ++++++---- tools/cache_tool/CacheTool.cc | 26 ++++++++++++++++---------- tools/cache_tool/Command.cc | 30 ++++++++++++++++++++++++++---- tools/cache_tool/Command.h | 6 +++++- 4 files changed, 53 insertions(+), 19 deletions(-) diff --git a/tools/cache_tool/CacheDefs.h b/tools/cache_tool/CacheDefs.h index bff54f6955a..098c0e9f4aa 100644 --- a/tools/cache_tool/CacheDefs.h +++ b/tools/cache_tool/CacheDefs.h @@ -33,13 +33,15 @@ namespace ApacheTrafficServer typedef Metric<1, int64_t> Bytes; typedef Metric<1024, int64_t> Kilobytes; - typedef Metric<1<<20, int64_t> Megabytes; + typedef Metric<1024 * Kilobytes::SCALE, int64_t> Megabytes; + // Units of allocation for stripes. + typedef Metric<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; // Size measurement of cache storage. 
// Also size of meta data storage units. - typedef Metric<8192, int64_t> CacheStripeBlocks; + typedef Metric<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; // Size unit for content stored in cache. - typedef Metric<512, int64_t> CacheContentBlocks; + typedef Metric<512, int64_t> CacheDataBlocks; /** A cache span is a representation of raw storage. It corresponds to a raw disk, disk partition, file, or directory. @@ -60,7 +62,7 @@ namespace ApacheTrafficServer */ struct CacheStripeDescriptor { Bytes offset; // offset of start of stripe from start of span. - CacheStripeBlocks len; // length of block. + CacheStoreBlocks len; // length of block. uint32_t vol_idx; ///< If in use, the volume index. unsigned int type : 3; unsigned int free : 1; diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index f6855bdf254..298a92a5176 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -42,7 +42,7 @@ namespace ts { } namespace ApacheTrafficServer { - const Bytes CacheSpan::OFFSET{ CacheStripeBlocks::scale() }; + const Bytes CacheSpan::OFFSET{ CacheStoreBlocks{1} }; } namespace { @@ -94,7 +94,7 @@ namespace { while (mem.size() >= sizeof(ts::CacheStripeMeta)) { // The meta data is stored aligned on a stripe block boundary, so only need to check there. test_site = mem; - mem += ts::CacheStripeBlocks::SCALE; // always move this forward to make restarting search easy. + mem += ts::CacheStoreBlocks::SCALE; // always move this forward to make restarting search easy. if (Validate_Stripe_Meta(*reinterpret_cast(test_site.ptr()))) { std::get<0>(zret) = 1; @@ -108,7 +108,7 @@ namespace { { // Assuming header + free list fits in one cache stripe block, which isn't true for large stripes (>2G or so). // Need to detect that, presumably by checking that the segment count fits in the stripe block. 
- ts::CacheStripeBlocks hdr_size { 1 }; + ts::CacheStoreBlocks hdr_size { 1 }; off_t space = delta - hdr_size.units(); int64_t n_buckets = space / 40; data.segments = n_buckets / (1<<14); @@ -187,7 +187,7 @@ namespace { // do another read. if (!data) { pos += N; - n = pread(fd, buff, ts::CacheStripeBlocks::SCALE, pos); + n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, pos); data.setView(buff, n); } std::tie(found, stripe_mem) = Probe_For_Stripe(data); @@ -199,7 +199,7 @@ namespace { printf("Found Header B at expected location %" PRIu64 ".\n", stripe_pos[2]); // Footer B must be at the same relative offset to Header B as Footer A -> Header A. - n = pread(fd, buff, ts::CacheStripeBlocks::SCALE, stripe_pos[2] + delta); + n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, stripe_pos[2] + delta); data.setView(buff, n); std::tie(found, stripe_mem) = Probe_For_Stripe(data); if (found == 1) { @@ -406,7 +406,7 @@ namespace { void Span::clearPermanently() { - alignas(512) static char zero[ts::CacheStripeBlocks::SCALE]; // should be all zero, it's static. + alignas(512) static char zero[ts::CacheStoreBlocks::SCALE]; // should be all zero, it's static. 
std::cout << "Clearing " << _path << " permanently on disk "; ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); if (n == sizeof(zero)) std::cout << "done"; @@ -451,11 +451,12 @@ int main(int argc, char* argv[]) { int opt_idx = 0; int opt_val; + bool help = false; while (-1 != (opt_val = getopt_long(argc, argv, "h", Options, &opt_idx))) { switch (opt_val) { case 'h': printf("Usage: %s [device_path|config_file] [ ...]\n", argv[0]); - return 1; + help = true; break; } } @@ -464,15 +465,20 @@ int main(int argc, char* argv[]) .subCommand(std::string("stripes"), std::string("The stripes"), [] (int argc, char* argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); Commands.add(std::string("clear"), std::string("Clear spans"), &Clear_Spans); + if (help) { + Commands.helpMessage(argc - optind, argv + optind); + exit(1); + } + if (optind < argc) { TargetFile = argv[optind]; argc -= optind+1; argv += optind+1; - } else { - Commands.helpMessage(argc, argv); - exit(1); } ts::Rv result = Commands.invoke(argc, argv); + if (!result) { + std::cerr << result.errata(); + } return 0; } diff --git a/tools/cache_tool/Command.cc b/tools/cache_tool/Command.cc index fd0aab1dbaf..75d8db56722 100644 --- a/tools/cache_tool/Command.cc +++ b/tools/cache_tool/Command.cc @@ -25,7 +25,6 @@ #include #include #include -#include #include namespace ApacheTrafficServer @@ -96,6 +95,27 @@ namespace ApacheTrafficServer return zret; } + void CommandTable::Command::helpMessage(int argc, char* argv[], std::ostream& out, std::string const& prefix) const + { + + if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { + // Tail of command keywords, start listing + if (!_name.empty()) out << prefix << _name << ": " << _help << std::endl; + for ( Command const& c : _group ) c.helpMessage(argc, argv, out, " " + prefix); + } else { + char const* tag = argv[CommandTable::_opt_idx]; + auto spot = std::find_if(_group.begin(), _group.end(), + 
[tag](CommandGroup::value_type const& elt) { + return 0 == strcasecmp(tag, elt._name.c_str()); } ); + if (spot != _group.end()) { + ++CommandTable::_opt_idx; + spot->helpMessage(argc, argv, out, prefix); + } else { + out << ERR_COMMAND_TAG_NOT_FOUND(tag) << std::endl; + } + } + } + CommandTable::Command::~Command() { } CommandTable::CommandTable() @@ -118,9 +138,11 @@ namespace ApacheTrafficServer return _top.invoke(argc, argv); } - ts::Rv CommandTable::helpMessage(int argc, char* argv[]) + // This is basically cloned from invoke(), need to find how to do some unification. + void CommandTable::helpMessage(int argc, char* argv[]) const { - std::cout << "Help message" << std::endl; - return false; + _opt_idx = 0; + std::cerr << "Command tree" << std::endl; + _top.helpMessage(argc, argv, std::cerr, std::string("* ")); } } diff --git a/tools/cache_tool/Command.h b/tools/cache_tool/Command.h index a321df47635..9c25dc2ab62 100644 --- a/tools/cache_tool/Command.h +++ b/tools/cache_tool/Command.h @@ -23,6 +23,8 @@ #include #include +#include +#include #include #if !defined(CACHE_TOOL_COMMAND_H) @@ -69,6 +71,8 @@ namespace ApacheTrafficServer */ ts::Rv invoke(int argc, char* argv[]); + void helpMessage(int argc, char* argv[], std::ostream& out = std::cerr, std::string const& prefix = std::string()) const; + protected: typedef std::vector CommandGroup; @@ -104,7 +108,7 @@ namespace ApacheTrafficServer */ ts::Rv invoke(int argc, char* argv[]); - ts::Rv helpMessage(int argc, char* argv[]); + void helpMessage(int argc, char* argv[]) const; protected: Command _top; From 31e99b1b358335d6805b1d1753f2acc09743e0d3 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 18 Jan 2017 06:52:21 -0600 Subject: [PATCH 13/81] CacheTool: Help message improvements. 
--- tools/cache_tool/Command.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/cache_tool/Command.cc b/tools/cache_tool/Command.cc index 75d8db56722..add56a2f53f 100644 --- a/tools/cache_tool/Command.cc +++ b/tools/cache_tool/Command.cc @@ -100,8 +100,12 @@ namespace ApacheTrafficServer if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { // Tail of command keywords, start listing - if (!_name.empty()) out << prefix << _name << ": " << _help << std::endl; - for ( Command const& c : _group ) c.helpMessage(argc, argv, out, " " + prefix); + if (_name.empty()) { // root command group, don't print for that. + for ( Command const& c : _group ) c.helpMessage(argc, argv, out, prefix); + } else { + out << prefix << _name << ": " << _help << std::endl; + for ( Command const& c : _group ) c.helpMessage(argc, argv, out, " " + prefix); + } } else { char const* tag = argv[CommandTable::_opt_idx]; auto spot = std::find_if(_group.begin(), _group.end(), From bd3fbe029b20474751c894a86e052a1ee4354c6e Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 18 Jan 2017 07:12:43 -0600 Subject: [PATCH 14/81] Clang-Format. 
--- lib/ts/MemView.cc | 43 +-- lib/ts/MemView.h | 63 ++-- lib/ts/Metric.h | 513 +++++++++++++------------- lib/ts/ink_memory.h | 6 +- lib/ts/test_Metric.cc | 68 ++-- tools/cache_tool/CacheDefs.h | 229 ++++++------ tools/cache_tool/CacheStore.h | 30 +- tools/cache_tool/CacheTool.cc | 654 +++++++++++++++++----------------- tools/cache_tool/Command.cc | 214 ++++++----- tools/cache_tool/Command.h | 151 ++++---- tools/cache_tool/File.cc | 112 +++--- tools/cache_tool/File.h | 284 +++++++++------ 12 files changed, 1244 insertions(+), 1123 deletions(-) diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc index be6f80ae7d8..473607af44d 100644 --- a/lib/ts/MemView.cc +++ b/lib/ts/MemView.cc @@ -4,7 +4,6 @@ namespace ApacheTrafficServer { - int memcmp(MemView const &lhs, MemView const &rhs) { @@ -48,27 +47,29 @@ template void detail::aligned_stream_write(std::ostream &, const StringView &); namespace std { - ostream& operator<<(ostream &os, const ApacheTrafficServer::MemView &b) - { - if (os.good()) { - ostringstream out; - out << b.size() << '@' << hex << b.ptr(); - os << out.str(); - } - return os; +ostream & +operator<<(ostream &os, const ApacheTrafficServer::MemView &b) +{ + if (os.good()) { + ostringstream out; + out << b.size() << '@' << hex << b.ptr(); + os << out.str(); } + return os; +} - ostream& operator<<(ostream &os, const ApacheTrafficServer::StringView &b) - { - if (os.good()) { - const size_t size = b.size(); - const size_t w = static_cast(os.width()); - if (w <= size) - os.write(b.begin(), size); - else - ApacheTrafficServer::detail::aligned_stream_write(os, b); - os.width(0); - } - return os; +ostream & +operator<<(ostream &os, const ApacheTrafficServer::StringView &b) +{ + if (os.good()) { + const size_t size = b.size(); + const size_t w = static_cast(os.width()); + if (w <= size) + os.write(b.begin(), size); + else + ApacheTrafficServer::detail::aligned_stream_write(os, b); + os.width(0); } + return os; +} } diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h 
index ea51e83aa75..b2bbbbda0b3 100644 --- a/lib/ts/MemView.h +++ b/lib/ts/MemView.h @@ -40,7 +40,7 @@ class MemView; class StringView; int memcmp(MemView const &lhs, MemView const &rhs); - int strcmp(StringView const &lhs, StringView const &rhs); +int strcmp(StringView const &lhs, StringView const &rhs); int strcasecmp(StringView lhs, StringView rhs); /** A read only view of contiguous piece of memory. @@ -89,7 +89,7 @@ class MemView constexpr MemView(std::nullptr_t); /// Convert from StringView. - constexpr MemView(StringView const& that); + constexpr MemView(StringView const &that); /** Equality. @@ -148,7 +148,7 @@ class MemView /// This is faster but equivalent to constructing a new view with the same /// arguments and assigning it. /// @return @c this. - self &setView(const void* ptr, ///< Buffer address. + self &setView(const void *ptr, ///< Buffer address. size_t n = 0 ///< Buffer size. ); @@ -172,8 +172,7 @@ class MemView @return A pointer to the first occurrence of @a v in @a this or @c nullptr if @a v is not found. */ - template < typename V > - const V *find(V v) const; + template const V *find(V v) const; /** Find a value. The memory is searched as if it were an array of the value type @a V. @@ -181,8 +180,7 @@ class MemView @return A pointer to the first value for which @a pred is @c true otherwise @c nullptr. */ - template - const V *find(std::function const &pred); + template const V *find(std::function const &pred); /** Get the initial segment of the view before @a p. @@ -273,15 +271,15 @@ class StringView /** Construct explicitly with a pointer and size. */ constexpr StringView(const char *ptr, ///< Pointer to buffer. - size_t n ///< Size of buffer. - ); + size_t n ///< Size of buffer. + ); /** Construct from a half open range of two pointers. @note The byte at @start is in the view but the byte at @a end is not. */ constexpr StringView(const char *start, ///< First byte in the view. - const char *end ///< First byte not in the view. 
- ); + const char *end ///< First byte not in the view. + ); /** Construct from nullptr. This implicitly makes the length 0. @@ -294,7 +292,7 @@ class StringView explicit constexpr StringView(const char *s); /// Construct from @c MemView to reference the same view. - constexpr StringView(MemView const& that); + constexpr StringView(MemView const &that); /** Equality. @@ -362,7 +360,7 @@ class StringView /// This is faster but equivalent to constructing a new view with the same /// arguments and assigning it. /// @return @c this. - self &setView(const char* ptr, ///< Buffer address. + self &setView(const char *ptr, ///< Buffer address. size_t n = 0 ///< Buffer size. ); @@ -423,7 +421,6 @@ class StringView /// Convenience overload, split on predicate. self prefix(std::function const &pred) const; - /** Split the view on the character at @a p. The view is split in to two parts and the byte at @a p is discarded. @a this retains all data @@ -487,7 +484,6 @@ class StringView /// Convenience overload, extract on predicate. self extractPrefix(std::function const &pred); - /** Get the trailing segment of the view after @a p. The byte at @a p is not included. If @a p is not in the view an empty view is returned. 
@@ -537,13 +533,14 @@ inline constexpr MemView::MemView() inline constexpr MemView::MemView(void const *ptr, size_t n) : _ptr(ptr), _size(n) { } -inline constexpr MemView::MemView(void const *start, void const *end) : _ptr(start), _size(static_cast(end) - static_cast(start)) +inline constexpr MemView::MemView(void const *start, void const *end) + : _ptr(start), _size(static_cast(end) - static_cast(start)) { } inline constexpr MemView::MemView(std::nullptr_t) : _ptr(nullptr), _size(0) { } -inline constexpr MemView::MemView(StringView const& that) : _ptr(that.ptr()), _size(that.size()) +inline constexpr MemView::MemView(StringView const &that) : _ptr(that.ptr()), _size(that.size()) { } @@ -559,7 +556,7 @@ inline MemView & MemView::setView(const void *ptr, const void *limit) { _ptr = ptr; - _size = static_cast(limit) - static_cast(ptr); + _size = static_cast(limit) - static_cast(ptr); return *this; } @@ -601,7 +598,7 @@ MemView::is_empty() const inline MemView &MemView::operator++() { - _ptr = static_cast(_ptr) + 1; + _ptr = static_cast(_ptr) + 1; --_size; return *this; } @@ -613,7 +610,7 @@ MemView::operator+=(size_t n) _ptr = nullptr; _size = 0; } else { - _ptr = static_cast(_ptr) + n; + _ptr = static_cast(_ptr) + n; _size -= n; } return *this; @@ -633,7 +630,7 @@ MemView::ptr() const inline const void * MemView::end() const { - return static_cast(_ptr) + _size; + return static_cast(_ptr) + _size; } inline constexpr size_t @@ -710,29 +707,31 @@ MemView::splitSuffix(const void *p) return zret; } -template < typename V > +template inline const V * MemView::find(V v) const { - for ( const V* spot = static_cast(_ptr), limit = spot + (_size/sizeof(V)) ; spot < limit ; ++spot ) - if (v == *spot) return spot; + for (const V *spot = static_cast(_ptr), limit = spot + (_size / sizeof(V)); spot < limit; ++spot) + if (v == *spot) + return spot; return nullptr; } // Specialize char for performance. 
-template < > +template <> inline const char * MemView::find(char v) const { return static_cast(memchr(_ptr, v, _size)); } -template < typename V > +template inline const V * MemView::find(std::function const &pred) { - for (const V *p = static_cast(_ptr), *limit = p + (_size/sizeof(V)) ; p < limit; ++p) - if (pred(*p)) return p; + for (const V *p = static_cast(_ptr), *limit = p + (_size / sizeof(V)); p < limit; ++p) + if (pred(*p)) + return p; return nullptr; } @@ -752,7 +751,7 @@ inline constexpr StringView::StringView(const char *s) : _ptr(s), _size(strlen(s inline constexpr StringView::StringView(std::nullptr_t) : _ptr(nullptr), _size(0) { } -inline constexpr StringView::StringView(MemView const& that) : _ptr(static_cast(that.ptr())), _size(that.size()) +inline constexpr StringView::StringView(MemView const &that) : _ptr(static_cast(that.ptr())), _size(that.size()) { } @@ -1008,7 +1007,7 @@ StringView::find(self delimiters) const inline const char * StringView::find(std::function const &pred) const { - const char* p = std::find_if(this->begin(), this->end(), pred); + const char *p = std::find_if(this->begin(), this->end(), pred); return p == this->end() ? nullptr : p; } @@ -1098,8 +1097,8 @@ namespace detail namespace std { - ostream& operator<<(ostream &os, const ApacheTrafficServer::MemView &b); - ostream& operator<<(ostream &os, const ApacheTrafficServer::StringView &b); +ostream &operator<<(ostream &os, const ApacheTrafficServer::MemView &b); +ostream &operator<<(ostream &os, const ApacheTrafficServer::StringView &b); } #endif // TS_BUFFER_HEADER diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index fe3120b997e..ccb8126ff4a 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -33,260 +33,297 @@ namespace ApacheTrafficServer { - /** A class to hold scaled values. - - Instances of this class have a @a count and a @a scale. The "value" of the instance is @a - count * @a scale. 
The scale is stored in the compiler in the class symbol table and so only - the count is a run time value. An instance with a large scale can be assign to an instance - with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents - inadvertent changes in value. Because the scales are not the same these conversions can be - lossy and the two conversions determine whether, in such a case, the result should be rounded - up or down to the nearest scale value. - - @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. - - @note This is modeled somewhat on @c std::chrono and serves a similar function for different - and simpler cases (where the ratio is always an integer, never a fraction). - - @see metric_round_up - @see metric_round_down - */ - template < intmax_t N, typename T = int > - class Metric - { - typedef Metric self; ///< Self reference type. - - public: - /// Scaling factor for instances. - /// Make it externally accessible. - constexpr static intmax_t SCALE = N; - typedef T Count; ///< Type used to hold the count. - - constexpr Metric(); ///< Default contructor. - ///< Construct to have @a n scaled units. - constexpr Metric(Count n); - - /// Copy constructor for same scale. - template < typename C > - Metric(Metric const& that); - - /// Copy / conversion constructor. - /// @note Requires that @c S be an integer multiple of @c SCALE. - template < intmax_t S, typename I > - Metric(Metric const& that); - - /// Direct assignment. - /// The count is set to @a n. - self& operator = (Count n); - - /// The number of scale units. - constexpr Count count() const; - /// The absolute value, scaled up. - constexpr Count units() const; - - /// Assignment operator. - /// @note Requires the scale of @c S be an integer multiple of the scale of this. 
- template < intmax_t S, typename I > - self& operator = (Metric const& that); - /// Assignment from same scale. - self& operator = (self const& that); - - /// Run time access to the scale of this metric (template arg @a N). - static constexpr intmax_t scale(); - - protected: - Count _n; ///< Number of scale units. - }; - - template < intmax_t N, typename C > - constexpr Metric::Metric() : _n() {} - template < intmax_t N, typename C > - constexpr Metric::Metric(Count n) : _n(n) {} - template < intmax_t N, typename C > - constexpr auto Metric::count() const -> Count { return _n; } - template < intmax_t N, typename C > - constexpr auto Metric::units() const -> Count { return _n * SCALE; } - template < intmax_t N, typename C > - inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } - template < intmax_t N, typename C > - inline auto Metric::operator = (self const& that) -> self& { _n = that._n; return *this; } - template < intmax_t N, typename C > - constexpr inline intmax_t Metric::scale() { return SCALE; } - - template - template - Metric::Metric(Metric const& that) : _n(static_cast(that._n)) - { - } - - template - template - Metric::Metric(Metric const& that) - { - typedef std::ratio R; - static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - } - - template - template - auto Metric::operator = (Metric const& that) -> self& - { - typedef std::ratio R; - static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - return *this; - } - - // -- Free Functions -- - - /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. - The resulting count in the return value will be the smallest count that is not smaller than the unit - value of @a src. 
- - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 586 - @endcode - */ - template < typename M, intmax_t S, typename I > - M metric_round_up(Metric const& src) - { - typedef std::ratio R; - auto c = src.count(); - - if (M::SCALE == S) { - return c; - } else if (R::den == 1) { - return c / R::num + (0 != c % R::num); // N is a multiple of S. - } else if (R::num == 1) { - return c * R::den; // S is a multiple of N. - } else { - return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::num)); - } - } +/** A class to hold scaled values. - /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. - The resulting count in the return value will be the largest count that is not larger than the unit - value of @a src. - - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 585 - @endcode - */ - template < typename M, intmax_t S, typename I > - M metric_round_down(Metric const& src) - { - typedef std::ratio R; - auto c = src.count(); - - if (R::den == 1) { - return c / R::num; // S is a multiple of N. - } else if (R::num ==1) { - return c * R::den; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num; - } - } + Instances of this class have a @a count and a @a scale. The "value" of the instance is @a + count * @a scale. 
The scale is stored in the compiler in the class symbol table and so only + the count is a run time value. An instance with a large scale can be assign to an instance + with a smaller scale and the conversion is done automatically. Conversions from a smaller to + larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents + inadvertent changes in value. Because the scales are not the same these conversions can be + lossy and the two conversions determine whether, in such a case, the result should be rounded + up or down to the nearest scale value. - /// Convert a unit value @a n to a Metric, rounding down. - template < typename M > - M metric_round_down(intmax_t n) - { - return n/M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. - } + @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. - /// Convert a unit value @a n to a Metric, rounding up. - template < typename M > - M metric_round_up(intmax_t n) - { - return M::SCALE == 1 ? n : (n/M::SCALE + (0 != (n % M::SCALE))); - } + @note This is modeled somewhat on @c std::chrono and serves a similar function for different + and simpler cases (where the ratio is always an integer, never a fraction). + @see metric_round_up + @see metric_round_down + */ +template class Metric +{ + typedef Metric self; ///< Self reference type. - // --- Compare operators +public: + /// Scaling factor for instances. + /// Make it externally accessible. + constexpr static intmax_t SCALE = N; + typedef T Count; ///< Type used to hold the count. - // Try for a bit of performance boost - if the metrics have the same scale - // just comparing the counts is sufficient and scaling conversion is avoided. - template < intmax_t N, typename C1, typename C2 > - bool operator < (Metric const& lhs, Metric const& rhs) - { - return lhs.count() < rhs.count(); - } + constexpr Metric(); ///< Default contructor. + ///< Construct to have @a n scaled units. 
+ constexpr Metric(Count n); - template < intmax_t N, typename C1, typename C2 > - bool operator == (Metric const& lhs, Metric const& rhs) - { - return lhs.count() == rhs.count(); - } + /// Copy constructor for same scale. + template Metric(Metric const &that); - // Could be derived but if we're optimizing let's avoid the extra negation. - // Or we could check if the compiler can optimize that out anyway. - template < intmax_t N, typename C1, typename C2 > - bool operator <= (Metric const& lhs, Metric const& rhs) - { - return lhs.count() <= rhs.count(); - } + /// Copy / conversion constructor. + /// @note Requires that @c S be an integer multiple of @c SCALE. + template Metric(Metric const &that); - // General base cases. - - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator < (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - // Based on tests with the GNU compiler, the fact that the conditionals are compile time - // constant causes the never taken paths to be dropped so there are no runtime conditional - // checks, even with no optimization at all. - if (R::den == 1) { return lhs.count() < rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den < rhs.count(); } - else return lhs.units() < rhs.units(); - } + /// Direct assignment. + /// The count is set to @a n. + self &operator=(Count n); - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator == (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - if (R::den == 1) { return lhs.count() == rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den == rhs.count(); } - else return lhs.units() == rhs.units(); - } + /// The number of scale units. + constexpr Count count() const; + /// The absolute value, scaled up. 
+ constexpr Count units() const; - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator <= (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - if (R::den == 1) { return lhs.count() <= rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den <= rhs.count(); } - else return lhs.units() <= rhs.units(); - } + /// Assignment operator. + /// @note Requires the scale of @c S be an integer multiple of the scale of this. + template self &operator=(Metric const &that); + /// Assignment from same scale. + self &operator=(self const &that); + + /// Run time access to the scale of this metric (template arg @a N). + static constexpr intmax_t scale(); - // Derived compares. No narrowing optimization needed because if the scales - // are the same the nested call with be optimized. +protected: + Count _n; ///< Number of scale units. +}; - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator > (Metric const& lhs, Metric const& rhs) - { - return rhs < lhs; +template constexpr Metric::Metric() : _n() +{ +} +template constexpr Metric::Metric(Count n) : _n(n) +{ +} +template +constexpr auto +Metric::count() const -> Count +{ + return _n; +} +template +constexpr auto +Metric::units() const -> Count +{ + return _n * SCALE; +} +template +inline auto +Metric::operator=(Count n) -> self & +{ + _n = n; + return *this; +} +template +inline auto +Metric::operator=(self const &that) -> self & +{ + _n = that._n; + return *this; +} +template +constexpr inline intmax_t +Metric::scale() +{ + return SCALE; +} + +template template Metric::Metric(Metric const &that) : _n(static_cast(that._n)) +{ +} + +template template Metric::Metric(Metric const &that) +{ + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; +} + +template +template +auto +Metric::operator=(Metric const &that) -> self & +{ + 
typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + return *this; +} + +// -- Free Functions -- + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. + The resulting count in the return value will be the smallest count that is not smaller than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 586 + @endcode + */ +template +M +metric_round_up(Metric const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (M::SCALE == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. + } else { + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); } +} + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. + The resulting count in the return value will be the largest count that is not larger than the unit + value of @a src. - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator >= (Metric const& lhs, Metric const& rhs) - { - return rhs <= lhs; + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 585 + @endcode + */ +template +M +metric_round_down(Metric const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (R::den == 1) { + return c / R::num; // S is a multiple of N. + } else if (R::num == 1) { + return c * R::den; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. 
+ // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; } } +/// Convert a unit value @a n to a Metric, rounding down. +template +M +metric_round_down(intmax_t n) +{ + return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. +} + +/// Convert a unit value @a n to a Metric, rounding up. +template +M +metric_round_up(intmax_t n) +{ + return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); +} + +// --- Compare operators + +// Try for a bit of performance boost - if the metrics have the same scale +// just comparing the counts is sufficient and scaling conversion is avoided. +template +bool +operator<(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() < rhs.count(); +} + +template +bool +operator==(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() == rhs.count(); +} + +// Could be derived but if we're optimizing let's avoid the extra negation. +// Or we could check if the compiler can optimize that out anyway. +template +bool +operator<=(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() <= rhs.count(); +} + +// General base cases. + +template +bool +operator<(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + // Based on tests with the GNU compiler, the fact that the conditionals are compile time + // constant causes the never taken paths to be dropped so there are no runtime conditional + // checks, even with no optimization at all. 
+ if (R::den == 1) { + return lhs.count() < rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den < rhs.count(); + } else + return lhs.units() < rhs.units(); +} + +template +bool +operator==(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() == rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den == rhs.count(); + } else + return lhs.units() == rhs.units(); +} + +template +bool +operator<=(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() <= rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den <= rhs.count(); + } else + return lhs.units() <= rhs.units(); +} + +// Derived compares. No narrowing optimization needed because if the scales +// are the same the nested call with be optimized. + +template +bool +operator>(Metric const &lhs, Metric const &rhs) +{ + return rhs < lhs; +} + +template +bool +operator>=(Metric const &lhs, Metric const &rhs) +{ + return rhs <= lhs; +} +} + #endif // TS_METRIC_H diff --git a/lib/ts/ink_memory.h b/lib/ts/ink_memory.h index 2ce9a4eba92..9017d226d58 100644 --- a/lib/ts/ink_memory.h +++ b/lib/ts/ink_memory.h @@ -252,7 +252,7 @@ class ats_scoped_resource /// Construct with contained resource. explicit ats_scoped_resource(value_type rt) : _r(rt) {} /// rvalue constructor - ats_scoped_resource(self && that) : _r(that.release()) {} + ats_scoped_resource(self &&that) : _r(that.release()) {} /// Destructor. ~ats_scoped_resource() { @@ -370,7 +370,7 @@ class ats_scoped_fd : public ats_scoped_resource /// Construct with contained resource. explicit ats_scoped_fd(value_type rt) : super(rt) {} /// rvalue / move constructor - ats_scoped_fd(self && that) : super(static_cast(that)) {} + ats_scoped_fd(self &&that) : super(static_cast(that)) {} /** Place a new resource @a rt in the container. Any resource currently contained is destroyed. 
This object becomes the owner of @a rt. @@ -447,7 +447,7 @@ class ats_scoped_str : public ats_scoped_resource(that)) {} + ats_scoped_str(self &&that) : super(static_cast(that)) {} /// Assign a string @a s to this container.` self & operator=(char *s) diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index 66495924639..f62f5a0e1ea 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -25,48 +25,50 @@ #include #include -namespace ts { - using namespace ApacheTrafficServer; +namespace ts +{ +using namespace ApacheTrafficServer; } struct TestBox { - typedef TestBox self; ///< Self reference type. - + typedef TestBox self; ///< Self reference type. + std::string _name; - + static int _count; static int _fail; - TestBox(char const* name) : _name(name) {} - TestBox(std::string const& name) : _name(name) {} + TestBox(char const *name) : _name(name) {} + TestBox(std::string const &name) : _name(name) {} bool check(bool result, char const *fmt, ...) __attribute__((format(printf, 3, 4))); - static void print_summary() + static void + print_summary() { printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? "FAIL" : "SUCCESS"); } }; int TestBox::_count = 0; -int TestBox::_fail = 0; +int TestBox::_fail = 0; bool TestBox::check(bool result, char const *fmt, ...) { ++_count; - + if (!result) { static constexpr size_t N = 1 << 16; - size_t n = N; + size_t n = N; size_t x; - char* s; + char *s; char buffer[N]; // just stack, go big. 
- + s = buffer; x = snprintf(s, n, "%s: ", _name.c_str()); n -= x; s += x; - + va_list ap; va_start(ap, fmt); vsnprintf(s, n, fmt, ap); @@ -95,7 +97,7 @@ Test_2() { constexpr static int SCALE_1 = 8192; constexpr static int SCALE_2 = 512; - + typedef ts::Metric Size_1; typedef ts::Metric Size_2; @@ -106,36 +108,37 @@ Test_2() Size_2 sz_d(29 * SCALE_1 / SCALE_2); auto sz = ts::metric_round_up(sz_a); - test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); sz = ts::metric_round_down(sz_a); - test.check(sz.count() == 0 , "Rounding down: got %d expected %d", sz.count(), 0); + test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); sz = ts::metric_round_up(sz_b); - test.check(sz.count() == 4 , "Rounding up, got %d expected %d", sz.count(), 4); + test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); sz = ts::metric_round_down(sz_b); - test.check(sz.count() == 3 , "Rounding down, got %d expected %d", sz.count(), 3); - + test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); + sz = ts::metric_round_up(sz_c); - test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); sz = ts::metric_round_down(sz_c); - test.check(sz.count() == 1 , "Rounding down, got %d expected %d", sz.count(), 1); - + test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); + sz = ts::metric_round_up(sz_d); - test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); + test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); sz = ts::metric_round_down(sz_d); - test.check(sz.count() == 29 , "Rounding down, got %d expected %d", sz.count(), 29); + test.check(sz.count() == 29, "Rounding down, got %d expected %d", sz.count(), 29); - sz = 119; + sz = 119; sz_b = 
sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 // sz = sz_b; // Should not compile. - test.check(sz_b.count() == 119 * (SCALE_1/SCALE_2) , "Integral conversion, got %d expected %d", sz_b.count(), 119 * (SCALE_1/SCALE_2)); + test.check(sz_b.count() == 119 * (SCALE_1 / SCALE_2), "Integral conversion, got %d expected %d", sz_b.count(), + 119 * (SCALE_1 / SCALE_2)); } void Test_3() { TestBox test("TS Metric: relatively prime tests"); - + ts::Metric<9> m_9; ts::Metric<4> m_4, m_test; @@ -144,18 +147,18 @@ Test_3() // m_9 = m_4; // Should fail to compile with static assert. m_4 = ts::metric_round_up(m_9); - test.check(m_4.count() == 214 , "Rounding down, got %d expected %d", m_4.count(), 214); + test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); m_4 = ts::metric_round_down(m_9); - test.check(m_4.count() == 213 , "Rounding down, got %d expected %d", m_4.count(), 213); + test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); m_4 = 213; m_9 = ts::metric_round_up(m_4); - test.check(m_9.count() == 95 , "Rounding down, got %d expected %d", m_9.count(), 95); + test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); m_9 = ts::metric_round_down(m_4); test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); m_test = m_4; // Verify assignment of identical scale values compiles. 
- test.check(m_test.count() == 213 , "Assignment got %d expected %d", m_4.count(), 213); + test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); } int @@ -167,4 +170,3 @@ main(int, char **) TestBox::print_summary(); return 0; } - diff --git a/tools/cache_tool/CacheDefs.h b/tools/cache_tool/CacheDefs.h index 098c0e9f4aa..e62136ecf28 100644 --- a/tools/cache_tool/CacheDefs.h +++ b/tools/cache_tool/CacheDefs.h @@ -28,119 +28,124 @@ namespace ApacheTrafficServer { +constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; + +typedef Metric<1, int64_t> Bytes; +typedef Metric<1024, int64_t> Kilobytes; +typedef Metric<1024 * Kilobytes::SCALE, int64_t> Megabytes; + +// Units of allocation for stripes. +typedef Metric<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; +// Size measurement of cache storage. +// Also size of meta data storage units. +typedef Metric<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; +// Size unit for content stored in cache. +typedef Metric<512, int64_t> CacheDataBlocks; + +/** A cache span is a representation of raw storage. + It corresponds to a raw disk, disk partition, file, or directory. + */ +class CacheSpan +{ +public: + /// Default offset of start of data in a span. + /// @internal I think this is done to avoid collisions with partition tracking mechanisms. + static const Bytes OFFSET; +}; + +/** A section of storage in a span, used to contain a stripe. + + @note Serializable. + + @internal nee @c DiskVolBlock + */ +struct CacheStripeDescriptor { + Bytes offset; // offset of start of stripe from start of span. + CacheStoreBlocks len; // length of block. + uint32_t vol_idx; ///< If in use, the volume index. + unsigned int type : 3; + unsigned int free : 1; +}; + +/** Header data for a span. 
+ + @internal nee DiskHeader + */ +struct SpanHeader { + static constexpr uint32_t MAGIC = 0xABCD1237; + uint32_t magic; + uint32_t num_volumes; /* number of discrete volumes (DiskVol) */ + uint32_t num_free; /* number of disk volume blocks free */ + uint32_t num_used; /* number of disk volume blocks in use */ + uint32_t num_diskvol_blks; /* number of disk volume blocks */ + uint64_t num_blocks; + /// Serialized stripe descriptors. This is treated as a variable sized array. + CacheStripeDescriptor stripes[1]; +}; + +/** Stripe data, serialized format. + + @internal nee VolHeadFooter + */ +class CacheStripeMeta +{ +public: + static constexpr uint32_t MAGIC = 0xF1D0F00D; + + uint32_t magic; + VersionNumber version; + time_t create_time; + off_t write_pos; + off_t last_write_pos; + off_t agg_pos; + uint32_t generation; // token generation (vary), this cannot be 0 + uint32_t phase; + uint32_t cycle; + uint32_t sync_serial; + uint32_t write_serial; + uint32_t dirty; + uint32_t sector_size; + uint32_t unused; // pad out to 8 byte boundary + uint16_t freelist[1]; +}; + +class StripeData +{ +public: + size_t calc_hdr_len() const; + + int64_t segments; ///< Number of segments. + int64_t buckets; ///< Number of buckets. + off_t skip; ///< Start of stripe data. + off_t start; ///< Start of content data. + off_t len; ///< Total size of stripe (metric?) +}; + +inline size_t +StripeData::calc_hdr_len() const +{ + return sizeof(CacheStripeMeta) + sizeof(uint16_t) * (this->segments - 1); +} +// inline size_t StripeData::calc_dir_len() const { return this->calc_hdr_len() + this->buckets * DIR_DEPTH * this->segments * +// SIZEOF_DIR + sizeof(CacheStripeMeta); } - constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; - - typedef Metric<1, int64_t> Bytes; - typedef Metric<1024, int64_t> Kilobytes; - typedef Metric<1024 * Kilobytes::SCALE, int64_t> Megabytes; - - // Units of allocation for stripes. 
- typedef Metric<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; - // Size measurement of cache storage. - // Also size of meta data storage units. - typedef Metric<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; - // Size unit for content stored in cache. - typedef Metric<512, int64_t> CacheDataBlocks; - - /** A cache span is a representation of raw storage. - It corresponds to a raw disk, disk partition, file, or directory. - */ - class CacheSpan - { - public: - /// Default offset of start of data in a span. - /// @internal I think this is done to avoid collisions with partition tracking mechanisms. - static const Bytes OFFSET; - }; - - /** A section of storage in a span, used to contain a stripe. - - @note Serializable. - - @internal nee @c DiskVolBlock - */ - struct CacheStripeDescriptor { - Bytes offset; // offset of start of stripe from start of span. - CacheStoreBlocks len; // length of block. - uint32_t vol_idx; ///< If in use, the volume index. - unsigned int type : 3; - unsigned int free : 1; - }; - - /** Header data for a span. - - @internal nee DiskHeader - */ - struct SpanHeader { - static constexpr uint32_t MAGIC = 0xABCD1237; - uint32_t magic; - uint32_t num_volumes; /* number of discrete volumes (DiskVol) */ - uint32_t num_free; /* number of disk volume blocks free */ - uint32_t num_used; /* number of disk volume blocks in use */ - uint32_t num_diskvol_blks; /* number of disk volume blocks */ - uint64_t num_blocks; - /// Serialized stripe descriptors. This is treated as a variable sized array. - CacheStripeDescriptor stripes[1]; - }; - - /** Stripe data, serialized format. 
- - @internal nee VolHeadFooter - */ - class CacheStripeMeta { - public: - static constexpr uint32_t MAGIC = 0xF1D0F00D; - - uint32_t magic; - VersionNumber version; - time_t create_time; - off_t write_pos; - off_t last_write_pos; - off_t agg_pos; - uint32_t generation; // token generation (vary), this cannot be 0 - uint32_t phase; - uint32_t cycle; - uint32_t sync_serial; - uint32_t write_serial; - uint32_t dirty; - uint32_t sector_size; - uint32_t unused; // pad out to 8 byte boundary - uint16_t freelist[1]; - }; - - class StripeData - { - public: - size_t calc_hdr_len() const; - - int64_t segments; ///< Number of segments. - int64_t buckets; ///< Number of buckets. - off_t skip; ///< Start of stripe data. - off_t start; ///< Start of content data. - off_t len; ///< Total size of stripe (metric?) - }; - - inline size_t StripeData::calc_hdr_len() const { return sizeof(CacheStripeMeta) + sizeof(uint16_t) * (this->segments-1); } - // inline size_t StripeData::calc_dir_len() const { return this->calc_hdr_len() + this->buckets * DIR_DEPTH * this->segments * SIZEOF_DIR + sizeof(CacheStripeMeta); } - - class CacheDirEntry - { - unsigned int offset : 24; - unsigned int big : 2; - unsigned int size : 6; - unsigned int tag : 12; - unsigned int phase : 1; - unsigned int head : 1; - unsigned int pinnned : 1; - unsigned int token : 1; - unsigned int next : 16; - uint16_t offset_high; - }; - - class CacheVolume - { - }; +class CacheDirEntry +{ + unsigned int offset : 24; + unsigned int big : 2; + unsigned int size : 6; + unsigned int tag : 12; + unsigned int phase : 1; + unsigned int head : 1; + unsigned int pinnned : 1; + unsigned int token : 1; + unsigned int next : 16; + uint16_t offset_high; +}; + +class CacheVolume +{ +}; } #endif // CACHE_DEFS_H diff --git a/tools/cache_tool/CacheStore.h b/tools/cache_tool/CacheStore.h index faba04389eb..0e870ff1e21 100644 --- a/tools/cache_tool/CacheStore.h +++ b/tools/cache_tool/CacheStore.h @@ -28,24 +28,24 @@ namespace 
ApacheTrafficServer { - class CacheStore +class CacheStore +{ + /// Configuration data for the cache store. + class Config { - /// Configuration data for the cache store. - class Config - { - public: - /// A single item (line) from the configuration. - struct Item - { - StringView - }; - private: - /// Items read from the configuration. - std::vector _items; - /// The raw text of the configuration file. - std::unique_ptr _text; + public: + /// A single item (line) from the configuration. + struct Item { + StringView }; + + private: + /// Items read from the configuration. + std::vector _items; + /// The raw text of the configuration file. + std::unique_ptr _text; }; +}; } #endif // CACHE_STORE_H diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index 298a92a5176..6c6d2ddc04a 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -37,392 +37,378 @@ // Sigh, a hack for now. We already have "ts" defined as a namespace in various places so for now // just import the Full Name namespace in to 'ts' rather than direct 'namespace ts = ApachTrafficServer' -namespace ts { - using namespace ApacheTrafficServer; +namespace ts +{ +using namespace ApacheTrafficServer; } -namespace ApacheTrafficServer { - const Bytes CacheSpan::OFFSET{ CacheStoreBlocks{1} }; +namespace ApacheTrafficServer +{ +const Bytes CacheSpan::OFFSET{CacheStoreBlocks{1}}; } -namespace { - - ts::FilePath TargetFile; - ts::CommandTable Commands; - // Default this to read only, only enable write if specifically required. - int OPEN_RW_FLAGS = O_RDONLY; - - struct Span - { - Span(ts::FilePath const& path) : _path(path) {} - - void clearPermanently(); - - ts::FilePath _path; - ats_scoped_fd _fd; - std::unique_ptr _header; - }; - - struct Volume - { - struct StripeRef - { - Span* _span; ///< Span with stripe. - int _idx; ///< Stripe index in span. - }; - int _idx; ///< Volume index. 
- std::vector _stripes; +namespace +{ +ts::FilePath TargetFile; +ts::FilePath VolumeFile; +ts::CommandTable Commands; +// Default this to read only, only enable write if specifically required. +int OPEN_RW_FLAGS = O_RDONLY; + +struct Span { + Span(ts::FilePath const &path) : _path(path) {} + void clearPermanently(); + + ts::FilePath _path; + ats_scoped_fd _fd; + std::unique_ptr _header; +}; + +struct Volume { + struct StripeRef { + Span *_span; ///< Span with stripe. + int _idx; ///< Stripe index in span. }; + int _idx; ///< Volume index. + std::vector _stripes; +}; - // All of these free functions need to be moved to the Cache class. +// All of these free functions need to be moved to the Cache class. - bool Validate_Stripe_Meta(ts::CacheStripeMeta const& stripe) - { - return ts::CacheStripeMeta::MAGIC == stripe.magic && - stripe.version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && - stripe.version.ink_minor <= 2 // This may have always been zero, actually. - ; - } +bool +Validate_Stripe_Meta(ts::CacheStripeMeta const &stripe) +{ + return ts::CacheStripeMeta::MAGIC == stripe.magic && stripe.version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && + stripe.version.ink_minor <= 2 // This may have always been zero, actually. + ; +} - typedef std::tuple ProbeResult; +typedef std::tuple ProbeResult; - ProbeResult Probe_For_Stripe(ts::StringView& mem) - { - ProbeResult zret{mem.size() >= sizeof(ts::CacheStripeMeta) ? 0 : -1, ts::StringView(nullptr)}; - ts::StringView& test_site = std::get<1>(zret); +ProbeResult +Probe_For_Stripe(ts::StringView &mem) +{ + ProbeResult zret{mem.size() >= sizeof(ts::CacheStripeMeta) ? 0 : -1, ts::StringView(nullptr)}; + ts::StringView &test_site = std::get<1>(zret); - while (mem.size() >= sizeof(ts::CacheStripeMeta)) { - // The meta data is stored aligned on a stripe block boundary, so only need to check there. - test_site = mem; - mem += ts::CacheStoreBlocks::SCALE; // always move this forward to make restarting search easy. 
+ while (mem.size() >= sizeof(ts::CacheStripeMeta)) { + // The meta data is stored aligned on a stripe block boundary, so only need to check there. + test_site = mem; + mem += ts::CacheStoreBlocks::SCALE; // always move this forward to make restarting search easy. - if (Validate_Stripe_Meta(*reinterpret_cast(test_site.ptr()))) { - std::get<0>(zret) = 1; - break; - } + if (Validate_Stripe_Meta(*reinterpret_cast(test_site.ptr()))) { + std::get<0>(zret) = 1; + break; } - return zret; } + return zret; +} - void Calc_Stripe_Data(ts::CacheStripeMeta const& header, ts::CacheStripeMeta const& footer, off_t delta, ts::StripeData& data) - { - // Assuming header + free list fits in one cache stripe block, which isn't true for large stripes (>2G or so). - // Need to detect that, presumably by checking that the segment count fits in the stripe block. - ts::CacheStoreBlocks hdr_size { 1 }; - off_t space = delta - hdr_size.units(); - int64_t n_buckets = space / 40; - data.segments = n_buckets / (1<<14); - // This should never be more than one loop, usually none. - while ((n_buckets / data.segments) > 1<<14) - ++(data.segments); - data.buckets = n_buckets / data.segments; - data.start = delta * 2; // this is wrong, need to add in the base block position. - - std::cout << "Stripe is " << data.segments << " segments with " << data.buckets << " buckets per segment for " << data.buckets * data.segments * 4 << " total directory entries taking " << data.buckets * data.segments * 40 << " out of " << space << " bytes." << std::endl; - } +void +Calc_Stripe_Data(ts::CacheStripeMeta const &header, ts::CacheStripeMeta const &footer, off_t delta, ts::StripeData &data) +{ + // Assuming header + free list fits in one cache stripe block, which isn't true for large stripes (>2G or so). + // Need to detect that, presumably by checking that the segment count fits in the stripe block. 
+ ts::CacheStoreBlocks hdr_size{1}; + off_t space = delta - hdr_size.units(); + int64_t n_buckets = space / 40; + data.segments = n_buckets / (1 << 14); + // This should never be more than one loop, usually none. + while ((n_buckets / data.segments) > 1 << 14) + ++(data.segments); + data.buckets = n_buckets / data.segments; + data.start = delta * 2; // this is wrong, need to add in the base block position. + + std::cout << "Stripe is " << data.segments << " segments with " << data.buckets << " buckets per segment for " + << data.buckets * data.segments * 4 << " total directory entries taking " << data.buckets * data.segments * 40 + << " out of " << space << " bytes." << std::endl; +} - void Open_Stripe(ats_scoped_fd const& fd, ts::CacheStripeDescriptor const& block) - { - int found; - ts::StringView data; - ts::StringView stripe_mem; - constexpr static int64_t N = 1 << 24; - int64_t n; - off_t pos = block.offset.units(); - ts::CacheStripeMeta stripe_meta[4]; - off_t stripe_pos[4] = { 0,0,0,0 }; - off_t delta; - // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 - // which means at most 10/160 (1/16) of the span can be directory/header. - off_t limit = pos + block.len.units() / 16; - alignas(4096) static char buff[N]; - - // Check the earlier part of the block. Header A must be at the start of the stripe block. - // A full chunk is read in case Footer A is in that range. - n = pread(fd, buff, N, pos); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - - if (found > 0) { - if (stripe_mem.ptr() != buff) { - std::cout << "Header A found at" << pos + stripe_mem.ptr() - buff << " which is not at start of stripe block" << std::endl; - } else { - stripe_pos[0] = pos; - stripe_meta[0] = reinterpret_cast(buff); // copy it out of buffer. - std::cout << "Header A found at " << stripe_pos[0] << std::endl; - // Search for Footer A, skipping false positives. 
- while (stripe_pos[1] == 0) { +void +Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) +{ + int found; + ts::StringView data; + ts::StringView stripe_mem; + constexpr static int64_t N = 1 << 24; + int64_t n; + off_t pos = block.offset.units(); + ts::CacheStripeMeta stripe_meta[4]; + off_t stripe_pos[4] = {0, 0, 0, 0}; + off_t delta; + // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 + // which means at most 10/160 (1/16) of the span can be directory/header. + off_t limit = pos + block.len.units() / 16; + alignas(4096) static char buff[N]; + + // Check the earlier part of the block. Header A must be at the start of the stripe block. + // A full chunk is read in case Footer A is in that range. + n = pread(fd, buff, N, pos); + data.setView(buff, n); + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + + if (found > 0) { + if (stripe_mem.ptr() != buff) { + std::cout << "Header A found at" << pos + stripe_mem.ptr() - buff << " which is not at start of stripe block" << std::endl; + } else { + stripe_pos[0] = pos; + stripe_meta[0] = reinterpret_cast(buff); // copy it out of buffer. + std::cout << "Header A found at " << stripe_pos[0] << std::endl; + // Search for Footer A, skipping false positives. + while (stripe_pos[1] == 0) { + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + while (found == 0 && pos < limit) { + pos += N; + n = pread(fd, buff, N, pos); + data.setView(buff, n); std::tie(found, stripe_mem) = Probe_For_Stripe(data); - while (found == 0 && pos < limit) { - pos += N; - n = pread(fd, buff, N, pos); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - } - if (found > 0) { - // Need to be more thorough in cross checks but this is OK for now. 
- ts::CacheStripeMeta const& s = *reinterpret_cast(stripe_mem.ptr()); - if (s.version == stripe_meta[0].version) { - stripe_meta[1] = s; - stripe_pos[1] = pos + (stripe_mem.ptr() - buff); - printf("Footer A found at %" PRIu64 "\n", stripe_pos[1]); - if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial) { - printf("Copy A is valid - sync=%d\n", stripe_meta[0].sync_serial); - } - } else { - // false positive, keep looking. - found = 0; + } + if (found > 0) { + // Need to be more thorough in cross checks but this is OK for now. + ts::CacheStripeMeta const &s = *reinterpret_cast(stripe_mem.ptr()); + if (s.version == stripe_meta[0].version) { + stripe_meta[1] = s; + stripe_pos[1] = pos + (stripe_mem.ptr() - buff); + printf("Footer A found at %" PRIu64 "\n", stripe_pos[1]); + if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial) { + printf("Copy A is valid - sync=%d\n", stripe_meta[0].sync_serial); } } else { - printf("Header A not found, invalid stripe.\n"); - break; + // false positive, keep looking. + found = 0; } + } else { + printf("Header A not found, invalid stripe.\n"); + break; } + } - // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve - // (as the exact offsets are computable). - if (stripe_pos[1]) { - delta = stripe_pos[1] - stripe_pos[0]; - // Header B should be immediately after Footer A. If at the end of the last read, - // do another read. - if (!data) { - pos += N; - n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, pos); - data.setView(buff, n); - } + // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve + // (as the exact offsets are computable). + if (stripe_pos[1]) { + delta = stripe_pos[1] - stripe_pos[0]; + // Header B should be immediately after Footer A. If at the end of the last read, + // do another read. 
+ if (!data) { + pos += N; + n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, pos); + data.setView(buff, n); + } + std::tie(found, stripe_mem) = Probe_For_Stripe(data); + if (found <= 0) { + printf("Header B not found at expected location.\n"); + } else { + stripe_meta[2] = *reinterpret_cast(stripe_mem.ptr()); + stripe_pos[2] = pos + (stripe_mem.ptr() - buff); + printf("Found Header B at expected location %" PRIu64 ".\n", stripe_pos[2]); + + // Footer B must be at the same relative offset to Header B as Footer A -> Header A. + n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, stripe_pos[2] + delta); + data.setView(buff, n); std::tie(found, stripe_mem) = Probe_For_Stripe(data); - if (found <= 0) { - printf("Header B not found at expected location.\n"); + if (found == 1) { + stripe_pos[3] = stripe_pos[2] + delta; + stripe_meta[3] = *reinterpret_cast(stripe_mem.ptr()); + printf("Footer B found at expected location %" PRIu64 ".\n", stripe_pos[3]); } else { - stripe_meta[2] = *reinterpret_cast(stripe_mem.ptr()); - stripe_pos[2] = pos + (stripe_mem.ptr() - buff); - printf("Found Header B at expected location %" PRIu64 ".\n", stripe_pos[2]); - - // Footer B must be at the same relative offset to Header B as Footer A -> Header A. 
- n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, stripe_pos[2] + delta); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - if (found == 1) { - stripe_pos[3] = stripe_pos[2] + delta; - stripe_meta[3] = *reinterpret_cast(stripe_mem.ptr()); - printf("Footer B found at expected location %" PRIu64 ".\n", stripe_pos[3]); - } else { - printf("Footer B not found at expected location %" PRIu64 ".\n", stripe_pos[2] + delta); - } + printf("Footer B not found at expected location %" PRIu64 ".\n", stripe_pos[2] + delta); } } + } - if (stripe_pos[1]) { - if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial && - (0 == stripe_pos[3] || stripe_meta[2].sync_serial != stripe_meta[3].sync_serial || stripe_meta[0].sync_serial > stripe_meta[2].sync_serial)) { - ts::StripeData sdata; - Calc_Stripe_Data(stripe_meta[0], stripe_meta[1], delta, sdata); - } else if (stripe_pos[3] && stripe_meta[2].sync_serial == stripe_meta[3].sync_serial) { - ts::StripeData sdata; - Calc_Stripe_Data(stripe_meta[2], stripe_meta[3], delta, sdata); - } else { - std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; - } + if (stripe_pos[1]) { + if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial && + (0 == stripe_pos[3] || stripe_meta[2].sync_serial != stripe_meta[3].sync_serial || + stripe_meta[0].sync_serial > stripe_meta[2].sync_serial)) { + ts::StripeData sdata; + Calc_Stripe_Data(stripe_meta[0], stripe_meta[1], delta, sdata); + } else if (stripe_pos[3] && stripe_meta[2].sync_serial == stripe_meta[3].sync_serial) { + ts::StripeData sdata; + Calc_Stripe_Data(stripe_meta[2], stripe_meta[3], delta, sdata); } else { - std::cout << "Invalid stripe data - no candidates found." << std::endl; + std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; } + } else { + std::cout << "Invalid stripe data - no candidates found." 
<< std::endl; } - } else { - printf("Stripe Header A not found in first chunk\n"); } + } else { + printf("Stripe Header A not found in first chunk\n"); } +} +// -------------------- +struct Cache { + ~Cache(); - // -------------------- - struct Cache - { - ~Cache(); + void load(ts::FilePath const &path); + void loadConfig(ts::FilePath const &path); + void loadDevice(ts::FilePath const &path); - void load(ts::FilePath const& path); - void loadConfig(ts::FilePath const& path); - void loadDevice(ts::FilePath const& path); + enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; + void dumpSpans(SpanDumpDepth depth); + void dumpVolumes(); - enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; - void dumpSpans(SpanDumpDepth depth); - void dumpVolumes(); + std::list _spans; + std::map _volumes; +}; - std::list _spans; - std::map _volumes; - }; - - void - Cache::load(ts::FilePath const& path) - { - if (!path.is_readable()) throw(std::system_error(errno, std::system_category(), static_cast(path))); - else if (path.is_regular_file()) this->loadConfig(path); - else if (path.is_char_device() || path.is_block_device()) this->loadDevice(path); - else printf("Not a valid file type: '%s'\n", static_cast(path)); - } +void +Cache::load(ts::FilePath const &path) +{ + if (!path.is_readable()) + throw(std::system_error(errno, std::system_category(), static_cast(path))); + else if (path.is_regular_file()) + this->loadConfig(path); + else if (path.is_char_device() || path.is_block_device()) + this->loadDevice(path); + else + printf("Not a valid file type: '%s'\n", static_cast(path)); +} - void - Cache::loadConfig(ts::FilePath const& path) - { - - static const ts::StringView TAG_ID("id"); - static const ts::StringView TAG_VOL("volume"); - - ts::BulkFile cfile(path); - if (0 == cfile.load()) { - ts::StringView content = cfile.content(); - while (content) { - ts::StringView line = content.splitPrefix('\n'); - line.ltrim(&isspace); - if (!line || '#' == *line) continue; - ts::StringView 
path = line.extractPrefix(&isspace); - if (path) { - // After this the line is [size] [id=string] [vol=#] - while (line) { - ts::StringView value(line.extractPrefix(&isspace)); - if (value) { - ts::StringView tag(value.splitPrefix('=')); - if (!tag) { - } else if (0 == strcasecmp(tag,TAG_ID)) { - } else if (0 == strcasecmp(tag,TAG_VOL)) { - } +void +Cache::loadConfig(ts::FilePath const &path) +{ + static const ts::StringView TAG_ID("id"); + static const ts::StringView TAG_VOL("volume"); + + ts::BulkFile cfile(path); + if (0 == cfile.load()) { + ts::StringView content = cfile.content(); + while (content) { + ts::StringView line = content.splitPrefix('\n'); + line.ltrim(&isspace); + if (!line || '#' == *line) + continue; + ts::StringView path = line.extractPrefix(&isspace); + if (path) { + // After this the line is [size] [id=string] [vol=#] + while (line) { + ts::StringView value(line.extractPrefix(&isspace)); + if (value) { + ts::StringView tag(value.splitPrefix('=')); + if (!tag) { + } else if (0 == strcasecmp(tag, TAG_ID)) { + } else if (0 == strcasecmp(tag, TAG_VOL)) { } } - this->load(ts::FilePath(path)); } + this->load(ts::FilePath(path)); } } } +} - void - Cache::loadDevice(ts::FilePath const& path) - { - int flags; +void +Cache::loadDevice(ts::FilePath const &path) +{ + int flags; - flags = OPEN_RW_FLAGS + flags = OPEN_RW_FLAGS #if defined(O_DIRECT) - | O_DIRECT + | O_DIRECT #endif #if defined(O_DSYNC) - | O_DSYNC + | O_DSYNC #endif - ; - - ats_scoped_fd fd(path.open(flags)); - - if (fd) { - off_t offset = ts::CacheSpan::OFFSET.units(); - alignas(512) char buff[8192]; - int64_t n = pread(fd, buff, sizeof(buff), offset); - if (n >= static_cast(sizeof(ts::SpanHeader))) { - ts::SpanHeader& span_hdr = reinterpret_cast(buff); -# if 0 - printf("Span: %s\n : Magic = 0x%x (%s) volumes=%d used=%d free=%d vol_blocks=%d total blocks=%" PRIu64 " \n" - , static_cast(path) - , span_hdr.magic, (span_hdr.magic == ts::SpanHeader::MAGIC ? 
"match" : "fail") - , span_hdr.num_volumes, span_hdr.num_used, span_hdr.num_free - , span_hdr.num_diskvol_blks, span_hdr.num_blocks - ); -# endif - // See if it looks valid - if (span_hdr.magic == ts::SpanHeader::MAGIC && - span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { - int nspb = span_hdr.num_diskvol_blks; - size_t span_hdr_size = sizeof(ts::SpanHeader) + ( nspb - 1 ) * sizeof(ts::CacheStripeDescriptor); - Span* span = new Span(path); - span->_header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); - if (span_hdr_size <= sizeof(buff)) { - memcpy(span->_header.get(), buff, span_hdr_size); - } else { - // TODO - check the pread return - pread(fd, span->_header.get(), span_hdr_size, offset); - } - span->_fd = fd.release(); - _spans.push_back(span); - for ( auto i = 0 ; i < nspb ; ++i ) { - ts::CacheStripeDescriptor& stripe = span->_header->stripes[i]; -# if 0 - std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() - << " blocks=" << stripe.len.units() << " vol=" << stripe.vol_idx - << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; -# endif - if (stripe.free == 0) { - // Add to volume. 
- _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef { span, i }); -// if (Examine_Stripes_P) Open_Stripe(fd, stripe); - } + ; + + ats_scoped_fd fd(path.open(flags)); + + if (fd) { + off_t offset = ts::CacheSpan::OFFSET.units(); + alignas(512) char buff[8192]; + int64_t n = pread(fd, buff, sizeof(buff), offset); + if (n >= static_cast(sizeof(ts::SpanHeader))) { + ts::SpanHeader &span_hdr = reinterpret_cast(buff); + // See if it looks valid + if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { + int nspb = span_hdr.num_diskvol_blks; + size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); + Span *span = new Span(path); + span->_header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); + if (span_hdr_size <= sizeof(buff)) { + memcpy(span->_header.get(), buff, span_hdr_size); + } else { + // TODO - check the pread return + pread(fd, span->_header.get(), span_hdr_size, offset); + } + span->_fd = fd.release(); + _spans.push_back(span); + for (auto i = 0; i < nspb; ++i) { + ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; + if (stripe.free == 0) { + // Add to volume. 
+ _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef{span, i}); } } - } else { - printf("Failed to read from '%s' [%d]\n", path.path(), errno); } } else { - printf("Unable to open '%s'\n", static_cast(path)); + printf("Failed to read from '%s' [%d]\n", path.path(), errno); } + } else { + printf("Unable to open '%s'\n", static_cast(path)); } +} - void - Cache::dumpSpans(SpanDumpDepth depth) - { - if (depth >= SpanDumpDepth::SPAN) { - for (auto span : _spans) { - std::cout << "Span: " << span->_path << " " - << span->_header->num_volumes << " Volumes " - << span->_header->num_used << " in use " - << span->_header->num_free << " free " - << span->_header->num_diskvol_blks << " stripes " - << span->_header->num_blocks << " blocks" - << std::endl; - for (unsigned int i = 0 ; i < span->_header->num_diskvol_blks ; ++i ) { - ts::CacheStripeDescriptor& stripe = span->_header->stripes[i]; - std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() - << " blocks=" << stripe.len.units() << " vol=" << stripe.vol_idx - << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; - if (depth >= SpanDumpDepth::STRIPE) { - Open_Stripe(span->_fd, stripe); - } +void +Cache::dumpSpans(SpanDumpDepth depth) +{ + if (depth >= SpanDumpDepth::SPAN) { + for (auto span : _spans) { + std::cout << "Span: " << span->_path << " " << span->_header->num_volumes << " Volumes " << span->_header->num_used + << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " + << span->_header->num_blocks << " blocks" << std::endl; + for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { + ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; + std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() + << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? 
"free" : "in-use") << std::endl; + if (depth >= SpanDumpDepth::STRIPE) { + Open_Stripe(span->_fd, stripe); } } } } +} - void - Cache::dumpVolumes() - { - for ( auto const& elt : _volumes ) { - size_t size = 0; - for ( auto const& r : elt.second._stripes ) - size += r._span->_header->stripes[r._idx].len.units(); - - std::cout << "Volume " << elt.first << " has " << elt.second._stripes.size() << " stripes and " - << size << " bytes" - << std::endl; - } - } +void +Cache::dumpVolumes() +{ + for (auto const &elt : _volumes) { + size_t size = 0; + for (auto const &r : elt.second._stripes) + size += r._span->_header->stripes[r._idx].len.units(); - Cache::~Cache() - { - for ( auto* span : _spans) delete span; + std::cout << "Volume " << elt.first << " has " << elt.second._stripes.size() << " stripes and " << size << " bytes" + << std::endl; } +} - void - Span::clearPermanently() - { - alignas(512) static char zero[ts::CacheStoreBlocks::SCALE]; // should be all zero, it's static. - std::cout << "Clearing " << _path << " permanently on disk "; - ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); - if (n == sizeof(zero)) std::cout << "done"; - else { - const char* text = strerror(errno); - std::cout << "failed"; - if (n >= 0) std::cout << " - " << n << " of " << sizeof(zero) << " bytes written"; - std::cout << " - " << text; - } - std::cout << std::endl; - } +Cache::~Cache() +{ + for (auto *span : _spans) + delete span; +} - struct option Options[] = { - { "help", false, nullptr, 'h' } - }; +void +Span::clearPermanently() +{ + alignas(512) static char zero[ts::CacheStoreBlocks::SCALE]; // should be all zero, it's static. 
+ std::cout << "Clearing " << _path << " permanently on disk "; + ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); + if (n == sizeof(zero)) + std::cout << "done"; + else { + const char *text = strerror(errno); + std::cout << "failed"; + if (n >= 0) + std::cout << " - " << n << " of " << sizeof(zero) << " bytes written"; + std::cout << " - " << text; + } + std::cout << std::endl; +} +struct option Options[] = {{"help", false, nullptr, 'h'}}; } ts::Rv @@ -436,33 +422,37 @@ List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) } ts::Rv -Clear_Spans(int argc, char* argv[]) +Clear_Spans(int argc, char *argv[]) { Cache cache; OPEN_RW_FLAGS = O_RDWR; cache.load(TargetFile); - for ( auto* span : cache._spans) { + for (auto *span : cache._spans) { span->clearPermanently(); } return true; } -int main(int argc, char* argv[]) +int +main(int argc, char *argv[]) { int opt_idx = 0; int opt_val; bool help = false; while (-1 != (opt_val = getopt_long(argc, argv, "h", Options, &opt_idx))) { switch (opt_val) { - case 'h': - printf("Usage: %s [device_path|config_file] [ ...]\n", argv[0]); - help = true; - break; - } + case 'h': + printf("Usage: %s [device_path|config_file] [ ...]\n", argv[0]); + help = true; + break; + } } - Commands.add(std::string("list"), std::string("List elements of the cache"), [] (int argc, char* argv[]) { return List_Stripes(Cache::SpanDumpDepth::SPAN, argc, argv); } ) - .subCommand(std::string("stripes"), std::string("The stripes"), [] (int argc, char* argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); + Commands + .add(std::string("list"), std::string("List elements of the cache"), + [](int argc, char *argv[]) { return List_Stripes(Cache::SpanDumpDepth::SPAN, argc, argv); }) + .subCommand(std::string("stripes"), std::string("The stripes"), + [](int argc, char *argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); Commands.add(std::string("clear"), std::string("Clear 
spans"), &Clear_Spans); if (help) { @@ -472,8 +462,8 @@ int main(int argc, char* argv[]) if (optind < argc) { TargetFile = argv[optind]; - argc -= optind+1; - argv += optind+1; + argc -= optind + 1; + argv += optind + 1; } ts::Rv result = Commands.invoke(argc, argv); diff --git a/tools/cache_tool/Command.cc b/tools/cache_tool/Command.cc index add56a2f53f..f59d7cfaa18 100644 --- a/tools/cache_tool/Command.cc +++ b/tools/cache_tool/Command.cc @@ -29,124 +29,142 @@ namespace ApacheTrafficServer { +int CommandTable::_opt_idx = 0; - int CommandTable::_opt_idx = 0; - - // Error message functions. - ts::Errata ERR_COMMAND_TAG_NOT_FOUND(char const* tag) { std::ostringstream s; - s << "Command tag " << tag << " not found"; - return ts::Errata(s.str());} - - ts::Errata ERR_SUBCOMMAND_REQUIRED() { return ts::Errata(std::string("Incomplete command, additional keyword required")); } - +// Error message functions. +ts::Errata +ERR_COMMAND_TAG_NOT_FOUND(char const *tag) +{ + std::ostringstream s; + s << "Command tag " << tag << " not found"; + return ts::Errata(s.str()); +} - CommandTable::Command::Command() - { - } +ts::Errata +ERR_SUBCOMMAND_REQUIRED() +{ + return ts::Errata(std::string("Incomplete command, additional keyword required")); +} - CommandTable::Command::Command(std::string const& name, std::string const& help) : _name(name), _help(help) - { - } +CommandTable::Command::Command() +{ +} - CommandTable::Command::Command(std::string const& name, std::string const& help, CommandFunction const& f) : _name(name), _help(help), _func(f) - { - } +CommandTable::Command::Command(std::string const &name, std::string const &help) : _name(name), _help(help) +{ +} - auto CommandTable::Command::set(CommandFunction const& f) -> self& - { - _func = f; - return *this; - } +CommandTable::Command::Command(std::string const &name, std::string const &help, CommandFunction const &f) + : _name(name), _help(help), _func(f) +{ +} - CommandTable::Command& 
CommandTable::Command::subCommand(std::string const& name, std::string const& help, CommandFunction const & f) - { - _group.emplace_back(Command(name, help, f)); - return _group.back(); - } +auto +CommandTable::Command::set(CommandFunction const &f) -> self & +{ + _func = f; + return *this; +} - auto CommandTable::Command::subCommand(std::string const& name, std::string const& help) -> self& - { - _group.emplace_back(Command(name,help)); - return _group.back(); - } +CommandTable::Command & +CommandTable::Command::subCommand(std::string const &name, std::string const &help, CommandFunction const &f) +{ + _group.emplace_back(Command(name, help, f)); + return _group.back(); +} - ts::Rv CommandTable::Command::invoke(int argc, char* argv[]) - { - ts::Rv zret = true; +auto +CommandTable::Command::subCommand(std::string const &name, std::string const &help) -> self & +{ + _group.emplace_back(Command(name, help)); + return _group.back(); +} - if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { - // Tail of command keywords, try to invoke. - if (_func) zret = _func(argc - CommandTable::_opt_idx, argv + CommandTable::_opt_idx); - else zret = false, zret = ERR_SUBCOMMAND_REQUIRED(); +ts::Rv +CommandTable::Command::invoke(int argc, char *argv[]) +{ + ts::Rv zret = true; + + if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { + // Tail of command keywords, try to invoke. 
+ if (_func) + zret = _func(argc - CommandTable::_opt_idx, argv + CommandTable::_opt_idx); + else + zret = false, zret = ERR_SUBCOMMAND_REQUIRED(); + } else { + char const *tag = argv[CommandTable::_opt_idx]; + auto spot = std::find_if(_group.begin(), _group.end(), + [tag](CommandGroup::value_type const &elt) { return 0 == strcasecmp(tag, elt._name.c_str()); }); + if (spot != _group.end()) { + ++CommandTable::_opt_idx; + zret = spot->invoke(argc, argv); } else { - char const* tag = argv[CommandTable::_opt_idx]; - auto spot = std::find_if(_group.begin(), _group.end(), - [tag](CommandGroup::value_type const& elt) { - return 0 == strcasecmp(tag, elt._name.c_str()); } ); - if (spot != _group.end()) { - ++CommandTable::_opt_idx; - zret = spot->invoke(argc, argv); - } - else { - zret = false; - zret = ERR_COMMAND_TAG_NOT_FOUND(tag); - } + zret = false; + zret = ERR_COMMAND_TAG_NOT_FOUND(tag); } - return zret; } + return zret; +} - void CommandTable::Command::helpMessage(int argc, char* argv[], std::ostream& out, std::string const& prefix) const - { - - if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { - // Tail of command keywords, start listing - if (_name.empty()) { // root command group, don't print for that. - for ( Command const& c : _group ) c.helpMessage(argc, argv, out, prefix); - } else { - out << prefix << _name << ": " << _help << std::endl; - for ( Command const& c : _group ) c.helpMessage(argc, argv, out, " " + prefix); - } +void +CommandTable::Command::helpMessage(int argc, char *argv[], std::ostream &out, std::string const &prefix) const +{ + if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { + // Tail of command keywords, start listing + if (_name.empty()) { // root command group, don't print for that. 
+ for (Command const &c : _group) + c.helpMessage(argc, argv, out, prefix); + } else { + out << prefix << _name << ": " << _help << std::endl; + for (Command const &c : _group) + c.helpMessage(argc, argv, out, " " + prefix); + } + } else { + char const *tag = argv[CommandTable::_opt_idx]; + auto spot = std::find_if(_group.begin(), _group.end(), + [tag](CommandGroup::value_type const &elt) { return 0 == strcasecmp(tag, elt._name.c_str()); }); + if (spot != _group.end()) { + ++CommandTable::_opt_idx; + spot->helpMessage(argc, argv, out, prefix); } else { - char const* tag = argv[CommandTable::_opt_idx]; - auto spot = std::find_if(_group.begin(), _group.end(), - [tag](CommandGroup::value_type const& elt) { - return 0 == strcasecmp(tag, elt._name.c_str()); } ); - if (spot != _group.end()) { - ++CommandTable::_opt_idx; - spot->helpMessage(argc, argv, out, prefix); - } else { - out << ERR_COMMAND_TAG_NOT_FOUND(tag) << std::endl; - } + out << ERR_COMMAND_TAG_NOT_FOUND(tag) << std::endl; } } +} - CommandTable::Command::~Command() { } +CommandTable::Command::~Command() +{ +} - CommandTable::CommandTable() - { - } +CommandTable::CommandTable() +{ +} - auto CommandTable::add(std::string const& name, std::string const& help) -> Command& - { - return _top.subCommand(name, help); - } +auto +CommandTable::add(std::string const &name, std::string const &help) -> Command & +{ + return _top.subCommand(name, help); +} - auto CommandTable::add(std::string const& name, std::string const& help, CommandFunction const& f) -> Command& - { - return _top.subCommand(name, help, f); - } +auto +CommandTable::add(std::string const &name, std::string const &help, CommandFunction const &f) -> Command & +{ + return _top.subCommand(name, help, f); +} - ts::Rv CommandTable::invoke(int argc, char* argv[]) - { - _opt_idx = 0; - return _top.invoke(argc, argv); - } +ts::Rv +CommandTable::invoke(int argc, char *argv[]) +{ + _opt_idx = 0; + return _top.invoke(argc, argv); +} - // This is basically cloned 
from invoke(), need to find how to do some unification. - void CommandTable::helpMessage(int argc, char* argv[]) const - { - _opt_idx = 0; - std::cerr << "Command tree" << std::endl; - _top.helpMessage(argc, argv, std::cerr, std::string("* ")); - } +// This is basically cloned from invoke(), need to find how to do some unification. +void +CommandTable::helpMessage(int argc, char *argv[]) const +{ + _opt_idx = 0; + std::cerr << "Command tree" << std::endl; + _top.helpMessage(argc, argv, std::cerr, std::string("* ")); +} } diff --git a/tools/cache_tool/Command.h b/tools/cache_tool/Command.h index 9c25dc2ab62..4c336b83c64 100644 --- a/tools/cache_tool/Command.h +++ b/tools/cache_tool/Command.h @@ -31,90 +31,95 @@ #define CACHE_TOOL_COMMAND_H namespace ApacheTrafficServer { - // Because in C+11 std::max is not constexpr - template < typename I > constexpr inline I maximum(I lhs, I rhs) { return lhs < rhs ? rhs : lhs; } +// Because in C+11 std::max is not constexpr +template +constexpr inline I +maximum(I lhs, I rhs) +{ + return lhs < rhs ? rhs : lhs; +} + +/// Top level container for commands. +class CommandTable +{ + typedef CommandTable self; ///< Self reference type. +public: + /// Signature for actual command implementation. + typedef std::function(int argc, char *argv[])> CommandFunction; - /// Top level container for commands. - class CommandTable + CommandTable(); + + /// A command. + /// This is either a leaf (and has a function for an implementation) or it is a group + /// of nested commands. + class Command { - typedef CommandTable self; ///< Self reference type. + typedef Command self; ///< Self reference type. public: - /// Signature for actual command implementation. - typedef std::function (int argc, char* argv[])> CommandFunction; - - CommandTable(); - - /// A command. - /// This is either a leaf (and has a function for an implementation) or it is a group - /// of nested commands. - class Command - { - typedef Command self; ///< Self reference type. 
- public: - ~Command(); - - /** Add a subcommand to this command. - @return The subcommand object. - */ - Command& subCommand(std::string const& name, std::string const& help); - /** Add a subcommand to this command. - @return The new sub command instance. - */ - Command& subCommand(std::string const& name, std::string const& help, CommandFunction const& f); - /** Add a leaf command. - @return This new sub command instance. - */ - Command& set(CommandFunction const& f); - - /** Invoke a command. - @return The return value of the executed command, or an error value if the command was not found. - */ - ts::Rv invoke(int argc, char* argv[]); - - void helpMessage(int argc, char* argv[], std::ostream& out = std::cerr, std::string const& prefix = std::string()) const; - - protected: - typedef std::vector CommandGroup; - - std::string _name; ///< Command name. - std::string _help; ///< Help message. - /// Command to execute if no more keywords. - CommandFunction _func; - /// Next command for current keyword. - CommandGroup _group; - - /// Default constructor, no execution logic. - Command(); - /// Construct with a function for this command. - Command(std::string const& name, std::string const& help); - /// Construct with a function for this command. - Command(std::string const& name, std::string const& help, CommandFunction const& f); - - friend class CommandTable; - }; - - /** Add a direct command. - @return The created @c Command instance. - */ - Command& add(std::string const& name, std::string const& help, CommandFunction const& f); - - /** Add a parent command. - @return The created @c Command instance. + ~Command(); + + /** Add a subcommand to this command. + @return The subcommand object. + */ + Command &subCommand(std::string const &name, std::string const &help); + /** Add a subcommand to this command. + @return The new sub command instance. 
+ */ + Command &subCommand(std::string const &name, std::string const &help, CommandFunction const &f); + /** Add a leaf command. + @return This new sub command instance. */ - Command& add(std::string const& name, std::string const& help); + Command &set(CommandFunction const &f); /** Invoke a command. @return The return value of the executed command, or an error value if the command was not found. */ - ts::Rv invoke(int argc, char* argv[]); + ts::Rv invoke(int argc, char *argv[]); - void helpMessage(int argc, char* argv[]) const; + void helpMessage(int argc, char *argv[], std::ostream &out = std::cerr, std::string const &prefix = std::string()) const; protected: - Command _top; - static int _opt_idx; - - friend class Command; + typedef std::vector CommandGroup; + + std::string _name; ///< Command name. + std::string _help; ///< Help message. + /// Command to execute if no more keywords. + CommandFunction _func; + /// Next command for current keyword. + CommandGroup _group; + + /// Default constructor, no execution logic. + Command(); + /// Construct with a function for this command. + Command(std::string const &name, std::string const &help); + /// Construct with a function for this command. + Command(std::string const &name, std::string const &help, CommandFunction const &f); + + friend class CommandTable; }; + + /** Add a direct command. + @return The created @c Command instance. + */ + Command &add(std::string const &name, std::string const &help, CommandFunction const &f); + + /** Add a parent command. + @return The created @c Command instance. + */ + Command &add(std::string const &name, std::string const &help); + + /** Invoke a command. + @return The return value of the executed command, or an error value if the command was not found. 
+ */ + ts::Rv invoke(int argc, char *argv[]); + + void helpMessage(int argc, char *argv[]) const; + +protected: + Command _top; + static int _opt_idx; + + friend class Command; +}; } #endif diff --git a/tools/cache_tool/File.cc b/tools/cache_tool/File.cc index 9dc7c4c4838..776f1706c39 100644 --- a/tools/cache_tool/File.cc +++ b/tools/cache_tool/File.cc @@ -25,62 +25,74 @@ #include #include -namespace ApacheTrafficServer { - - FilePath& FilePath::operator = (char const* path) - { - _path = ats_strdup(path); - _stat_p = false; - return *this; - } +namespace ApacheTrafficServer +{ +FilePath & +FilePath::operator=(char const *path) +{ + _path = ats_strdup(path); + _stat_p = false; + return *this; +} - bool FilePath::is_readable() const { return 0 == access(_path, R_OK); } +bool +FilePath::is_readable() const +{ + return 0 == access(_path, R_OK); +} - FilePath operator / (FilePath const& lhs, FilePath const& rhs) - { - return static_cast(lhs) / rhs; - } +FilePath +operator/(FilePath const &lhs, FilePath const &rhs) +{ + return static_cast(lhs) / rhs; +} - FilePath operator / (char const* lhs, FilePath const& rhs) - { - ats_scoped_str np; +FilePath +operator/(char const *lhs, FilePath const &rhs) +{ + ats_scoped_str np; - // If either path is empty, return the other path. - if (nullptr == lhs || 0 == *lhs) return rhs; - if (!rhs.has_path()) return FilePath(lhs); + // If either path is empty, return the other path. + if (nullptr == lhs || 0 == *lhs) + return rhs; + if (!rhs.has_path()) + return FilePath(lhs); - return FilePath(path_join(lhs, static_cast(rhs))); - } + return FilePath(path_join(lhs, static_cast(rhs))); +} - ats_scoped_fd FilePath::open(int flags) const - { - return ats_scoped_fd(this->has_path() ? ::open(_path, flags) : ats_scoped_fd::Traits::initValue()); - } +ats_scoped_fd +FilePath::open(int flags) const +{ + return ats_scoped_fd(this->has_path() ? 
::open(_path, flags) : ats_scoped_fd::Traits::initValue()); +} - int - BulkFile::load() - { - ats_scoped_fd fd(this->open(O_RDONLY)); - int zret = 0; // return errno if something goes wrong. - struct stat info; - if (0 == fstat(fd, &info)) { - size_t n = info.st_size; - _content = static_cast(ats_malloc(n+2)); - if (0 < (_len = read(fd, _content, n))) { - // Force a trailing linefeed and nul. - memset(_content + _len, 0, 2); - if (_content[n-1] != '\n') { - _content[n] = '\n'; - ++_len; - } - } else zret = errno; - } else zret = errno; - return zret; - } +int +BulkFile::load() +{ + ats_scoped_fd fd(this->open(O_RDONLY)); + int zret = 0; // return errno if something goes wrong. + struct stat info; + if (0 == fstat(fd, &info)) { + size_t n = info.st_size; + _content = static_cast(ats_malloc(n + 2)); + if (0 < (_len = read(fd, _content, n))) { + // Force a trailing linefeed and nul. + memset(_content + _len, 0, 2); + if (_content[n - 1] != '\n') { + _content[n] = '\n'; + ++_len; + } + } else + zret = errno; + } else + zret = errno; + return zret; +} - StringView - BulkFile::content() const - { - return StringView(_content, _len); - } +StringView +BulkFile::content() const +{ + return StringView(_content, _len); +} } diff --git a/tools/cache_tool/File.h b/tools/cache_tool/File.h index 19fa254c9ab..82ec9a69abd 100644 --- a/tools/cache_tool/File.h +++ b/tools/cache_tool/File.h @@ -21,7 +21,7 @@ limitations under the License. */ -#if ! defined(ATS_FILE_HEADER) +#if !defined(ATS_FILE_HEADER) #define ATS_FILE_HEADER #include @@ -30,124 +30,176 @@ namespace ApacheTrafficServer { - /** A file class for supporting path operations. - */ - class FilePath - { - typedef FilePath self; ///< Self reference type. - public: - FilePath(); - /// Construct from a null terminated string. - explicit FilePath(char const* path); - /// Construct from a string view. - explicit FilePath(StringView const& path); - /// Copy constructor - copies the path. 
- FilePath(self const& that); - /// Move constructor. - FilePath(self && that); - /// Assign a new path. - self& operator = (char const* path); - /// Combine two paths, making sure there is exactly one separator between them. - self operator / (self const& rhs); - /// Create a new instance by appended @a path. - self operator / (char const* path); - /// Check if there is a path. - bool has_path() const; - /// Check if the path is absolute. - bool is_absolute() const; - /// Check if the path is not absolute. - bool is_relative() const; - /// Check if file is readable. - bool is_readable() const; - /// Access the path as a null terminated string. - operator const char* () const; - /// Access the path explicitly. - char const* path() const; - - /// Get the stat buffer. - /// @return A valid stat buffer or @c nullptr if the system call failed. - struct stat const* stat() const; - - /// Return the file type value. - int file_type() const; - - bool is_char_device() const; - bool is_block_device() const; - bool is_dir() const; - bool is_regular_file() const; - - // Utility methods. - ats_scoped_fd open(int flags) const; - - protected: - ats_scoped_str _path; ///< File path. - mutable struct stat _stat; ///< File information. - mutable bool _stat_p = false; ///< Whether _stat is valid. - }; - - /** A file support class for handling files as bulk content. - - @note This is used primarily for configuration files where the entire file is read every time - and it's rarely (if ever) useful to read it incrementally. The general scheme is the entire file - is read and then @c StringView elements are used to reference the bulk content. - - @internal The design goal of this class is to supplant the free functions later in this header. - - */ - class BulkFile : public FilePath - { - typedef BulkFile self; ///< Self reference type. - typedef FilePath super; ///< Parent type. - public: - // Inherit super class constructors. 
- using super::super; - ///< Conversion constructor from base class. - BulkFile(super&& that); - /// Read the contents of the file in a local buffer. - /// @return @c errno - int load(); - StringView content() const; - private: - ats_scoped_str _content; ///< The file contents. - size_t _len; ///< Length of file content. - }; +/** A file class for supporting path operations. + */ +class FilePath +{ + typedef FilePath self; ///< Self reference type. +public: + FilePath(); + /// Construct from a null terminated string. + explicit FilePath(char const *path); + /// Construct from a string view. + explicit FilePath(StringView const &path); + /// Copy constructor - copies the path. + FilePath(self const &that); + /// Move constructor. + FilePath(self &&that); + /// Assign a new path. + self &operator=(char const *path); + /// Combine two paths, making sure there is exactly one separator between them. + self operator/(self const &rhs); + /// Create a new instance by appended @a path. + self operator/(char const *path); + /// Check if there is a path. + bool has_path() const; + /// Check if the path is absolute. + bool is_absolute() const; + /// Check if the path is not absolute. + bool is_relative() const; + /// Check if file is readable. + bool is_readable() const; + /// Access the path as a null terminated string. + operator const char *() const; + /// Access the path explicitly. + char const *path() const; + + /// Get the stat buffer. + /// @return A valid stat buffer or @c nullptr if the system call failed. + struct stat const *stat() const; + + /// Return the file type value. + int file_type() const; + + bool is_char_device() const; + bool is_block_device() const; + bool is_dir() const; + bool is_regular_file() const; + + // Utility methods. + ats_scoped_fd open(int flags) const; + +protected: + ats_scoped_str _path; ///< File path. + mutable struct stat _stat; ///< File information. + mutable bool _stat_p = false; ///< Whether _stat is valid. 
+}; + +/** A file support class for handling files as bulk content. + + @note This is used primarily for configuration files where the entire file is read every time + and it's rarely (if ever) useful to read it incrementally. The general scheme is the entire file + is read and then @c StringView elements are used to reference the bulk content. + + @internal The design goal of this class is to supplant the free functions later in this header. + + */ +class BulkFile : public FilePath +{ + typedef BulkFile self; ///< Self reference type. + typedef FilePath super; ///< Parent type. +public: + // Inherit super class constructors. + using super::super; + ///< Conversion constructor from base class. + BulkFile(super &&that); + /// Read the contents of the file in a local buffer. + /// @return @c errno + int load(); + StringView content() const; + +private: + ats_scoped_str _content; ///< The file contents. + size_t _len; ///< Length of file content. +}; /* ------------------------------------------------------------------- */ - inline FilePath::FilePath() {} - inline FilePath::FilePath(char const* path) : _path(ats_strdup(path)) {} - inline FilePath::FilePath(StringView const& path) - { - _path = static_cast(ats_malloc(path.size()+1)); - memcpy(_path, path.ptr(), path.size()); - _path[path.size()] = 0; - } - inline FilePath::FilePath(self const& that) : _path(ats_strdup(static_cast(that))) {} - inline FilePath::FilePath(self&& that) : _path(static_cast(that._path)) {} - inline FilePath::operator const char* () const { return _path; } - inline char const* FilePath::path() const { return _path; } - - inline bool FilePath::has_path() const { return _path && 0 != _path[0]; } - inline bool FilePath::is_absolute() const { return _path && '/' == _path[0]; } - inline bool FilePath::is_relative() const { return !this->is_absolute(); } - - inline struct stat const* FilePath::stat() const - { - if (!_stat_p) _stat_p = ::stat(_path, &_stat) >= 0; - return _stat_p ? 
&_stat : nullptr; - } - - FilePath operator / (FilePath const& lhs, FilePath const& rhs); - FilePath operator / (char const* lhs, FilePath const& rhs); - - inline int FilePath::file_type() const { return this->stat() ? (_stat.st_mode & S_IFMT) : 0; } - - inline bool FilePath::is_dir() const { return this->file_type() == S_IFDIR; } - inline bool FilePath::is_char_device() const { return this->file_type() == S_IFCHR; } - inline bool FilePath::is_block_device() const { return this->file_type() == S_IFBLK; } - inline bool FilePath::is_regular_file() const { return this->file_type() == S_IFREG; } - - inline BulkFile::BulkFile(super&& that) : super(that) {} +inline FilePath::FilePath() +{ +} +inline FilePath::FilePath(char const *path) : _path(ats_strdup(path)) +{ +} +inline FilePath::FilePath(StringView const &path) +{ + _path = static_cast(ats_malloc(path.size() + 1)); + memcpy(_path, path.ptr(), path.size()); + _path[path.size()] = 0; +} +inline FilePath::FilePath(self const &that) : _path(ats_strdup(static_cast(that))) +{ +} +inline FilePath::FilePath(self &&that) : _path(static_cast(that._path)) +{ +} +inline FilePath::operator const char *() const +{ + return _path; +} +inline char const * +FilePath::path() const +{ + return _path; +} + +inline bool +FilePath::has_path() const +{ + return _path && 0 != _path[0]; +} +inline bool +FilePath::is_absolute() const +{ + return _path && '/' == _path[0]; +} +inline bool +FilePath::is_relative() const +{ + return !this->is_absolute(); +} + +inline struct stat const * +FilePath::stat() const +{ + if (!_stat_p) + _stat_p = ::stat(_path, &_stat) >= 0; + return _stat_p ? &_stat : nullptr; +} + +FilePath operator/(FilePath const &lhs, FilePath const &rhs); +FilePath operator/(char const *lhs, FilePath const &rhs); + +inline int +FilePath::file_type() const +{ + return this->stat() ? 
(_stat.st_mode & S_IFMT) : 0; +} + +inline bool +FilePath::is_dir() const +{ + return this->file_type() == S_IFDIR; +} +inline bool +FilePath::is_char_device() const +{ + return this->file_type() == S_IFCHR; +} +inline bool +FilePath::is_block_device() const +{ + return this->file_type() == S_IFBLK; +} +inline bool +FilePath::is_regular_file() const +{ + return this->file_type() == S_IFREG; +} + +inline BulkFile::BulkFile(super &&that) : super(that) +{ +} /* ------------------------------------------------------------------- */ } // namespace From d2bf39a990d5ae83a7463937d8d05788cfef10ae Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 18 Jan 2017 07:13:28 -0600 Subject: [PATCH 15/81] clang-format. --- lib/ts/Metric.h | 513 ++++++++++++++++++++++-------------------- lib/ts/test_Metric.cc | 57 ++--- 2 files changed, 305 insertions(+), 265 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index fe3120b997e..ccb8126ff4a 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -33,260 +33,297 @@ namespace ApacheTrafficServer { - /** A class to hold scaled values. - - Instances of this class have a @a count and a @a scale. The "value" of the instance is @a - count * @a scale. The scale is stored in the compiler in the class symbol table and so only - the count is a run time value. An instance with a large scale can be assign to an instance - with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents - inadvertent changes in value. Because the scales are not the same these conversions can be - lossy and the two conversions determine whether, in such a case, the result should be rounded - up or down to the nearest scale value. - - @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. 
- - @note This is modeled somewhat on @c std::chrono and serves a similar function for different - and simpler cases (where the ratio is always an integer, never a fraction). - - @see metric_round_up - @see metric_round_down - */ - template < intmax_t N, typename T = int > - class Metric - { - typedef Metric self; ///< Self reference type. - - public: - /// Scaling factor for instances. - /// Make it externally accessible. - constexpr static intmax_t SCALE = N; - typedef T Count; ///< Type used to hold the count. - - constexpr Metric(); ///< Default contructor. - ///< Construct to have @a n scaled units. - constexpr Metric(Count n); - - /// Copy constructor for same scale. - template < typename C > - Metric(Metric const& that); - - /// Copy / conversion constructor. - /// @note Requires that @c S be an integer multiple of @c SCALE. - template < intmax_t S, typename I > - Metric(Metric const& that); - - /// Direct assignment. - /// The count is set to @a n. - self& operator = (Count n); - - /// The number of scale units. - constexpr Count count() const; - /// The absolute value, scaled up. - constexpr Count units() const; - - /// Assignment operator. - /// @note Requires the scale of @c S be an integer multiple of the scale of this. - template < intmax_t S, typename I > - self& operator = (Metric const& that); - /// Assignment from same scale. - self& operator = (self const& that); - - /// Run time access to the scale of this metric (template arg @a N). - static constexpr intmax_t scale(); - - protected: - Count _n; ///< Number of scale units. 
- }; - - template < intmax_t N, typename C > - constexpr Metric::Metric() : _n() {} - template < intmax_t N, typename C > - constexpr Metric::Metric(Count n) : _n(n) {} - template < intmax_t N, typename C > - constexpr auto Metric::count() const -> Count { return _n; } - template < intmax_t N, typename C > - constexpr auto Metric::units() const -> Count { return _n * SCALE; } - template < intmax_t N, typename C > - inline auto Metric::operator = (Count n) -> self& { _n = n; return *this; } - template < intmax_t N, typename C > - inline auto Metric::operator = (self const& that) -> self& { _n = that._n; return *this; } - template < intmax_t N, typename C > - constexpr inline intmax_t Metric::scale() { return SCALE; } - - template - template - Metric::Metric(Metric const& that) : _n(static_cast(that._n)) - { - } - - template - template - Metric::Metric(Metric const& that) - { - typedef std::ratio R; - static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - } - - template - template - auto Metric::operator = (Metric const& that) -> self& - { - typedef std::ratio R; - static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - return *this; - } - - // -- Free Functions -- - - /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. - The resulting count in the return value will be the smallest count that is not smaller than the unit - value of @a src. 
- - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 586 - @endcode - */ - template < typename M, intmax_t S, typename I > - M metric_round_up(Metric const& src) - { - typedef std::ratio R; - auto c = src.count(); - - if (M::SCALE == S) { - return c; - } else if (R::den == 1) { - return c / R::num + (0 != c % R::num); // N is a multiple of S. - } else if (R::num == 1) { - return c * R::den; // S is a multiple of N. - } else { - return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num + (0 != (c % R::num)); - } - } +/** A class to hold scaled values. - /** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. - The resulting count in the return value will be the largest count that is not larger than the unit - value of @a src. - - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 585 - @endcode - */ - template < typename M, intmax_t S, typename I > - M metric_round_down(Metric const& src) - { - typedef std::ratio R; - auto c = src.count(); - - if (R::den == 1) { - return c / R::num; // S is a multiple of N. - } else if (R::num ==1) { - return c * R::den; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (c / R::num) * R::den + (( c % R::num ) * R::den) / R::num; - } - } + Instances of this class have a @a count and a @a scale. The "value" of the instance is @a + count * @a scale. 
The scale is stored in the compiler in the class symbol table and so only + the count is a run time value. An instance with a large scale can be assign to an instance + with a smaller scale and the conversion is done automatically. Conversions from a smaller to + larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents + inadvertent changes in value. Because the scales are not the same these conversions can be + lossy and the two conversions determine whether, in such a case, the result should be rounded + up or down to the nearest scale value. - /// Convert a unit value @a n to a Metric, rounding down. - template < typename M > - M metric_round_down(intmax_t n) - { - return n/M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. - } + @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. - /// Convert a unit value @a n to a Metric, rounding up. - template < typename M > - M metric_round_up(intmax_t n) - { - return M::SCALE == 1 ? n : (n/M::SCALE + (0 != (n % M::SCALE))); - } + @note This is modeled somewhat on @c std::chrono and serves a similar function for different + and simpler cases (where the ratio is always an integer, never a fraction). + @see metric_round_up + @see metric_round_down + */ +template class Metric +{ + typedef Metric self; ///< Self reference type. - // --- Compare operators +public: + /// Scaling factor for instances. + /// Make it externally accessible. + constexpr static intmax_t SCALE = N; + typedef T Count; ///< Type used to hold the count. - // Try for a bit of performance boost - if the metrics have the same scale - // just comparing the counts is sufficient and scaling conversion is avoided. - template < intmax_t N, typename C1, typename C2 > - bool operator < (Metric const& lhs, Metric const& rhs) - { - return lhs.count() < rhs.count(); - } + constexpr Metric(); ///< Default contructor. + ///< Construct to have @a n scaled units. 
+ constexpr Metric(Count n); - template < intmax_t N, typename C1, typename C2 > - bool operator == (Metric const& lhs, Metric const& rhs) - { - return lhs.count() == rhs.count(); - } + /// Copy constructor for same scale. + template Metric(Metric const &that); - // Could be derived but if we're optimizing let's avoid the extra negation. - // Or we could check if the compiler can optimize that out anyway. - template < intmax_t N, typename C1, typename C2 > - bool operator <= (Metric const& lhs, Metric const& rhs) - { - return lhs.count() <= rhs.count(); - } + /// Copy / conversion constructor. + /// @note Requires that @c S be an integer multiple of @c SCALE. + template Metric(Metric const &that); - // General base cases. - - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator < (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - // Based on tests with the GNU compiler, the fact that the conditionals are compile time - // constant causes the never taken paths to be dropped so there are no runtime conditional - // checks, even with no optimization at all. - if (R::den == 1) { return lhs.count() < rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den < rhs.count(); } - else return lhs.units() < rhs.units(); - } + /// Direct assignment. + /// The count is set to @a n. + self &operator=(Count n); - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator == (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - if (R::den == 1) { return lhs.count() == rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den == rhs.count(); } - else return lhs.units() == rhs.units(); - } + /// The number of scale units. + constexpr Count count() const; + /// The absolute value, scaled up. 
+ constexpr Count units() const; - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator <= (Metric const& lhs, Metric const& rhs) - { - typedef std::ratio R; - if (R::den == 1) { return lhs.count() <= rhs.count() * R::num; } - else if (R::num == 1) { return lhs.count() * R::den <= rhs.count(); } - else return lhs.units() <= rhs.units(); - } + /// Assignment operator. + /// @note Requires the scale of @c S be an integer multiple of the scale of this. + template self &operator=(Metric const &that); + /// Assignment from same scale. + self &operator=(self const &that); + + /// Run time access to the scale of this metric (template arg @a N). + static constexpr intmax_t scale(); - // Derived compares. No narrowing optimization needed because if the scales - // are the same the nested call with be optimized. +protected: + Count _n; ///< Number of scale units. +}; - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator > (Metric const& lhs, Metric const& rhs) - { - return rhs < lhs; +template constexpr Metric::Metric() : _n() +{ +} +template constexpr Metric::Metric(Count n) : _n(n) +{ +} +template +constexpr auto +Metric::count() const -> Count +{ + return _n; +} +template +constexpr auto +Metric::units() const -> Count +{ + return _n * SCALE; +} +template +inline auto +Metric::operator=(Count n) -> self & +{ + _n = n; + return *this; +} +template +inline auto +Metric::operator=(self const &that) -> self & +{ + _n = that._n; + return *this; +} +template +constexpr inline intmax_t +Metric::scale() +{ + return SCALE; +} + +template template Metric::Metric(Metric const &that) : _n(static_cast(that._n)) +{ +} + +template template Metric::Metric(Metric const &that) +{ + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; +} + +template +template +auto +Metric::operator=(Metric const &that) -> self & +{ + 
typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + return *this; +} + +// -- Free Functions -- + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. + The resulting count in the return value will be the smallest count that is not smaller than the unit + value of @a src. + + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 586 + @endcode + */ +template +M +metric_round_up(Metric const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (M::SCALE == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. + } else { + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); } +} + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. + The resulting count in the return value will be the largest count that is not larger than the unit + value of @a src. - template < intmax_t N1, typename C1, intmax_t N2, typename C2 > - bool operator >= (Metric const& lhs, Metric const& rhs) - { - return rhs <= lhs; + @code + typedef Metric<16> Paragraphs; + typedef Metric<1024> KiloBytes; + + Paragraphs src(37459); + auto size = metric_round_up(src); // size.count() == 585 + @endcode + */ +template +M +metric_round_down(Metric const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (R::den == 1) { + return c / R::num; // S is a multiple of N. + } else if (R::num == 1) { + return c * R::den; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. 
+ // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; } } +/// Convert a unit value @a n to a Metric, rounding down. +template +M +metric_round_down(intmax_t n) +{ + return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. +} + +/// Convert a unit value @a n to a Metric, rounding up. +template +M +metric_round_up(intmax_t n) +{ + return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); +} + +// --- Compare operators + +// Try for a bit of performance boost - if the metrics have the same scale +// just comparing the counts is sufficient and scaling conversion is avoided. +template +bool +operator<(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() < rhs.count(); +} + +template +bool +operator==(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() == rhs.count(); +} + +// Could be derived but if we're optimizing let's avoid the extra negation. +// Or we could check if the compiler can optimize that out anyway. +template +bool +operator<=(Metric const &lhs, Metric const &rhs) +{ + return lhs.count() <= rhs.count(); +} + +// General base cases. + +template +bool +operator<(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + // Based on tests with the GNU compiler, the fact that the conditionals are compile time + // constant causes the never taken paths to be dropped so there are no runtime conditional + // checks, even with no optimization at all. 
+ if (R::den == 1) { + return lhs.count() < rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den < rhs.count(); + } else + return lhs.units() < rhs.units(); +} + +template +bool +operator==(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() == rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den == rhs.count(); + } else + return lhs.units() == rhs.units(); +} + +template +bool +operator<=(Metric const &lhs, Metric const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() <= rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den <= rhs.count(); + } else + return lhs.units() <= rhs.units(); +} + +// Derived compares. No narrowing optimization needed because if the scales +// are the same the nested call with be optimized. + +template +bool +operator>(Metric const &lhs, Metric const &rhs) +{ + return rhs < lhs; +} + +template +bool +operator>=(Metric const &lhs, Metric const &rhs) +{ + return rhs <= lhs; +} +} + #endif // TS_METRIC_H diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index 54ba5dac6b0..b0ab231e597 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -25,30 +25,32 @@ #include #include -namespace ts { - using namespace ApacheTrafficServer; +namespace ts +{ +using namespace ApacheTrafficServer; } struct TestBox { - typedef TestBox self; ///< Self reference type. + typedef TestBox self; ///< Self reference type. std::string _name; static int _count; static int _fail; - TestBox(char const* name) : _name(name) {} - TestBox(std::string const& name) : _name(name) {} + TestBox(char const *name) : _name(name) {} + TestBox(std::string const &name) : _name(name) {} bool check(bool result, char const *fmt, ...) __attribute__((format(printf, 3, 4))); - static void print_summary() + static void + print_summary() { printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? 
"FAIL" : "SUCCESS"); } }; int TestBox::_count = 0; -int TestBox::_fail = 0; +int TestBox::_fail = 0; bool TestBox::check(bool result, char const *fmt, ...) @@ -57,9 +59,9 @@ TestBox::check(bool result, char const *fmt, ...) if (!result) { static constexpr size_t N = 1 << 16; - size_t n = N; + size_t n = N; size_t x; - char* s; + char *s; char buffer[N]; // just stack, go big. s = buffer; @@ -108,29 +110,30 @@ Test_2() Size_2 sz_d(29 * SCALE_1 / SCALE_2); auto sz = ts::metric_round_up(sz_a); - test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); sz = ts::metric_round_down(sz_a); - test.check(sz.count() == 0 , "Rounding down: got %d expected %d", sz.count(), 0); + test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); sz = ts::metric_round_up(sz_b); - test.check(sz.count() == 4 , "Rounding up, got %d expected %d", sz.count(), 4); + test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); sz = ts::metric_round_down(sz_b); - test.check(sz.count() == 3 , "Rounding down, got %d expected %d", sz.count(), 3); + test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); sz = ts::metric_round_up(sz_c); - test.check(sz.count() == 1 , "Rounding up, got %d expected %d", sz.count(), 1); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); sz = ts::metric_round_down(sz_c); - test.check(sz.count() == 1 , "Rounding down, got %d expected %d", sz.count(), 1); + test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); sz = ts::metric_round_up(sz_d); - test.check(sz.count() == 29 , "Rounding up, got %d expected %d", sz.count(), 29); + test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); sz = ts::metric_round_down(sz_d); - test.check(sz.count() == 29 , "Rounding down, got %d expected %d", sz.count(), 29); + test.check(sz.count() == 
29, "Rounding down, got %d expected %d", sz.count(), 29); - sz = 119; + sz = 119; sz_b = sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 // sz = sz_b; // Should not compile. - test.check(sz_b.count() == 119 * (SCALE_1/SCALE_2) , "Integral conversion, got %d expected %d", sz_b.count(), 119 * (SCALE_1/SCALE_2)); + test.check(sz_b.count() == 119 * (SCALE_1 / SCALE_2), "Integral conversion, got %d expected %d", sz_b.count(), + 119 * (SCALE_1 / SCALE_2)); } // Test common factor. @@ -148,14 +151,14 @@ Test_3() Size_2 sz_b(97); auto sz = ts::metric_round_up(sz_a); - test.check(sz.count() ==2 , "Rounding up, got %d expected %d", sz.count(), 2); + test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); sz = ts::metric_round_down(sz_a); - test.check(sz.count() == 1 , "Rounding down: got %d expected %d", sz.count(), 0); + test.check(sz.count() == 1, "Rounding down: got %d expected %d", sz.count(), 0); sz = ts::metric_round_up(sz_b); - test.check(sz.count() == 65 , "Rounding up, got %d expected %d", sz.count(), 65); + test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); sz = ts::metric_round_down(sz_b); - test.check(sz.count() == 64 , "Rounding down, got %d expected %d", sz.count(), 64); + test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); } void @@ -171,18 +174,18 @@ Test_4() // m_9 = m_4; // Should fail to compile with static assert. 
m_4 = ts::metric_round_up(m_9); - test.check(m_4.count() == 214 , "Rounding down, got %d expected %d", m_4.count(), 214); + test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); m_4 = ts::metric_round_down(m_9); - test.check(m_4.count() == 213 , "Rounding down, got %d expected %d", m_4.count(), 213); + test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); m_4 = 213; m_9 = ts::metric_round_up(m_4); - test.check(m_9.count() == 95 , "Rounding down, got %d expected %d", m_9.count(), 95); + test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); m_9 = ts::metric_round_down(m_4); test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); m_test = m_4; // Verify assignment of identical scale values compiles. - test.check(m_test.count() == 213 , "Assignment got %d expected %d", m_4.count(), 213); + test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); } int From cd86662d4cbb79876f6e7b3516c321fa22668082 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 19 Jan 2017 08:31:39 -0600 Subject: [PATCH 16/81] CacheTool: Checkpoint, converting to Errata returns. --- lib/tsconfig/Errata.h | 16 +++++++++ tools/cache_tool/CacheTool.cc | 62 ++++++++++++++++++++++++++--------- tools/cache_tool/Command.cc | 9 +++-- tools/cache_tool/Command.h | 6 ++-- 4 files changed, 70 insertions(+), 23 deletions(-) diff --git a/lib/tsconfig/Errata.h b/lib/tsconfig/Errata.h index 8cb50da40ff..259c0136944 100644 --- a/lib/tsconfig/Errata.h +++ b/lib/tsconfig/Errata.h @@ -68,6 +68,7 @@ # include # include # include +# include # include # include "NumericType.h" # include "IntrusivePtr.h" @@ -382,6 +383,21 @@ struct Errata::Message { std::string const& text ///< Final text for message. ); + /// Construct with an @a id, @a code, and a @a message. 
+ /// The message contents are created by converting the variable arguments + /// to strings using the stream operator and concatenated in order. + template < typename ... Args> + Message( + Id id, ///< Messag Id. + Code code, ///< Message Code. + Args const& ... message + ) : m_id(id), m_code(code) + { + std::ostringstream s; + (void)(int[]){0, ( (s << message) , 0 ) ... }; + m_text = s.str(); + } + /// Reset to the message to default state. self& clear(); diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index 6c6d2ddc04a..72c0845c4a8 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -73,6 +73,21 @@ struct Volume { std::vector _stripes; }; +// Data parsed from the volume config file. +struct VolumeConfig +{ + ts::Errata load(ts::FilePath const& path); + + struct VolData + { + int _idx; ///< Volume index. + int _percent; ///< Size if specified as a percent. + ts::CacheStripeBlocks _size; ///< Size if specified as an absolute. + }; + + std::vector _volumes; +}; + // All of these free functions need to be moved to the Cache class. 
bool @@ -241,7 +256,7 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) struct Cache { ~Cache(); - void load(ts::FilePath const &path); + ts::Errata load(ts::FilePath const &path); void loadConfig(ts::FilePath const &path); void loadDevice(ts::FilePath const &path); @@ -253,17 +268,20 @@ struct Cache { std::map _volumes; }; -void +ts::Errata Cache::load(ts::FilePath const &path) { + ts::Errata zret; if (!path.is_readable()) - throw(std::system_error(errno, std::system_category(), static_cast(path))); + zret = ts::Errata::Message(0,0,path," is not readable"); +// throw(std::system_error(errno, std::system_category(), static_cast(path))); else if (path.is_regular_file()) this->loadConfig(path); else if (path.is_char_device() || path.is_block_device()) this->loadDevice(path); else printf("Not a valid file type: '%s'\n", static_cast(path)); + return zret; } void @@ -411,26 +429,40 @@ Span::clearPermanently() struct option Options[] = {{"help", false, nullptr, 'h'}}; } -ts::Rv +ts::Errata List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) { + ts::Errata zret; Cache cache; - cache.load(TargetFile); - cache.dumpSpans(depth); - cache.dumpVolumes(); - return true; + + if ((zret = cache.load(TargetFile))) { + cache.dumpSpans(depth); + cache.dumpVolumes(); + } + return zret; +} + +ts::Errata +Simulate_Span_Allocation(int argc, char *argv[]) +{ + ts::Errata zret; + return zret; } -ts::Rv +ts::Errata Clear_Spans(int argc, char *argv[]) { + ts::Errata zret; + Cache cache; OPEN_RW_FLAGS = O_RDWR; - cache.load(TargetFile); - for (auto *span : cache._spans) { - span->clearPermanently(); + if ((zret = cache.load(TargetFile))) { + for (auto *span : cache._spans) { + span->clearPermanently(); + } } - return true; + + return zret; } int @@ -465,10 +497,10 @@ main(int argc, char *argv[]) argc -= optind + 1; argv += optind + 1; } - ts::Rv result = Commands.invoke(argc, argv); + ts::Errata result = Commands.invoke(argc, argv); if (!result) { - 
std::cerr << result.errata(); + std::cerr << result; } return 0; } diff --git a/tools/cache_tool/Command.cc b/tools/cache_tool/Command.cc index f59d7cfaa18..a0a1214e8f7 100644 --- a/tools/cache_tool/Command.cc +++ b/tools/cache_tool/Command.cc @@ -80,17 +80,17 @@ CommandTable::Command::subCommand(std::string const &name, std::string const &he return _group.back(); } -ts::Rv +ts::Errata CommandTable::Command::invoke(int argc, char *argv[]) { - ts::Rv zret = true; + ts::Errata zret; if (CommandTable::_opt_idx >= argc || argv[CommandTable::_opt_idx][0] == '-') { // Tail of command keywords, try to invoke. if (_func) zret = _func(argc - CommandTable::_opt_idx, argv + CommandTable::_opt_idx); else - zret = false, zret = ERR_SUBCOMMAND_REQUIRED(); + zret = ERR_SUBCOMMAND_REQUIRED(); } else { char const *tag = argv[CommandTable::_opt_idx]; auto spot = std::find_if(_group.begin(), _group.end(), @@ -99,7 +99,6 @@ CommandTable::Command::invoke(int argc, char *argv[]) ++CommandTable::_opt_idx; zret = spot->invoke(argc, argv); } else { - zret = false; zret = ERR_COMMAND_TAG_NOT_FOUND(tag); } } @@ -152,7 +151,7 @@ CommandTable::add(std::string const &name, std::string const &help, CommandFunct return _top.subCommand(name, help, f); } -ts::Rv +ts::Errata CommandTable::invoke(int argc, char *argv[]) { _opt_idx = 0; diff --git a/tools/cache_tool/Command.h b/tools/cache_tool/Command.h index 4c336b83c64..ebc6d55f67b 100644 --- a/tools/cache_tool/Command.h +++ b/tools/cache_tool/Command.h @@ -45,7 +45,7 @@ class CommandTable typedef CommandTable self; ///< Self reference type. public: /// Signature for actual command implementation. - typedef std::function(int argc, char *argv[])> CommandFunction; + typedef std::function CommandFunction; CommandTable(); @@ -74,7 +74,7 @@ class CommandTable /** Invoke a command. @return The return value of the executed command, or an error value if the command was not found. 
*/ - ts::Rv invoke(int argc, char *argv[]); + ts::Errata invoke(int argc, char *argv[]); void helpMessage(int argc, char *argv[], std::ostream &out = std::cerr, std::string const &prefix = std::string()) const; @@ -111,7 +111,7 @@ class CommandTable /** Invoke a command. @return The return value of the executed command, or an error value if the command was not found. */ - ts::Rv invoke(int argc, char *argv[]); + ts::Errata invoke(int argc, char *argv[]); void helpMessage(int argc, char *argv[]) const; From 8cc25f7416c7aabd6b7556d21f222b74cd943199 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 19 Jan 2017 23:02:47 -0600 Subject: [PATCH 17/81] CacheTool: Errata updates. --- lib/tsconfig/Errata.cc | 45 ++++++++++++++++++++------------- lib/tsconfig/Errata.h | 57 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 74 insertions(+), 28 deletions(-) diff --git a/lib/tsconfig/Errata.cc b/lib/tsconfig/Errata.cc index 6352219d95e..dc92b32064b 100644 --- a/lib/tsconfig/Errata.cc +++ b/lib/tsconfig/Errata.cc @@ -53,6 +53,11 @@ Errata::Data::push(Message const& msg) { m_items.push_back(msg); } +void +Errata::Data::push(Message && msg) { + m_items.push_back(std::move(msg)); +} + Errata::Message const& Errata::Data::top() const { return m_items.size() ? m_items.back() : NIL_MESSAGE ; @@ -65,21 +70,16 @@ inline Errata::Errata(ImpPtr const& ptr) Errata::Data::~Data() { if (m_log_on_delete) { Errata tmp(this); // because client API requires a wrapper. - std::deque::iterator spot, limit; - for ( spot = Sink_List.begin(), limit = Sink_List.end(); - spot != limit; - ++spot - ) { - (**spot)(tmp); - } + for ( auto& f : Sink_List ) (*f)(tmp); tmp.m_data.release(); // don't delete this again. } } -Errata::Errata() { +Errata::Errata(self const& that) + : m_data(that.m_data) { } -Errata::Errata(self const& that) +inline Errata::Errata(self && that) : m_data(that.m_data) { } @@ -111,7 +111,7 @@ Errata::pre_write() { } // Just create an instance if needed. 
-Errata::Data* +Errata::Data const* Errata::instance() { if (!m_data) m_data = new Data; return m_data.get(); @@ -123,6 +123,12 @@ Errata::push(Message const& msg) { return *this; } +Errata& +Errata::push(Message && msg) { + this->pre_write()->push(std::move(msg)); + return *this; +} + Errata& Errata::operator=(self const& that) { m_data = that.m_data; @@ -142,6 +148,12 @@ Errata::operator = (Message const& msg) { return *this; } +Errata& +Errata::operator = (self && that) { + m_data = that.m_data; + return *this; +} + Errata& Errata::pull(self& that) { if (that.m_data) { @@ -216,19 +228,17 @@ Errata::write( int shift, char const* lead ) const { - for ( const_iterator spot = this->begin(), limit = this->end(); - spot != limit; - ++spot - ) { + + for ( auto m : *this ) { if ((offset + indent) > 0) out << std::setw(indent + offset) << std::setfill(' ') << ((indent > 0 && lead) ? lead : " "); - out << spot->m_id << " [" << spot->m_code << "]: " << spot->m_text + out << m.m_id << " [" << m.m_code << "]: " << m.m_text << std::endl ; - if (spot->getErrata().size()) - spot->getErrata().write(out, offset, indent+shift, shift, lead); + if (m.getErrata().size()) + m.getErrata().write(out, offset, indent+shift, shift, lead); } return out; @@ -256,4 +266,3 @@ std::ostream& operator<< (std::ostream& os, Errata const& err) { } } // namespace ts - diff --git a/lib/tsconfig/Errata.h b/lib/tsconfig/Errata.h index 259c0136944..7c959416512 100644 --- a/lib/tsconfig/Errata.h +++ b/lib/tsconfig/Errata.h @@ -142,6 +142,11 @@ class Errata { Message const& msg ///< Message to push ); + /// Move constructor. + Errata(self && that); + /// Move constructor from @c Message. + Errata(Message && msg); + /// destructor ~Errata(); @@ -151,6 +156,9 @@ class Errata { const self& that ///< Source instance. ); + /// Move assignment. + self& operator = (self && that); + /** Assign message. All other messages are discarded. @return A reference to this object. 
@@ -182,6 +190,15 @@ class Errata { @return A reference to this object. */ self& push(Message const& msg); + self& push(Message && msg); + + /** Push a constructed @c Message. + The @c Message is set to have the @a id and @a code. The other arguments are converted + to strings and concatenated to form the messsage text. + @return A reference to this object. + */ + template < typename ... Args > + self& push(Id id, Code code, Args const& ... args); /** Push a nested status. @a err becomes the top item. @@ -343,7 +360,7 @@ class Errata { Data* pre_write(); /// Force and return an implementation instance. /// Does not follow copy on write. - Data* instance(); + Data const* instance(); /// Used for returns when no data is present. static Message const NIL_MESSAGE; @@ -388,15 +405,10 @@ struct Errata::Message { /// to strings using the stream operator and concatenated in order. template < typename ... Args> Message( - Id id, ///< Messag Id. + Id id, ///< Message Id. Code code, ///< Message Code. - Args const& ... message - ) : m_id(id), m_code(code) - { - std::ostringstream s; - (void)(int[]){0, ( (s << message) , 0 ) ... }; - m_text = s.str(); - } + Args const& ... text + ); /// Reset to the message to default state. self& clear(); @@ -468,6 +480,8 @@ struct Errata::Message { static SuccessTest const DEFAULT_SUCCESS_TEST; + template < typename ... Args> static std::string stringify(Args const& ... items); + Id m_id; ///< Message ID. Code m_code; ///< Message code. std::string m_text; ///< Final text. @@ -500,9 +514,10 @@ struct Errata::Data : public IntrusivePtrCounter { /// Put a message on top of the stack. void push(Message const& msg); + void push(Message && msg); /// Log this when it is deleted. - bool m_log_on_delete; + mutable bool m_log_on_delete; //! The message stack. 
Container m_items; @@ -746,6 +761,12 @@ inline Errata::Message::Message(Id id, std::string const& text) inline Errata::Message::Message(Id id, Code code, std::string const& text) : m_id(id), m_code(code), m_text(text) { } +template < typename ... Args> +Errata::Message::Message(Id id, Code code, Args const& ... text) + : m_id(id), m_code(code), m_text(stringify(text ...)) +{ +} + inline Errata::Message& Errata::Message::clear() { m_id = 0; m_code = Default_Code; @@ -780,6 +801,15 @@ inline Errata::Message& Errata::Message::set(Errata const& err) { return *this; } +template < typename ... Args> +std::string Errata::Message::stringify(Args const& ... items) +{ + std::ostringstream s; + (void)(int[]){0, ( (s << items) , 0 ) ... }; + return s.str(); +} + +inline Errata::Errata() {} inline Errata::Errata(Id id, Code code, std::string const& text) { this->push(Message(id, code, text)); } @@ -818,6 +848,13 @@ Errata::push(Id id, Code code, std::string const& text) { return *this; } +template < typename ... Args > +auto Errata::push(Id id, Code code, Args const& ... args) -> self& +{ + this->push(Message(id, code, args ...)); + return *this; +} + inline Errata::Message const& Errata::top() const { return m_data ? m_data->top() : NIL_MESSAGE; From 82b1136abe7a36eb819860d9d84f9b911d77c0e5 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 19 Jan 2017 23:03:02 -0600 Subject: [PATCH 18/81] CacheTool: Updates for new Errata changes. 
--- tools/cache_tool/CacheTool.cc | 67 +++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 11 deletions(-) diff --git a/tools/cache_tool/CacheTool.cc b/tools/cache_tool/CacheTool.cc index 72c0845c4a8..41884df70da 100644 --- a/tools/cache_tool/CacheTool.cc +++ b/tools/cache_tool/CacheTool.cc @@ -257,8 +257,8 @@ struct Cache { ~Cache(); ts::Errata load(ts::FilePath const &path); - void loadConfig(ts::FilePath const &path); - void loadDevice(ts::FilePath const &path); + ts::Errata loadConfig(ts::FilePath const &path); + ts::Errata loadDevice(ts::FilePath const &path); enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; void dumpSpans(SpanDumpDepth depth); @@ -273,23 +273,25 @@ Cache::load(ts::FilePath const &path) { ts::Errata zret; if (!path.is_readable()) - zret = ts::Errata::Message(0,0,path," is not readable"); + zret = ts::Errata::Message(0, EPERM, path," is not readable."); // throw(std::system_error(errno, std::system_category(), static_cast(path))); else if (path.is_regular_file()) - this->loadConfig(path); + zret = this->loadConfig(path); else if (path.is_char_device() || path.is_block_device()) - this->loadDevice(path); + zret = this->loadDevice(path); else - printf("Not a valid file type: '%s'\n", static_cast(path)); + zret = ts::Errata::Message(0, EBADF, path, " is not a valid file type"); return zret; } -void +ts::Errata Cache::loadConfig(ts::FilePath const &path) { static const ts::StringView TAG_ID("id"); static const ts::StringView TAG_VOL("volume"); + ts::Errata zret; + ts::BulkFile cfile(path); if (0 == cfile.load()) { ts::StringView content = cfile.content(); @@ -311,15 +313,19 @@ Cache::loadConfig(ts::FilePath const &path) } } } - this->load(ts::FilePath(path)); + zret = this->load(ts::FilePath(path)); } } + } else { + zret = ts::Errata::Message(0, EBADF, "Unable to load ", path); } + return zret; } -void +ts::Errata Cache::loadDevice(ts::FilePath const &path) { + ts::Errata zret; int flags; flags = OPEN_RW_FLAGS @@ -362,11 
+368,12 @@ Cache::loadDevice(ts::FilePath const &path) } } } else { - printf("Failed to read from '%s' [%d]\n", path.path(), errno); + zret = ts::Errata::Message(0, errno, "Failed to read from ", path, '[', errno, ':', strerror(errno), ']'); } } else { - printf("Unable to open '%s'\n", static_cast(path)); + zret = ts::Errata::Message(0, errno, "Unable to open ", path); } + return zret; } void @@ -425,7 +432,45 @@ Span::clearPermanently() } std::cout << std::endl; } +/* --------------------------------------------------------------------------------------- */ +ts::Errata +VolumeConfig::load(ts::FilePath const& path) +{ + static const ts::StringView TAG_SIZE("size"); + static const ts::StringView TAG_VOL("volume"); + + ts::Errata zret; + + int ln = 0; + ts::BulkFile cfile(path); + if (0 == cfile.load()) { + ts::StringView content = cfile.content(); + while (content) { + ++ln; + ts::StringView line = content.splitPrefix('\n'); + line.ltrim(&isspace); + if (!line || '#' == *line) + continue; + + VolData v; + while (line) { + ts::StringView value(line.extractPrefix(&isspace)); + ts::StringView tag(value.splitPrefix('=')); + if (!tag) { + zret.push(0, 1, "Line ", ln, " is invalid"); + } else if (0 == strcasecmp(tag, TAG_SIZE)) { + auto n = ts::svtoi(value); + } else if (0 == strcasecmp(tag, TAG_VOL)) { + } + } + } + } else { + zret = ts::Errata::Message(0, EBADF, "Unable to load ", path); + } + return zret; +} +/* --------------------------------------------------------------------------------------- */ struct option Options[] = {{"help", false, nullptr, 'h'}}; } From 8ca6489609243976ece861ae131b9e6cb5ec6f68 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 15:46:08 -0600 Subject: [PATCH 19/81] Scalar: Add integer comparisons. 
--- lib/ts/Metric.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index ccb8126ff4a..0779889e6ab 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -248,6 +248,20 @@ operator<(Metric const &lhs, Metric const &rhs) return lhs.count() < rhs.count(); } +template +bool +operator<(Metric const &lhs, C n) +{ + return lhs.count() < n; +} + +template +bool +operator<(C n, Metric const &rhs) +{ + return n < rhs.count(); +} + template bool operator==(Metric const &lhs, Metric const &rhs) @@ -255,6 +269,20 @@ operator==(Metric const &lhs, Metric const &rhs) return lhs.count() == rhs.count(); } +template +bool +operator==(Metric const &lhs, C n) +{ + return lhs.count() == n; +} + +template +bool +operator==(C n, Metric const &rhs) +{ + return n == rhs.count(); +} + // Could be derived but if we're optimizing let's avoid the extra negation. // Or we could check if the compiler can optimize that out anyway. template @@ -264,6 +292,50 @@ operator<=(Metric const &lhs, Metric const &rhs) return lhs.count() <= rhs.count(); } +template +bool +operator<=(Metric const &lhs, C n) +{ + return lhs.count() <= n; +} + +template +bool +operator<=(C n, Metric const &rhs) +{ + return n <= rhs.count(); +} + +// Do the integer compares. + +template +bool +operator>(Metric const &lhs, C n) +{ + return lhs.count() > n; +} + +template +bool +operator>(C n, Metric const &rhs) +{ + return n > rhs.count(); +} + +template +bool +operator>=(Metric const &lhs, C n) +{ + return lhs.count() >= n; +} + +template +bool +operator>=(C n, Metric const &rhs) +{ + return n >= rhs.count(); +} + // General base cases. template From 0450c97a99b34381a93ee2bf92de1986ca2d9f6c Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 24 Jan 2017 05:19:31 -0600 Subject: [PATCH 20/81] Scalar: Add comparison operators. 
--- lib/ts/Metric.h | 144 ++++++++++++++++++++---------------------- lib/ts/test_Metric.cc | 16 +++++ 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 0779889e6ab..5a0561e46db 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -33,6 +33,24 @@ namespace ApacheTrafficServer { +namespace detail +{ + // The built in type 'int' is special because that's the type for untyped numbers. + // That means if we have operators for comparison to unscaled values there is ambiguity if + // the internal counter type is also 'int'. To avoid that this class (and hence methods) are + // inherited so if there is a conflict these methods are silently overridden. + template < intmax_t N, typename C > + struct ScalarIntOperators + { + bool operator < (int n) { return *this < static_cast(n); } + bool operator > (int n) { return *this > static_cast(n); } + }; + + template < intmax_t N > + struct ScalarIntOperators + { + }; +} /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. The "value" of the instance is @a @@ -52,7 +70,7 @@ namespace ApacheTrafficServer @see metric_round_up @see metric_round_down */ -template class Metric +template class Metric { typedef Metric self; ///< Self reference type. @@ -60,14 +78,14 @@ template class Metric /// Scaling factor for instances. /// Make it externally accessible. constexpr static intmax_t SCALE = N; - typedef T Count; ///< Type used to hold the count. + typedef C Count; ///< Type used to hold the count. constexpr Metric(); ///< Default contructor. ///< Construct to have @a n scaled units. constexpr Metric(Count n); /// Copy constructor for same scale. - template Metric(Metric const &that); + template Metric(Metric const &that); /// Copy / conversion constructor. /// @note Requires that @c S be an integer multiple of @c SCALE. @@ -88,6 +106,12 @@ template class Metric /// Assignment from same scale. self &operator=(self const &that); + ///@{ Comparisons. 
+ // bool operator < (C n); + // bool operator > (C n); + // bool operator == (C n); + ///@} + /// Run time access to the scale of this metric (template arg @a N). static constexpr intmax_t scale(); @@ -248,20 +272,6 @@ operator<(Metric const &lhs, Metric const &rhs) return lhs.count() < rhs.count(); } -template -bool -operator<(Metric const &lhs, C n) -{ - return lhs.count() < n; -} - -template -bool -operator<(C n, Metric const &rhs) -{ - return n < rhs.count(); -} - template bool operator==(Metric const &lhs, Metric const &rhs) @@ -269,20 +279,6 @@ operator==(Metric const &lhs, Metric const &rhs) return lhs.count() == rhs.count(); } -template -bool -operator==(Metric const &lhs, C n) -{ - return lhs.count() == n; -} - -template -bool -operator==(C n, Metric const &rhs) -{ - return n == rhs.count(); -} - // Could be derived but if we're optimizing let's avoid the extra negation. // Or we could check if the compiler can optimize that out anyway. template @@ -292,50 +288,6 @@ operator<=(Metric const &lhs, Metric const &rhs) return lhs.count() <= rhs.count(); } -template -bool -operator<=(Metric const &lhs, C n) -{ - return lhs.count() <= n; -} - -template -bool -operator<=(C n, Metric const &rhs) -{ - return n <= rhs.count(); -} - -// Do the integer compares. - -template -bool -operator>(Metric const &lhs, C n) -{ - return lhs.count() > n; -} - -template -bool -operator>(C n, Metric const &rhs) -{ - return n > rhs.count(); -} - -template -bool -operator>=(Metric const &lhs, C n) -{ - return lhs.count() >= n; -} - -template -bool -operator>=(C n, Metric const &rhs) -{ - return n >= rhs.count(); -} - // General base cases. template @@ -396,6 +348,46 @@ operator>=(Metric const &lhs, Metric const &rhs) { return rhs <= lhs; } -} +// Do the integer compares. +// A bit ugly to handle the issue that integers without explicit type are 'int'. Therefore suppport must be provided +// for comparison not just the counter type C but also explicitly 'int'. 
That makes the operators ambiguous if C is +// 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. + +template bool operator < (Metric const &lhs, C n) { return lhs.count() < n; } +template bool operator < (C n, Metric const &rhs) { return n < rhs.count(); } +template bool operator < (Metric const &lhs, int n) { return lhs.count() < static_cast(n); } +template bool operator < (int n, Metric const &rhs) { return static_cast(n) < rhs.count(); } +template bool operator < (Metric const &lhs, int n) { return lhs.count() < n; } +template bool operator < (int n, Metric const &rhs) { return n < rhs.count(); } + +template bool operator == (Metric const &lhs, C n) { return lhs.count() == n; } +template bool operator == (C n, Metric const &rhs) { return n == rhs.count(); } +template bool operator == (Metric const &lhs, int n) { return lhs.count() == static_cast(n); } +template bool operator == (int n, Metric const &rhs) { return static_cast(n) == rhs.count(); } +template bool operator == (Metric const &lhs, int n) { return lhs.count() == n; } +template bool operator == (int n, Metric const &rhs) { return n == rhs.count(); } + +template bool operator > (Metric const &lhs, C n) { return lhs.count() > n; } +template bool operator > (C n, Metric const &rhs) { return n > rhs.count(); } +template bool operator > (Metric const &lhs, int n) { return lhs.count() > static_cast(n); } +template bool operator > (int n, Metric const &rhs) { return static_cast(n) > rhs.count(); } +template bool operator > (Metric const &lhs, int n) { return lhs.count() > n; } +template bool operator > (int n, Metric const &rhs) { return n > rhs.count(); } + +template bool operator <= (Metric const &lhs, C n) { return lhs.count() <= n; } +template bool operator <= (C n, Metric const &rhs) { return n <= rhs.count(); } +template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= static_cast(n); } +template bool operator <= (int n, Metric const &rhs) 
{ return static_cast(n) <= rhs.count(); } +template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= n; } +template bool operator <= (int n, Metric const &rhs) { return n <= rhs.count(); } + +template bool operator >= (Metric const &lhs, C n) { return lhs.count() >= n; } +template bool operator >= (C n, Metric const &rhs) { return n >= rhs.count(); } +template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= static_cast(n); } +template bool operator >= (int n, Metric const &rhs) { return static_cast(n) >= rhs.count(); } +template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= n; } +template bool operator >= (int n, Metric const &rhs) { return n >= rhs.count(); } + +} // namespace #endif // TS_METRIC_H diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index b0ab231e597..932f7893539 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -24,6 +24,7 @@ #include #include #include +#include namespace ts { @@ -188,6 +189,21 @@ Test_4() test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); } +void +test_Compile() +{ + // These tests aren't normally run, they exist to detect compiler issues. + + typedef ts::Metric<1024, long int> KBytes; + typedef ts::Metric<1024, int> KiBytes; + + KBytes x(12); + KiBytes y(12); + + if (x > 12) std::cout << "Operator > works" << std::endl; + if (y > 12) std::cout << "Operator > works" << std::endl; +} + int main(int, char **) { From 992865c0e47a9e2d4293b10c7b26151872949ab8 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 17:52:38 -0600 Subject: [PATCH 21/81] Scalar: Rename files, update Makefile. 
--- lib/ts/Makefile.am | 4 ++-- lib/ts/{Metric.h => Scalar.h} | 0 lib/ts/{test_Metric.cc => test_Scalar.cc} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename lib/ts/{Metric.h => Scalar.h} (100%) rename lib/ts/{test_Metric.cc => test_Scalar.cc} (100%) diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am index 4d2a17b30c9..14243c35f62 100644 --- a/lib/ts/Makefile.am +++ b/lib/ts/Makefile.am @@ -23,7 +23,7 @@ library_includedir=$(includedir)/ts library_include_HEADERS = apidefs.h noinst_PROGRAMS = mkdfa CompileParseRules -check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator test_Metric +check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator test_Scalar TESTS_ENVIRONMENT = LSAN_OPTIONS=suppressions=suppression.txt @@ -243,7 +243,7 @@ test_tsutil_SOURCES = \ test_Regex.cc \ tests.cc -test_Metric_SOURCES = test_Metric.cc +test_Scalar_SOURCES = test_Scalar.cc Scalar.h CompileParseRules_SOURCES = CompileParseRules.cc diff --git a/lib/ts/Metric.h b/lib/ts/Scalar.h similarity index 100% rename from lib/ts/Metric.h rename to lib/ts/Scalar.h diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Scalar.cc similarity index 100% rename from lib/ts/test_Metric.cc rename to lib/ts/test_Scalar.cc From 423e2bc90d3653a28d3de7632d4a12b90bd575fe Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 17:53:22 -0600 Subject: [PATCH 22/81] Scalar: Change Metric -> Scalar in code. 
--- lib/ts/Scalar.h | 134 +++++++++++++++++++++--------------------- lib/ts/test_Scalar.cc | 28 ++++----- 2 files changed, 81 insertions(+), 81 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 5a0561e46db..fe045a9c0ab 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -70,9 +70,9 @@ namespace detail @see metric_round_up @see metric_round_down */ -template class Metric +template class Scalar { - typedef Metric self; ///< Self reference type. + typedef Scalar self; ///< Self reference type. public: /// Scaling factor for instances. @@ -80,16 +80,16 @@ template class Metric constexpr static intmax_t SCALE = N; typedef C Count; ///< Type used to hold the count. - constexpr Metric(); ///< Default contructor. + constexpr Scalar(); ///< Default contructor. ///< Construct to have @a n scaled units. - constexpr Metric(Count n); + constexpr Scalar(Count n); /// Copy constructor for same scale. - template Metric(Metric const &that); + template Scalar(Scalar const &that); /// Copy / conversion constructor. /// @note Requires that @c S be an integer multiple of @c SCALE. - template Metric(Metric const &that); + template Scalar(Scalar const &that); /// Direct assignment. /// The count is set to @a n. @@ -102,7 +102,7 @@ template class Metric /// Assignment operator. /// @note Requires the scale of @c S be an integer multiple of the scale of this. - template self &operator=(Metric const &that); + template self &operator=(Scalar const &that); /// Assignment from same scale. self &operator=(self const &that); @@ -119,50 +119,50 @@ template class Metric Count _n; ///< Number of scale units. 
}; -template constexpr Metric::Metric() : _n() +template constexpr Scalar::Scalar() : _n() { } -template constexpr Metric::Metric(Count n) : _n(n) +template constexpr Scalar::Scalar(Count n) : _n(n) { } template constexpr auto -Metric::count() const -> Count +Scalar::count() const -> Count { return _n; } template constexpr auto -Metric::units() const -> Count +Scalar::units() const -> Count { return _n * SCALE; } template inline auto -Metric::operator=(Count n) -> self & +Scalar::operator=(Count n) -> self & { _n = n; return *this; } template inline auto -Metric::operator=(self const &that) -> self & +Scalar::operator=(self const &that) -> self & { _n = that._n; return *this; } template constexpr inline intmax_t -Metric::scale() +Scalar::scale() { return SCALE; } -template template Metric::Metric(Metric const &that) : _n(static_cast(that._n)) +template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) { } -template template Metric::Metric(Metric const &that) +template template Scalar::Scalar(Scalar const &that) { typedef std::ratio R; static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); @@ -172,7 +172,7 @@ template template Metric template template auto -Metric::operator=(Metric const &that) -> self & +Scalar::operator=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); @@ -187,8 +187,8 @@ Metric::operator=(Metric const &that) -> self & value of @a src. 
@code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; + typedef Scalar<16> Paragraphs; + typedef Scalar<1024> KiloBytes; Paragraphs src(37459); auto size = metric_round_up(src); // size.count() == 586 @@ -196,7 +196,7 @@ Metric::operator=(Metric const &that) -> self & */ template M -metric_round_up(Metric const &src) +metric_round_up(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -217,8 +217,8 @@ metric_round_up(Metric const &src) value of @a src. @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; + typedef Scalar<16> Paragraphs; + typedef Scalar<1024> KiloBytes; Paragraphs src(37459); auto size = metric_round_up(src); // size.count() == 585 @@ -226,7 +226,7 @@ metric_round_up(Metric const &src) */ template M -metric_round_down(Metric const &src) +metric_round_down(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -245,7 +245,7 @@ metric_round_down(Metric const &src) } } -/// Convert a unit value @a n to a Metric, rounding down. +/// Convert a unit value @a n to a Scalar, rounding down. template M metric_round_down(intmax_t n) @@ -253,7 +253,7 @@ metric_round_down(intmax_t n) return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. } -/// Convert a unit value @a n to a Metric, rounding up. +/// Convert a unit value @a n to a Scalar, rounding up. template M metric_round_up(intmax_t n) @@ -267,14 +267,14 @@ metric_round_up(intmax_t n) // just comparing the counts is sufficient and scaling conversion is avoided. template bool -operator<(Metric const &lhs, Metric const &rhs) +operator<(Scalar const &lhs, Scalar const &rhs) { return lhs.count() < rhs.count(); } template bool -operator==(Metric const &lhs, Metric const &rhs) +operator==(Scalar const &lhs, Scalar const &rhs) { return lhs.count() == rhs.count(); } @@ -283,7 +283,7 @@ operator==(Metric const &lhs, Metric const &rhs) // Or we could check if the compiler can optimize that out anyway. 
template bool -operator<=(Metric const &lhs, Metric const &rhs) +operator<=(Scalar const &lhs, Scalar const &rhs) { return lhs.count() <= rhs.count(); } @@ -292,7 +292,7 @@ operator<=(Metric const &lhs, Metric const &rhs) template bool -operator<(Metric const &lhs, Metric const &rhs) +operator<(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; // Based on tests with the GNU compiler, the fact that the conditionals are compile time @@ -308,7 +308,7 @@ operator<(Metric const &lhs, Metric const &rhs) template bool -operator==(Metric const &lhs, Metric const &rhs) +operator==(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -321,7 +321,7 @@ operator==(Metric const &lhs, Metric const &rhs) template bool -operator<=(Metric const &lhs, Metric const &rhs) +operator<=(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -337,14 +337,14 @@ operator<=(Metric const &lhs, Metric const &rhs) template bool -operator>(Metric const &lhs, Metric const &rhs) +operator>(Scalar const &lhs, Scalar const &rhs) { return rhs < lhs; } template bool -operator>=(Metric const &lhs, Metric const &rhs) +operator>=(Scalar const &lhs, Scalar const &rhs) { return rhs <= lhs; } @@ -354,40 +354,40 @@ operator>=(Metric const &lhs, Metric const &rhs) // for comparison not just the counter type C but also explicitly 'int'. That makes the operators ambiguous if C is // 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. 
-template bool operator < (Metric const &lhs, C n) { return lhs.count() < n; } -template bool operator < (C n, Metric const &rhs) { return n < rhs.count(); } -template bool operator < (Metric const &lhs, int n) { return lhs.count() < static_cast(n); } -template bool operator < (int n, Metric const &rhs) { return static_cast(n) < rhs.count(); } -template bool operator < (Metric const &lhs, int n) { return lhs.count() < n; } -template bool operator < (int n, Metric const &rhs) { return n < rhs.count(); } - -template bool operator == (Metric const &lhs, C n) { return lhs.count() == n; } -template bool operator == (C n, Metric const &rhs) { return n == rhs.count(); } -template bool operator == (Metric const &lhs, int n) { return lhs.count() == static_cast(n); } -template bool operator == (int n, Metric const &rhs) { return static_cast(n) == rhs.count(); } -template bool operator == (Metric const &lhs, int n) { return lhs.count() == n; } -template bool operator == (int n, Metric const &rhs) { return n == rhs.count(); } - -template bool operator > (Metric const &lhs, C n) { return lhs.count() > n; } -template bool operator > (C n, Metric const &rhs) { return n > rhs.count(); } -template bool operator > (Metric const &lhs, int n) { return lhs.count() > static_cast(n); } -template bool operator > (int n, Metric const &rhs) { return static_cast(n) > rhs.count(); } -template bool operator > (Metric const &lhs, int n) { return lhs.count() > n; } -template bool operator > (int n, Metric const &rhs) { return n > rhs.count(); } - -template bool operator <= (Metric const &lhs, C n) { return lhs.count() <= n; } -template bool operator <= (C n, Metric const &rhs) { return n <= rhs.count(); } -template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= static_cast(n); } -template bool operator <= (int n, Metric const &rhs) { return static_cast(n) <= rhs.count(); } -template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= n; } -template bool 
operator <= (int n, Metric const &rhs) { return n <= rhs.count(); } - -template bool operator >= (Metric const &lhs, C n) { return lhs.count() >= n; } -template bool operator >= (C n, Metric const &rhs) { return n >= rhs.count(); } -template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= static_cast(n); } -template bool operator >= (int n, Metric const &rhs) { return static_cast(n) >= rhs.count(); } -template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= n; } -template bool operator >= (int n, Metric const &rhs) { return n >= rhs.count(); } +template bool operator < (Scalar const &lhs, C n) { return lhs.count() < n; } +template bool operator < (C n, Scalar const &rhs) { return n < rhs.count(); } +template bool operator < (Scalar const &lhs, int n) { return lhs.count() < static_cast(n); } +template bool operator < (int n, Scalar const &rhs) { return static_cast(n) < rhs.count(); } +template bool operator < (Scalar const &lhs, int n) { return lhs.count() < n; } +template bool operator < (int n, Scalar const &rhs) { return n < rhs.count(); } + +template bool operator == (Scalar const &lhs, C n) { return lhs.count() == n; } +template bool operator == (C n, Scalar const &rhs) { return n == rhs.count(); } +template bool operator == (Scalar const &lhs, int n) { return lhs.count() == static_cast(n); } +template bool operator == (int n, Scalar const &rhs) { return static_cast(n) == rhs.count(); } +template bool operator == (Scalar const &lhs, int n) { return lhs.count() == n; } +template bool operator == (int n, Scalar const &rhs) { return n == rhs.count(); } + +template bool operator > (Scalar const &lhs, C n) { return lhs.count() > n; } +template bool operator > (C n, Scalar const &rhs) { return n > rhs.count(); } +template bool operator > (Scalar const &lhs, int n) { return lhs.count() > static_cast(n); } +template bool operator > (int n, Scalar const &rhs) { return static_cast(n) > rhs.count(); } +template bool operator > 
(Scalar const &lhs, int n) { return lhs.count() > n; } +template bool operator > (int n, Scalar const &rhs) { return n > rhs.count(); } + +template bool operator <= (Scalar const &lhs, C n) { return lhs.count() <= n; } +template bool operator <= (C n, Scalar const &rhs) { return n <= rhs.count(); } +template bool operator <= (Scalar const &lhs, int n) { return lhs.count() <= static_cast(n); } +template bool operator <= (int n, Scalar const &rhs) { return static_cast(n) <= rhs.count(); } +template bool operator <= (Scalar const &lhs, int n) { return lhs.count() <= n; } +template bool operator <= (int n, Scalar const &rhs) { return n <= rhs.count(); } + +template bool operator >= (Scalar const &lhs, C n) { return lhs.count() >= n; } +template bool operator >= (C n, Scalar const &rhs) { return n >= rhs.count(); } +template bool operator >= (Scalar const &lhs, int n) { return lhs.count() >= static_cast(n); } +template bool operator >= (int n, Scalar const &rhs) { return static_cast(n) >= rhs.count(); } +template bool operator >= (Scalar const &lhs, int n) { return lhs.count() >= n; } +template bool operator >= (int n, Scalar const &rhs) { return n >= rhs.count(); } } // namespace #endif // TS_METRIC_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index 932f7893539..6cc513e7497 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -21,7 +21,7 @@ limitations under the License. 
*/ -#include +#include #include #include #include @@ -85,9 +85,9 @@ void Test_1() { constexpr static int SCALE = 4096; - typedef ts::Metric PageSize; + typedef ts::Scalar PageSize; - TestBox test("TS Metric basic"); + TestBox test("TS Scalar basic"); PageSize pg1(1); test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); @@ -101,10 +101,10 @@ Test_2() constexpr static int SCALE_1 = 8192; constexpr static int SCALE_2 = 512; - typedef ts::Metric Size_1; - typedef ts::Metric Size_2; + typedef ts::Scalar Size_1; + typedef ts::Scalar Size_2; - TestBox test("TS Metric Conversion of scales of multiples"); + TestBox test("TS Scalar Conversion of scales of multiples"); Size_2 sz_a(2); Size_2 sz_b(57); Size_2 sz_c(SCALE_1 / SCALE_2); @@ -144,10 +144,10 @@ Test_3() constexpr static int SCALE_1 = 30; constexpr static int SCALE_2 = 20; - typedef ts::Metric Size_1; - typedef ts::Metric Size_2; + typedef ts::Scalar Size_1; + typedef ts::Scalar Size_2; - TestBox test("TS Metric common factor conversions"); + TestBox test("TS Scalar common factor conversions"); Size_2 sz_a(2); Size_2 sz_b(97); @@ -165,10 +165,10 @@ Test_3() void Test_4() { - TestBox test("TS Metric: relatively prime tests"); + TestBox test("TS Scalar: relatively prime tests"); - ts::Metric<9> m_9; - ts::Metric<4> m_4, m_test; + ts::Scalar<9> m_9; + ts::Scalar<4> m_4, m_test; m_9 = 95; // m_4 = m_9; // Should fail to compile with static assert. @@ -194,8 +194,8 @@ test_Compile() { // These tests aren't normally run, they exist to detect compiler issues. - typedef ts::Metric<1024, long int> KBytes; - typedef ts::Metric<1024, int> KiBytes; + typedef ts::Scalar<1024, long int> KBytes; + typedef ts::Scalar<1024, int> KiBytes; KBytes x(12); KiBytes y(12); From 94a07153a223c549534268ff346f6bb2c72cca63 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 17:57:57 -0600 Subject: [PATCH 23/81] Scalar: Additional name change code cleanup. 
--- lib/ts/Scalar.h | 42 ++++++++++++------------------------------ lib/ts/test_Scalar.cc | 32 ++++++++++++++++---------------- 2 files changed, 28 insertions(+), 46 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index fe045a9c0ab..2f9ee581b8b 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -25,39 +25,21 @@ limitations under the License. */ -#if !defined(TS_METRIC_H) -#define TS_METRIC_H +#if !defined(TS_SCALAR_H) +#define TS_SCALAR_H #include #include namespace ApacheTrafficServer { -namespace detail -{ - // The built in type 'int' is special because that's the type for untyped numbers. - // That means if we have operators for comparison to unscaled values there is ambiguity if - // the internal counter type is also 'int'. To avoid that this class (and hence methods) are - // inherited so if there is a conflict these methods are silently overridden. - template < intmax_t N, typename C > - struct ScalarIntOperators - { - bool operator < (int n) { return *this < static_cast(n); } - bool operator > (int n) { return *this > static_cast(n); } - }; - - template < intmax_t N > - struct ScalarIntOperators - { - }; -} /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. The "value" of the instance is @a count * @a scale. The scale is stored in the compiler in the class symbol table and so only the count is a run time value. An instance with a large scale can be assign to an instance with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents + larger scale must be explicit using @c scaled_up and @c scaled_down. This prevents inadvertent changes in value. Because the scales are not the same these conversions can be lossy and the two conversions determine whether, in such a case, the result should be rounded up or down to the nearest scale value. 
@@ -67,8 +49,8 @@ namespace detail @note This is modeled somewhat on @c std::chrono and serves a similar function for different and simpler cases (where the ratio is always an integer, never a fraction). - @see metric_round_up - @see metric_round_down + @see scaled_up + @see scaled_down */ template class Scalar { @@ -191,12 +173,12 @@ Scalar::operator=(Scalar const &that) -> self & typedef Scalar<1024> KiloBytes; Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 586 + auto size = scaled_up(src); // size.count() == 586 @endcode */ template M -metric_round_up(Scalar const &src) +scaled_up(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -221,12 +203,12 @@ metric_round_up(Scalar const &src) typedef Scalar<1024> KiloBytes; Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 585 + auto size = scaled_up(src); // size.count() == 585 @endcode */ template M -metric_round_down(Scalar const &src) +scaled_down(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -248,7 +230,7 @@ metric_round_down(Scalar const &src) /// Convert a unit value @a n to a Scalar, rounding down. template M -metric_round_down(intmax_t n) +scaled_down(intmax_t n) { return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. } @@ -256,7 +238,7 @@ metric_round_down(intmax_t n) /// Convert a unit value @a n to a Scalar, rounding up. template M -metric_round_up(intmax_t n) +scaled_up(intmax_t n) { return M::SCALE == 1 ? 
n : (n / M::SCALE + (0 != (n % M::SCALE))); } @@ -390,4 +372,4 @@ template bool operator >= (Scalar const &lhs, i template bool operator >= (int n, Scalar const &rhs) { return n >= rhs.count(); } } // namespace -#endif // TS_METRIC_H +#endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index 6cc513e7497..f19d4846e1b 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -110,24 +110,24 @@ Test_2() Size_2 sz_c(SCALE_1 / SCALE_2); Size_2 sz_d(29 * SCALE_1 / SCALE_2); - auto sz = ts::metric_round_up(sz_a); + auto sz = ts::scaled_up(sz_a); test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::metric_round_down(sz_a); + sz = ts::scaled_down(sz_a); test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); - sz = ts::metric_round_up(sz_b); + sz = ts::scaled_up(sz_b); test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); - sz = ts::metric_round_down(sz_b); + sz = ts::scaled_down(sz_b); test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); - sz = ts::metric_round_up(sz_c); + sz = ts::scaled_up(sz_c); test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::metric_round_down(sz_c); + sz = ts::scaled_down(sz_c); test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); - sz = ts::metric_round_up(sz_d); + sz = ts::scaled_up(sz_d); test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); - sz = ts::metric_round_down(sz_d); + sz = ts::scaled_down(sz_d); test.check(sz.count() == 29, "Rounding down, got %d expected %d", sz.count(), 29); sz = 119; @@ -151,14 +151,14 @@ Test_3() Size_2 sz_a(2); Size_2 sz_b(97); - auto sz = ts::metric_round_up(sz_a); + auto sz = ts::scaled_up(sz_a); test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); - sz = ts::metric_round_down(sz_a); + sz = ts::scaled_down(sz_a); test.check(sz.count() == 1, 
"Rounding down: got %d expected %d", sz.count(), 0); - sz = ts::metric_round_up(sz_b); + sz = ts::scaled_up(sz_b); test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); - sz = ts::metric_round_down(sz_b); + sz = ts::scaled_down(sz_b); test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); } @@ -174,15 +174,15 @@ Test_4() // m_4 = m_9; // Should fail to compile with static assert. // m_9 = m_4; // Should fail to compile with static assert. - m_4 = ts::metric_round_up(m_9); + m_4 = ts::scaled_up(m_9); test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); - m_4 = ts::metric_round_down(m_9); + m_4 = ts::scaled_down(m_9); test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); m_4 = 213; - m_9 = ts::metric_round_up(m_4); + m_9 = ts::scaled_up(m_4); test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); - m_9 = ts::metric_round_down(m_4); + m_9 = ts::scaled_down(m_4); test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); m_test = m_4; // Verify assignment of identical scale values compiles. From 43b27c099e0306a791c215787a47250003430905 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 19:03:22 -0600 Subject: [PATCH 24/81] Scalar: Fixups on addition. --- lib/ts/Scalar.h | 63 ++++++++++++++++++++++++++++++++++++------- lib/ts/test_Scalar.cc | 31 +++++++++++++++++++++ 2 files changed, 85 insertions(+), 9 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 2f9ee581b8b..b9a5b86718c 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -33,6 +33,23 @@ namespace ApacheTrafficServer { + template class Scalar; + + namespace detail { + // Internal class to deal with operator overload issues. + // Because the type of integers with no explicit type is (int) that type is special in terms of overloads. 
+ // To be convienet @c Scalar should support operators for its internal declared counter type and (int). + // This creates ambiguous overloads when C is (int). This class lets the (int) overloads be moved to a super + // class so conflict causes overridding rather than ambiguity. I am a bit unclear on why no implementation is + // needed but there it is. + template < intmax_t N, typename C > + struct ScalarArithmetics + { + ApacheTrafficServer::Scalar operator += (int); + ApacheTrafficServer::Scalar operator -= (int); + }; + } + /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. The "value" of the instance is @a @@ -52,7 +69,7 @@ namespace ApacheTrafficServer @see scaled_up @see scaled_down */ -template class Scalar +template class Scalar : public detail::ScalarArithmetics { typedef Scalar self; ///< Self reference type. @@ -83,18 +100,24 @@ template class Scalar constexpr Count units() const; /// Assignment operator. - /// @note Requires the scale of @c S be an integer multiple of the scale of this. + /// The value is scaled appropriately. + /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then + /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. template self &operator=(Scalar const &that); /// Assignment from same scale. self &operator=(self const &that); - ///@{ Comparisons. - // bool operator < (C n); - // bool operator > (C n); - // bool operator == (C n); - ///@} - - /// Run time access to the scale of this metric (template arg @a N). + /// Addition operator. + /// The value is scaled from @a that to @a this. + /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then + /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. 
+ template self &operator += (Scalar const &that); + /// Addition - add @a n as a number of scaled units. + self& operator += (C n); + /// Addition - add @a n as a number of scaled units. + self& operator += (self const& that); + + /// Run time access to the scale (template arg @a N). static constexpr intmax_t scale(); protected: @@ -371,5 +394,27 @@ template bool operator >= (int n, Scalar const &r template bool operator >= (Scalar const &lhs, int n) { return lhs.count() >= n; } template bool operator >= (int n, Scalar const &rhs) { return n >= rhs.count(); } +// Arithmetic operators +template template auto Scalar::operator += (Scalar const& that) -> self& +{ + typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n += that.count() * R::num; + return *this; +} +template auto Scalar::operator += (self const& that) -> self& { _n += that._n; return *this; } +template auto Scalar::operator += (C n) -> self& { _n += n; return *this; } +//template auto Scalar::operator += (int n) -> self& { _n += static_cast(n); return *this; } +//template auto Scalar::operator += (int n) -> self& { _n += n; return *this; } + +template Scalar operator + (Scalar const &lhs, Scalar const& rhs) { return Scalar(lhs) += rhs; } +template Scalar operator + (Scalar const &lhs, C n) { return Scalar(lhs) += n; } +template Scalar operator + (C n, Scalar const& rhs) { return Scalar(rhs) += n; } +template Scalar operator + (Scalar const &lhs, int n) { return Scalar(lhs) += n; } +template Scalar operator + (int n, Scalar const& rhs) { return Scalar(rhs) += n; } +template Scalar operator + (Scalar const &lhs, int n) { return Scalar(lhs) += n; } +template Scalar operator + (int n, Scalar const& rhs) { return Scalar(rhs) += n; } + + } // namespace #endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index f19d4846e1b..e3b6d888705 100644 --- a/lib/ts/test_Scalar.cc +++ 
b/lib/ts/test_Scalar.cc @@ -189,6 +189,36 @@ Test_4() test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); } +void +Test_5() +{ + TestBox test("TS Scalar: arithmetic operator tests"); + + typedef ts::Scalar<1024> KBytes; + typedef ts::Scalar<1, int64_t> Bytes; + typedef ts::Scalar<1024 * KBytes::SCALE> MBytes; + + Bytes bytes(96); + KBytes kbytes(2); + MBytes mbytes(5); + + Bytes z1 = bytes + 128; + test.check(z1.count() == 224, "Addition got %ld expected %d", z1.count(), 224); + KBytes z2 = kbytes + 3; + test.check(z2.count() == 5, "Addition got %d expected %d", z2.count(), 5); + Bytes z3(bytes); + z3 += kbytes; + test.check(z3.units() == 2048+96, "Addition got %ld expected %d", z3.units(), 2048+96); + MBytes z4 = mbytes; + z4 += 5; + z2 += z4; + test.check(z2.units() == ((10<<20) + (5<<10)), "Addition got %d expected %d", z2.units(), (10<<20) + (2<<10)); + + z1 += 128; + test.check(z1.count() == 352, "Addition got %ld expected %d", z1.count(), 352); +} + + void test_Compile() { @@ -211,6 +241,7 @@ main(int, char **) Test_2(); Test_3(); Test_4(); + Test_5(); TestBox::print_summary(); return 0; } From b7752ad280db23cf62bc8439c0dcee9d53f4ba0c Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 21:10:39 -0600 Subject: [PATCH 25/81] CacheTool: Rename directory too. 
--- tools/{cache_tool => traffic_cache_tool}/CacheDefs.h | 0 tools/{cache_tool => traffic_cache_tool}/CacheStore.h | 0 tools/{cache_tool => traffic_cache_tool}/CacheTool.cc | 0 tools/{cache_tool => traffic_cache_tool}/Command.cc | 0 tools/{cache_tool => traffic_cache_tool}/Command.h | 0 tools/{cache_tool => traffic_cache_tool}/File.cc | 0 tools/{cache_tool => traffic_cache_tool}/File.h | 0 tools/{cache_tool => traffic_cache_tool}/Makefile.am | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename tools/{cache_tool => traffic_cache_tool}/CacheDefs.h (100%) rename tools/{cache_tool => traffic_cache_tool}/CacheStore.h (100%) rename tools/{cache_tool => traffic_cache_tool}/CacheTool.cc (100%) rename tools/{cache_tool => traffic_cache_tool}/Command.cc (100%) rename tools/{cache_tool => traffic_cache_tool}/Command.h (100%) rename tools/{cache_tool => traffic_cache_tool}/File.cc (100%) rename tools/{cache_tool => traffic_cache_tool}/File.h (100%) rename tools/{cache_tool => traffic_cache_tool}/Makefile.am (100%) diff --git a/tools/cache_tool/CacheDefs.h b/tools/traffic_cache_tool/CacheDefs.h similarity index 100% rename from tools/cache_tool/CacheDefs.h rename to tools/traffic_cache_tool/CacheDefs.h diff --git a/tools/cache_tool/CacheStore.h b/tools/traffic_cache_tool/CacheStore.h similarity index 100% rename from tools/cache_tool/CacheStore.h rename to tools/traffic_cache_tool/CacheStore.h diff --git a/tools/cache_tool/CacheTool.cc b/tools/traffic_cache_tool/CacheTool.cc similarity index 100% rename from tools/cache_tool/CacheTool.cc rename to tools/traffic_cache_tool/CacheTool.cc diff --git a/tools/cache_tool/Command.cc b/tools/traffic_cache_tool/Command.cc similarity index 100% rename from tools/cache_tool/Command.cc rename to tools/traffic_cache_tool/Command.cc diff --git a/tools/cache_tool/Command.h b/tools/traffic_cache_tool/Command.h similarity index 100% rename from tools/cache_tool/Command.h rename to tools/traffic_cache_tool/Command.h diff --git 
a/tools/cache_tool/File.cc b/tools/traffic_cache_tool/File.cc similarity index 100% rename from tools/cache_tool/File.cc rename to tools/traffic_cache_tool/File.cc diff --git a/tools/cache_tool/File.h b/tools/traffic_cache_tool/File.h similarity index 100% rename from tools/cache_tool/File.h rename to tools/traffic_cache_tool/File.h diff --git a/tools/cache_tool/Makefile.am b/tools/traffic_cache_tool/Makefile.am similarity index 100% rename from tools/cache_tool/Makefile.am rename to tools/traffic_cache_tool/Makefile.am From 064da1289c04b409afeebfb7fca07bef6b8f9ae0 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 21:11:31 -0600 Subject: [PATCH 26/81] CacheTool: Move to 'cmd' directory. --- {tools => cmd}/traffic_cache_tool/CacheDefs.h | 0 {tools => cmd}/traffic_cache_tool/CacheStore.h | 0 {tools => cmd}/traffic_cache_tool/CacheTool.cc | 0 {tools => cmd}/traffic_cache_tool/Command.cc | 0 {tools => cmd}/traffic_cache_tool/Command.h | 0 {tools => cmd}/traffic_cache_tool/File.cc | 0 {tools => cmd}/traffic_cache_tool/File.h | 0 {tools => cmd}/traffic_cache_tool/Makefile.am | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename {tools => cmd}/traffic_cache_tool/CacheDefs.h (100%) rename {tools => cmd}/traffic_cache_tool/CacheStore.h (100%) rename {tools => cmd}/traffic_cache_tool/CacheTool.cc (100%) rename {tools => cmd}/traffic_cache_tool/Command.cc (100%) rename {tools => cmd}/traffic_cache_tool/Command.h (100%) rename {tools => cmd}/traffic_cache_tool/File.cc (100%) rename {tools => cmd}/traffic_cache_tool/File.h (100%) rename {tools => cmd}/traffic_cache_tool/Makefile.am (100%) diff --git a/tools/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h similarity index 100% rename from tools/traffic_cache_tool/CacheDefs.h rename to cmd/traffic_cache_tool/CacheDefs.h diff --git a/tools/traffic_cache_tool/CacheStore.h b/cmd/traffic_cache_tool/CacheStore.h similarity index 100% rename from tools/traffic_cache_tool/CacheStore.h 
rename to cmd/traffic_cache_tool/CacheStore.h diff --git a/tools/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc similarity index 100% rename from tools/traffic_cache_tool/CacheTool.cc rename to cmd/traffic_cache_tool/CacheTool.cc diff --git a/tools/traffic_cache_tool/Command.cc b/cmd/traffic_cache_tool/Command.cc similarity index 100% rename from tools/traffic_cache_tool/Command.cc rename to cmd/traffic_cache_tool/Command.cc diff --git a/tools/traffic_cache_tool/Command.h b/cmd/traffic_cache_tool/Command.h similarity index 100% rename from tools/traffic_cache_tool/Command.h rename to cmd/traffic_cache_tool/Command.h diff --git a/tools/traffic_cache_tool/File.cc b/cmd/traffic_cache_tool/File.cc similarity index 100% rename from tools/traffic_cache_tool/File.cc rename to cmd/traffic_cache_tool/File.cc diff --git a/tools/traffic_cache_tool/File.h b/cmd/traffic_cache_tool/File.h similarity index 100% rename from tools/traffic_cache_tool/File.h rename to cmd/traffic_cache_tool/File.h diff --git a/tools/traffic_cache_tool/Makefile.am b/cmd/traffic_cache_tool/Makefile.am similarity index 100% rename from tools/traffic_cache_tool/Makefile.am rename to cmd/traffic_cache_tool/Makefile.am From 31e64fe98f8a16b4e0277e2032cf3402b3cf279f Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 21:15:23 -0600 Subject: [PATCH 27/81] CacheTool: Update parent directory makefiles. --- cmd/Makefile.am | 3 ++- tools/Makefile.am | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/Makefile.am b/cmd/Makefile.am index bd66e5278c8..b23ece5a0ef 100644 --- a/cmd/Makefile.am +++ b/cmd/Makefile.am @@ -16,9 +16,10 @@ # limitations under the License. 
SUBDIRS = \ + traffic_cache_tool \ traffic_cop \ - traffic_ctl \ traffic_crashlog \ + traffic_ctl \ traffic_layout \ traffic_manager \ traffic_top \ diff --git a/tools/Makefile.am b/tools/Makefile.am index 7b76f4ef2d2..cdc90969a4e 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -18,7 +18,7 @@ # limitations under the License. -SUBDIRS = cache_tool +SUBDIRS = bin_SCRIPTS = tsxs tspush From 173a14353fbd13f9bdacf375b8e627927c3ca48b Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 23 Jan 2017 15:46:08 -0600 Subject: [PATCH 28/81] Scalar: Add integer comparisons. --- lib/ts/Metric.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index ccb8126ff4a..0779889e6ab 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -248,6 +248,20 @@ operator<(Metric const &lhs, Metric const &rhs) return lhs.count() < rhs.count(); } +template +bool +operator<(Metric const &lhs, C n) +{ + return lhs.count() < n; +} + +template +bool +operator<(C n, Metric const &rhs) +{ + return n < rhs.count(); +} + template bool operator==(Metric const &lhs, Metric const &rhs) @@ -255,6 +269,20 @@ operator==(Metric const &lhs, Metric const &rhs) return lhs.count() == rhs.count(); } +template +bool +operator==(Metric const &lhs, C n) +{ + return lhs.count() == n; +} + +template +bool +operator==(C n, Metric const &rhs) +{ + return n == rhs.count(); +} + // Could be derived but if we're optimizing let's avoid the extra negation. // Or we could check if the compiler can optimize that out anyway. template @@ -264,6 +292,50 @@ operator<=(Metric const &lhs, Metric const &rhs) return lhs.count() <= rhs.count(); } +template +bool +operator<=(Metric const &lhs, C n) +{ + return lhs.count() <= n; +} + +template +bool +operator<=(C n, Metric const &rhs) +{ + return n <= rhs.count(); +} + +// Do the integer compares. 
+ +template +bool +operator>(Metric const &lhs, C n) +{ + return lhs.count() > n; +} + +template +bool +operator>(C n, Metric const &rhs) +{ + return n > rhs.count(); +} + +template +bool +operator>=(Metric const &lhs, C n) +{ + return lhs.count() >= n; +} + +template +bool +operator>=(C n, Metric const &rhs) +{ + return n >= rhs.count(); +} + // General base cases. template From a6fe8f6ee4aea72a8269b1569780e98c6809e110 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 24 Jan 2017 05:19:03 -0600 Subject: [PATCH 29/81] MemView: Add svtoi(). --- lib/ts/MemView.cc | 47 +++++++++++++++++++++++++++++++++++++++++++++++ lib/ts/MemView.h | 9 +++++++++ 2 files changed, 56 insertions(+) diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc index 473607af44d..8884add11d8 100644 --- a/lib/ts/MemView.cc +++ b/lib/ts/MemView.cc @@ -40,6 +40,53 @@ strcasecmp(StringView lhs, StringView rhs) return lhs ? 1 : rhs ? -1 : 0; } +intmax_t +svtoi(StringView src, StringView* out, int base) +{ + static const int8_t convert[256] = { +// 0 1 2 3 4 5 6 7 8 9 A B C D E F + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 00 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 10 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 20 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // 30 + -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 40 + 25, 26, 27, 28, 20, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1, // 50 + -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 60 + 25, 26, 27, 28, 20, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1, // 70 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 80 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 90 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // A0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // B0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // C0 + -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // D0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // E0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // F0 + }; + + intmax_t zret = 0; + + if (*out) out->clear(); + if (1 <= base || base > 36) return 0; + if (src.ltrim(&isspace)) { + const char* start = src.ptr(); + int8_t v; + bool neg = false; + if ('-' == *src) { + ++src; + neg = true; + } + while (src.size() && (-1 != (v = convert[static_cast(*src)]))) { + zret = zret * base + v; + } + if (out && (src.ptr() > (neg ? start+1 : start))) { + out->setView(start, src.ptr()); + } + + if (neg) zret = -zret; + } + return zret; +} + // Do the template instantions. template void detail::stream_padding(std::ostream &, std::size_t); template void detail::aligned_stream_write(std::ostream &, const StringView &); diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h index b2bbbbda0b3..f07b32f74ff 100644 --- a/lib/ts/MemView.h +++ b/lib/ts/MemView.h @@ -42,6 +42,15 @@ class StringView; int memcmp(MemView const &lhs, MemView const &rhs); int strcmp(StringView const &lhs, StringView const &rhs); int strcasecmp(StringView lhs, StringView rhs); +/** Convert the text in @c StringView @a src to a numeric value. + + If @a parsed is non-null then the part of the string actually parsed is placed there. + @a base sets the conversion base. This defaults to 10 with two special cases: + + - If the number starts with a literal '0' then it is treated as base 8. + - If the number starts with the literal characters '0x' or '0X' then it is treated as base 16. +*/ +intmax_t svtoi(StringView src, StringView* parsed = nullptr, int base = 10); /** A read only view of contiguous piece of memory. From 88e2c619114466bd35ec7f806a84ad75303564da Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 24 Jan 2017 05:19:31 -0600 Subject: [PATCH 30/81] Scalar: Add comparison operators. 
--- lib/ts/Metric.h | 144 ++++++++++++++++++++---------------------- lib/ts/test_Metric.cc | 16 +++++ 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h index 0779889e6ab..5a0561e46db 100644 --- a/lib/ts/Metric.h +++ b/lib/ts/Metric.h @@ -33,6 +33,24 @@ namespace ApacheTrafficServer { +namespace detail +{ + // The built in type 'int' is special because that's the type for untyped numbers. + // That means if we have operators for comparison to unscaled values there is ambiguity if + // the internal counter type is also 'int'. To avoid that this class (and hence methods) are + // inherited so if there is a conflict these methods are silently overridden. + template < intmax_t N, typename C > + struct ScalarIntOperators + { + bool operator < (int n) { return *this < static_cast(n); } + bool operator > (int n) { return *this > static_cast(n); } + }; + + template < intmax_t N > + struct ScalarIntOperators + { + }; +} /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. The "value" of the instance is @a @@ -52,7 +70,7 @@ namespace ApacheTrafficServer @see metric_round_up @see metric_round_down */ -template class Metric +template class Metric { typedef Metric self; ///< Self reference type. @@ -60,14 +78,14 @@ template class Metric /// Scaling factor for instances. /// Make it externally accessible. constexpr static intmax_t SCALE = N; - typedef T Count; ///< Type used to hold the count. + typedef C Count; ///< Type used to hold the count. constexpr Metric(); ///< Default contructor. ///< Construct to have @a n scaled units. constexpr Metric(Count n); /// Copy constructor for same scale. - template Metric(Metric const &that); + template Metric(Metric const &that); /// Copy / conversion constructor. /// @note Requires that @c S be an integer multiple of @c SCALE. @@ -88,6 +106,12 @@ template class Metric /// Assignment from same scale. self &operator=(self const &that); + ///@{ Comparisons. 
+ // bool operator < (C n); + // bool operator > (C n); + // bool operator == (C n); + ///@} + /// Run time access to the scale of this metric (template arg @a N). static constexpr intmax_t scale(); @@ -248,20 +272,6 @@ operator<(Metric const &lhs, Metric const &rhs) return lhs.count() < rhs.count(); } -template -bool -operator<(Metric const &lhs, C n) -{ - return lhs.count() < n; -} - -template -bool -operator<(C n, Metric const &rhs) -{ - return n < rhs.count(); -} - template bool operator==(Metric const &lhs, Metric const &rhs) @@ -269,20 +279,6 @@ operator==(Metric const &lhs, Metric const &rhs) return lhs.count() == rhs.count(); } -template -bool -operator==(Metric const &lhs, C n) -{ - return lhs.count() == n; -} - -template -bool -operator==(C n, Metric const &rhs) -{ - return n == rhs.count(); -} - // Could be derived but if we're optimizing let's avoid the extra negation. // Or we could check if the compiler can optimize that out anyway. template @@ -292,50 +288,6 @@ operator<=(Metric const &lhs, Metric const &rhs) return lhs.count() <= rhs.count(); } -template -bool -operator<=(Metric const &lhs, C n) -{ - return lhs.count() <= n; -} - -template -bool -operator<=(C n, Metric const &rhs) -{ - return n <= rhs.count(); -} - -// Do the integer compares. - -template -bool -operator>(Metric const &lhs, C n) -{ - return lhs.count() > n; -} - -template -bool -operator>(C n, Metric const &rhs) -{ - return n > rhs.count(); -} - -template -bool -operator>=(Metric const &lhs, C n) -{ - return lhs.count() >= n; -} - -template -bool -operator>=(C n, Metric const &rhs) -{ - return n >= rhs.count(); -} - // General base cases. template @@ -396,6 +348,46 @@ operator>=(Metric const &lhs, Metric const &rhs) { return rhs <= lhs; } -} +// Do the integer compares. +// A bit ugly to handle the issue that integers without explicit type are 'int'. Therefore suppport must be provided +// for comparison not just the counter type C but also explicitly 'int'. 
That makes the operators ambiguous if C is +// 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. + +template bool operator < (Metric const &lhs, C n) { return lhs.count() < n; } +template bool operator < (C n, Metric const &rhs) { return n < rhs.count(); } +template bool operator < (Metric const &lhs, int n) { return lhs.count() < static_cast(n); } +template bool operator < (int n, Metric const &rhs) { return static_cast(n) < rhs.count(); } +template bool operator < (Metric const &lhs, int n) { return lhs.count() < n; } +template bool operator < (int n, Metric const &rhs) { return n < rhs.count(); } + +template bool operator == (Metric const &lhs, C n) { return lhs.count() == n; } +template bool operator == (C n, Metric const &rhs) { return n == rhs.count(); } +template bool operator == (Metric const &lhs, int n) { return lhs.count() == static_cast(n); } +template bool operator == (int n, Metric const &rhs) { return static_cast(n) == rhs.count(); } +template bool operator == (Metric const &lhs, int n) { return lhs.count() == n; } +template bool operator == (int n, Metric const &rhs) { return n == rhs.count(); } + +template bool operator > (Metric const &lhs, C n) { return lhs.count() > n; } +template bool operator > (C n, Metric const &rhs) { return n > rhs.count(); } +template bool operator > (Metric const &lhs, int n) { return lhs.count() > static_cast(n); } +template bool operator > (int n, Metric const &rhs) { return static_cast(n) > rhs.count(); } +template bool operator > (Metric const &lhs, int n) { return lhs.count() > n; } +template bool operator > (int n, Metric const &rhs) { return n > rhs.count(); } + +template bool operator <= (Metric const &lhs, C n) { return lhs.count() <= n; } +template bool operator <= (C n, Metric const &rhs) { return n <= rhs.count(); } +template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= static_cast(n); } +template bool operator <= (int n, Metric const &rhs) 
{ return static_cast(n) <= rhs.count(); } +template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= n; } +template bool operator <= (int n, Metric const &rhs) { return n <= rhs.count(); } + +template bool operator >= (Metric const &lhs, C n) { return lhs.count() >= n; } +template bool operator >= (C n, Metric const &rhs) { return n >= rhs.count(); } +template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= static_cast(n); } +template bool operator >= (int n, Metric const &rhs) { return static_cast(n) >= rhs.count(); } +template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= n; } +template bool operator >= (int n, Metric const &rhs) { return n >= rhs.count(); } + +} // namespace #endif // TS_METRIC_H diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index f62f5a0e1ea..7bd78325b5b 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -24,6 +24,7 @@ #include #include #include +#include namespace ts { @@ -161,6 +162,21 @@ Test_3() test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); } +void +test_Compile() +{ + // These tests aren't normally run, they exist to detect compiler issues. + + typedef ts::Metric<1024, long int> KBytes; + typedef ts::Metric<1024, int> KiBytes; + + KBytes x(12); + KiBytes y(12); + + if (x > 12) std::cout << "Operator > works" << std::endl; + if (y > 12) std::cout << "Operator > works" << std::endl; +} + int main(int, char **) { From 1c3aa16d946aae01d70252f899c78e32b0987a42 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 24 Jan 2017 05:19:47 -0600 Subject: [PATCH 31/81] CacheTool: First pass at volume.config parser. 
--- cmd/traffic_cache_tool/CacheTool.cc | 49 ++++++++++++++++++++++++++--- configure.ac | 2 +- 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 41884df70da..f4b556bfb6c 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -80,9 +80,13 @@ struct VolumeConfig struct VolData { - int _idx; ///< Volume index. - int _percent; ///< Size if specified as a percent. - ts::CacheStripeBlocks _size; ///< Size if specified as an absolute. + int _idx = 0; ///< Volume index. + int _percent = 0; ///< Size if specified as a percent. + ts::Megabytes _size = 0; ///< Size if specified as an absolute. + + // Methods handy for parsing + bool hasSize() const { return _percent > 0 || _size > 0; } + bool hasIndex() const { return _idx > 0; } }; std::vector _volumes; @@ -447,23 +451,58 @@ VolumeConfig::load(ts::FilePath const& path) if (0 == cfile.load()) { ts::StringView content = cfile.content(); while (content) { + VolData v; + ++ln; ts::StringView line = content.splitPrefix('\n'); line.ltrim(&isspace); if (!line || '#' == *line) continue; - VolData v; while (line) { ts::StringView value(line.extractPrefix(&isspace)); ts::StringView tag(value.splitPrefix('=')); if (!tag) { zret.push(0, 1, "Line ", ln, " is invalid"); } else if (0 == strcasecmp(tag, TAG_SIZE)) { - auto n = ts::svtoi(value); + if (v.hasSize()) { + zret.push(0, 5, "Line ", ln, " has field ", TAG_SIZE, " more than once"); + } else { + ts::StringView text; + auto n = ts::svtoi(value, &text); + if (text) { + ts::StringView percent(text.end(), value.end()); // clip parsed number. 
+ if (!percent) { + v._size = n; + } else if ('%' == *percent && percent.size() == 1) { + v._percent = n; + } else { + zret.push(0, 3, "Line ", ln, " has invalid value '", value, "' for ", TAG_SIZE, " field"); + } + } else { + zret.push(0, 2, "Line ", ln, " has invalid value '", value, "' for ", TAG_SIZE, " field"); + } + } } else if (0 == strcasecmp(tag, TAG_VOL)) { + if (v.hasIndex()) { + zret.push(0, 6, "Line ", ln, " has field ", TAG_VOL, " more than once"); + } else { + ts::StringView text; + auto n = ts::svtoi(value, &text); + if (text == value) { + v._idx = n; + } else { + zret.push(0, 4, "Line ", ln, " has invalid value '", value, "' for ", TAG_VOL, " field"); + } + } } } + if (v.hasSize() && v.hasIndex()) { + _volumes.push_back(std::move(v)); + } else { + if (!v.hasSize()) zret.push(0,7, "Line ", ln, " does not have the required field ", TAG_SIZE); + if (!v.hasIndex()) zret.push(0,8, "Line ", ln, " does not have the required field ", TAG_VOL); + } } } else { zret = ts::Errata::Message(0, EBADF, "Unable to load ", path); diff --git a/configure.ac b/configure.ac index 8ba12180a45..bc38a0e8511 100644 --- a/configure.ac +++ b/configure.ac @@ -1887,6 +1887,7 @@ AS_IF([test "x$RPATH" != "x"], [ AC_CONFIG_FILES([ Makefile cmd/Makefile + cmd/traffic_cache_tool/Makefile cmd/traffic_cop/Makefile cmd/traffic_crashlog/Makefile cmd/traffic_ctl/Makefile @@ -1945,7 +1946,6 @@ AC_CONFIG_FILES([ tools/Makefile tools/trafficserver.pc tools/tsxs - tools/cache_tool/Makefile ]) # ----------------------------------------------------------------------------- From 3407b91aa5be7e9239fe5b2a10d2f409db14814b Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 25 Jan 2017 02:07:06 -0600 Subject: [PATCH 32/81] MemView: Fix svtoi. 
--- lib/ts/MemView.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc index 8884add11d8..59d1559aaab 100644 --- a/lib/ts/MemView.cc +++ b/lib/ts/MemView.cc @@ -66,7 +66,7 @@ svtoi(StringView src, StringView* out, int base) intmax_t zret = 0; if (*out) out->clear(); - if (1 <= base || base > 36) return 0; + if (!(1 < base && base <= 36)) return 0; if (src.ltrim(&isspace)) { const char* start = src.ptr(); int8_t v; @@ -77,6 +77,7 @@ svtoi(StringView src, StringView* out, int base) } while (src.size() && (-1 != (v = convert[static_cast(*src)]))) { zret = zret * base + v; + ++src; } if (out && (src.ptr() > (neg ? start+1 : start))) { out->setView(start, src.ptr()); From f9a9d764b8e3d56b320785468fb5c602e9a1c908 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 25 Jan 2017 02:07:29 -0600 Subject: [PATCH 33/81] CacheTool: Parsing volume.config --- cmd/traffic_cache_tool/CacheTool.cc | 55 +++++++++++++++++++++++------ cmd/traffic_cache_tool/Command.cc | 1 - cmd/traffic_cache_tool/Command.h | 8 +++++ 3 files changed, 52 insertions(+), 12 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index f4b556bfb6c..bda3d5df366 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -49,9 +49,11 @@ const Bytes CacheSpan::OFFSET{CacheStoreBlocks{1}}; namespace { -ts::FilePath TargetFile; +ts::FilePath SpanFile; ts::FilePath VolumeFile; + ts::CommandTable Commands; + // Default this to read only, only enable write if specifically required. 
int OPEN_RW_FLAGS = O_RDONLY; @@ -90,6 +92,13 @@ struct VolumeConfig }; std::vector _volumes; + typedef std::vector::iterator iterator; + typedef std::vector::const_iterator const_iterator; + + iterator begin() { return _volumes.begin(); } + iterator end() { return _volumes.end(); } + const_iterator begin() const { return _volumes.begin(); } + const_iterator end() const { return _volumes.end(); } }; // All of these free functions need to be moved to the Cache class. @@ -510,7 +519,12 @@ VolumeConfig::load(ts::FilePath const& path) return zret; } /* --------------------------------------------------------------------------------------- */ -struct option Options[] = {{"help", false, nullptr, 'h'}}; +struct option Options[] = { + {"help", 0, nullptr, 'h'}, + {"spans", 1, nullptr, 's'}, + {"volumes", 1, nullptr, 'v'}, + {nullptr, 0, nullptr, 0 } +}; } ts::Errata @@ -519,7 +533,7 @@ List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) ts::Errata zret; Cache cache; - if ((zret = cache.load(TargetFile))) { + if ((zret = cache.load(SpanFile))) { cache.dumpSpans(depth); cache.dumpVolumes(); } @@ -530,6 +544,21 @@ ts::Errata Simulate_Span_Allocation(int argc, char *argv[]) { ts::Errata zret; + VolumeConfig vols; + + if (!VolumeFile) { + return zret.push(0, 9, "Volume config file not set"); + } + + zret = vols.load(VolumeFile); + if (zret) { + for (VolumeConfig::VolData const& vd : vols) { + std::cout << "Volume " << vd._idx << " size "; + if (vd._percent) std::cout << vd._percent << '%'; + else std::cout << vd._size.count() << " megabytes"; + std::cout << std::endl; + } + } return zret; } @@ -540,7 +569,7 @@ Clear_Spans(int argc, char *argv[]) Cache cache; OPEN_RW_FLAGS = O_RDWR; - if ((zret = cache.load(TargetFile))) { + if ((zret = cache.load(SpanFile))) { for (auto *span : cache._spans) { span->clearPermanently(); } @@ -558,9 +587,15 @@ main(int argc, char *argv[]) while (-1 != (opt_val = getopt_long(argc, argv, "h", Options, &opt_idx))) { switch (opt_val) { 
case 'h': - printf("Usage: %s [device_path|config_file] [ ...]\n", argv[0]); + printf("Usage: %s --span --volume [ ...]\n", argv[0]); help = true; break; + case 's': + SpanFile = optarg; + break; + case 'v': + VolumeFile = optarg; + break; } } @@ -570,17 +605,15 @@ main(int argc, char *argv[]) .subCommand(std::string("stripes"), std::string("The stripes"), [](int argc, char *argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); Commands.add(std::string("clear"), std::string("Clear spans"), &Clear_Spans); + Commands.add(std::string("volumes"), std::string("Volumes"), &Simulate_Span_Allocation); + + Commands.setArgIndex(optind); if (help) { - Commands.helpMessage(argc - optind, argv + optind); + Commands.helpMessage(argc, argv); exit(1); } - if (optind < argc) { - TargetFile = argv[optind]; - argc -= optind + 1; - argv += optind + 1; - } ts::Errata result = Commands.invoke(argc, argv); if (!result) { diff --git a/cmd/traffic_cache_tool/Command.cc b/cmd/traffic_cache_tool/Command.cc index a0a1214e8f7..f2106d98ed8 100644 --- a/cmd/traffic_cache_tool/Command.cc +++ b/cmd/traffic_cache_tool/Command.cc @@ -154,7 +154,6 @@ CommandTable::add(std::string const &name, std::string const &help, CommandFunct ts::Errata CommandTable::invoke(int argc, char *argv[]) { - _opt_idx = 0; return _top.invoke(argc, argv); } diff --git a/cmd/traffic_cache_tool/Command.h b/cmd/traffic_cache_tool/Command.h index ebc6d55f67b..d1949f24aef 100644 --- a/cmd/traffic_cache_tool/Command.h +++ b/cmd/traffic_cache_tool/Command.h @@ -108,6 +108,11 @@ class CommandTable */ Command &add(std::string const &name, std::string const &help); + /** Set the index of the "first" argument. + This causes the command processing to skip @a n arguments. + */ + self& setArgIndex(int n); + /** Invoke a command. @return The return value of the executed command, or an error value if the command was not found. 
*/ @@ -121,5 +126,8 @@ class CommandTable friend class Command; }; + +inline CommandTable& CommandTable::setArgIndex(int n) { _opt_idx = n; return *this; } + } #endif From ffabe2bf06ad0b4ecf9782699b6450ad3e7bc11d Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 25 Jan 2017 02:33:07 -0600 Subject: [PATCH 34/81] CacheTool: Update Metric -> Scalar --- cmd/traffic_cache_tool/CacheDefs.h | 14 +- lib/ts/Metric.h | 393 ----------------------------- 2 files changed, 7 insertions(+), 400 deletions(-) delete mode 100644 lib/ts/Metric.h diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index e62136ecf28..fb6bf5f4f07 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -24,23 +24,23 @@ #if !defined(CACHE_DEFS_H) #define CACHE_DEFS_H #include -#include +#include namespace ApacheTrafficServer { constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; -typedef Metric<1, int64_t> Bytes; -typedef Metric<1024, int64_t> Kilobytes; -typedef Metric<1024 * Kilobytes::SCALE, int64_t> Megabytes; +typedef Scalar<1, int64_t> Bytes; +typedef Scalar<1024, int64_t> Kilobytes; +typedef Scalar<1024 * Kilobytes::SCALE, int64_t> Megabytes; // Units of allocation for stripes. -typedef Metric<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; +typedef Scalar<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; // Size measurement of cache storage. // Also size of meta data storage units. -typedef Metric<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; +typedef Scalar<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; // Size unit for content stored in cache. -typedef Metric<512, int64_t> CacheDataBlocks; +typedef Scalar<512, int64_t> CacheDataBlocks; /** A cache span is a representation of raw storage. It corresponds to a raw disk, disk partition, file, or directory. 
diff --git a/lib/ts/Metric.h b/lib/ts/Metric.h deleted file mode 100644 index 5a0561e46db..00000000000 --- a/lib/ts/Metric.h +++ /dev/null @@ -1,393 +0,0 @@ -/** @file - - Scaled integral values. - - In many situations it is desirable to define scaling factors or base units (a "metric"). This template - enables this to be done in a type and scaling safe manner where the defined factors carry their scaling - information as part of the type. - - @section license License - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - */ - -#if !defined(TS_METRIC_H) -#define TS_METRIC_H - -#include -#include - -namespace ApacheTrafficServer -{ -namespace detail -{ - // The built in type 'int' is special because that's the type for untyped numbers. - // That means if we have operators for comparison to unscaled values there is ambiguity if - // the internal counter type is also 'int'. To avoid that this class (and hence methods) are - // inherited so if there is a conflict these methods are silently overridden. 
- template < intmax_t N, typename C > - struct ScalarIntOperators - { - bool operator < (int n) { return *this < static_cast(n); } - bool operator > (int n) { return *this > static_cast(n); } - }; - - template < intmax_t N > - struct ScalarIntOperators - { - }; -} -/** A class to hold scaled values. - - Instances of this class have a @a count and a @a scale. The "value" of the instance is @a - count * @a scale. The scale is stored in the compiler in the class symbol table and so only - the count is a run time value. An instance with a large scale can be assign to an instance - with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c metric_round_up and @c metric_round_down. This prevents - inadvertent changes in value. Because the scales are not the same these conversions can be - lossy and the two conversions determine whether, in such a case, the result should be rounded - up or down to the nearest scale value. - - @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. - - @note This is modeled somewhat on @c std::chrono and serves a similar function for different - and simpler cases (where the ratio is always an integer, never a fraction). - - @see metric_round_up - @see metric_round_down - */ -template class Metric -{ - typedef Metric self; ///< Self reference type. - -public: - /// Scaling factor for instances. - /// Make it externally accessible. - constexpr static intmax_t SCALE = N; - typedef C Count; ///< Type used to hold the count. - - constexpr Metric(); ///< Default contructor. - ///< Construct to have @a n scaled units. - constexpr Metric(Count n); - - /// Copy constructor for same scale. - template Metric(Metric const &that); - - /// Copy / conversion constructor. - /// @note Requires that @c S be an integer multiple of @c SCALE. - template Metric(Metric const &that); - - /// Direct assignment. - /// The count is set to @a n. 
- self &operator=(Count n); - - /// The number of scale units. - constexpr Count count() const; - /// The absolute value, scaled up. - constexpr Count units() const; - - /// Assignment operator. - /// @note Requires the scale of @c S be an integer multiple of the scale of this. - template self &operator=(Metric const &that); - /// Assignment from same scale. - self &operator=(self const &that); - - ///@{ Comparisons. - // bool operator < (C n); - // bool operator > (C n); - // bool operator == (C n); - ///@} - - /// Run time access to the scale of this metric (template arg @a N). - static constexpr intmax_t scale(); - -protected: - Count _n; ///< Number of scale units. -}; - -template constexpr Metric::Metric() : _n() -{ -} -template constexpr Metric::Metric(Count n) : _n(n) -{ -} -template -constexpr auto -Metric::count() const -> Count -{ - return _n; -} -template -constexpr auto -Metric::units() const -> Count -{ - return _n * SCALE; -} -template -inline auto -Metric::operator=(Count n) -> self & -{ - _n = n; - return *this; -} -template -inline auto -Metric::operator=(self const &that) -> self & -{ - _n = that._n; - return *this; -} -template -constexpr inline intmax_t -Metric::scale() -{ - return SCALE; -} - -template template Metric::Metric(Metric const &that) : _n(static_cast(that._n)) -{ -} - -template template Metric::Metric(Metric const &that) -{ - typedef std::ratio R; - static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; -} - -template -template -auto -Metric::operator=(Metric const &that) -> self & -{ - typedef std::ratio R; - static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - return *this; -} - -// -- Free Functions -- - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. 
- The resulting count in the return value will be the smallest count that is not smaller than the unit - value of @a src. - - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 586 - @endcode - */ -template -M -metric_round_up(Metric const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (M::SCALE == S) { - return c; - } else if (R::den == 1) { - return c / R::num + (0 != c % R::num); // N is a multiple of S. - } else if (R::num == 1) { - return c * R::den; // S is a multiple of N. - } else { - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); - } -} - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. - The resulting count in the return value will be the largest count that is not larger than the unit - value of @a src. - - @code - typedef Metric<16> Paragraphs; - typedef Metric<1024> KiloBytes; - - Paragraphs src(37459); - auto size = metric_round_up(src); // size.count() == 585 - @endcode - */ -template -M -metric_round_down(Metric const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (R::den == 1) { - return c / R::num; // S is a multiple of N. - } else if (R::num == 1) { - return c * R::den; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; - } -} - -/// Convert a unit value @a n to a Metric, rounding down. 
-template -M -metric_round_down(intmax_t n) -{ - return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. -} - -/// Convert a unit value @a n to a Metric, rounding up. -template -M -metric_round_up(intmax_t n) -{ - return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); -} - -// --- Compare operators - -// Try for a bit of performance boost - if the metrics have the same scale -// just comparing the counts is sufficient and scaling conversion is avoided. -template -bool -operator<(Metric const &lhs, Metric const &rhs) -{ - return lhs.count() < rhs.count(); -} - -template -bool -operator==(Metric const &lhs, Metric const &rhs) -{ - return lhs.count() == rhs.count(); -} - -// Could be derived but if we're optimizing let's avoid the extra negation. -// Or we could check if the compiler can optimize that out anyway. -template -bool -operator<=(Metric const &lhs, Metric const &rhs) -{ - return lhs.count() <= rhs.count(); -} - -// General base cases. - -template -bool -operator<(Metric const &lhs, Metric const &rhs) -{ - typedef std::ratio R; - // Based on tests with the GNU compiler, the fact that the conditionals are compile time - // constant causes the never taken paths to be dropped so there are no runtime conditional - // checks, even with no optimization at all. 
- if (R::den == 1) { - return lhs.count() < rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den < rhs.count(); - } else - return lhs.units() < rhs.units(); -} - -template -bool -operator==(Metric const &lhs, Metric const &rhs) -{ - typedef std::ratio R; - if (R::den == 1) { - return lhs.count() == rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den == rhs.count(); - } else - return lhs.units() == rhs.units(); -} - -template -bool -operator<=(Metric const &lhs, Metric const &rhs) -{ - typedef std::ratio R; - if (R::den == 1) { - return lhs.count() <= rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den <= rhs.count(); - } else - return lhs.units() <= rhs.units(); -} - -// Derived compares. No narrowing optimization needed because if the scales -// are the same the nested call with be optimized. - -template -bool -operator>(Metric const &lhs, Metric const &rhs) -{ - return rhs < lhs; -} - -template -bool -operator>=(Metric const &lhs, Metric const &rhs) -{ - return rhs <= lhs; -} - -// Do the integer compares. -// A bit ugly to handle the issue that integers without explicit type are 'int'. Therefore suppport must be provided -// for comparison not just the counter type C but also explicitly 'int'. That makes the operators ambiguous if C is -// 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. 
- -template bool operator < (Metric const &lhs, C n) { return lhs.count() < n; } -template bool operator < (C n, Metric const &rhs) { return n < rhs.count(); } -template bool operator < (Metric const &lhs, int n) { return lhs.count() < static_cast(n); } -template bool operator < (int n, Metric const &rhs) { return static_cast(n) < rhs.count(); } -template bool operator < (Metric const &lhs, int n) { return lhs.count() < n; } -template bool operator < (int n, Metric const &rhs) { return n < rhs.count(); } - -template bool operator == (Metric const &lhs, C n) { return lhs.count() == n; } -template bool operator == (C n, Metric const &rhs) { return n == rhs.count(); } -template bool operator == (Metric const &lhs, int n) { return lhs.count() == static_cast(n); } -template bool operator == (int n, Metric const &rhs) { return static_cast(n) == rhs.count(); } -template bool operator == (Metric const &lhs, int n) { return lhs.count() == n; } -template bool operator == (int n, Metric const &rhs) { return n == rhs.count(); } - -template bool operator > (Metric const &lhs, C n) { return lhs.count() > n; } -template bool operator > (C n, Metric const &rhs) { return n > rhs.count(); } -template bool operator > (Metric const &lhs, int n) { return lhs.count() > static_cast(n); } -template bool operator > (int n, Metric const &rhs) { return static_cast(n) > rhs.count(); } -template bool operator > (Metric const &lhs, int n) { return lhs.count() > n; } -template bool operator > (int n, Metric const &rhs) { return n > rhs.count(); } - -template bool operator <= (Metric const &lhs, C n) { return lhs.count() <= n; } -template bool operator <= (C n, Metric const &rhs) { return n <= rhs.count(); } -template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= static_cast(n); } -template bool operator <= (int n, Metric const &rhs) { return static_cast(n) <= rhs.count(); } -template bool operator <= (Metric const &lhs, int n) { return lhs.count() <= n; } -template bool 
operator <= (int n, Metric const &rhs) { return n <= rhs.count(); } - -template bool operator >= (Metric const &lhs, C n) { return lhs.count() >= n; } -template bool operator >= (C n, Metric const &rhs) { return n >= rhs.count(); } -template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= static_cast(n); } -template bool operator >= (int n, Metric const &rhs) { return static_cast(n) >= rhs.count(); } -template bool operator >= (Metric const &lhs, int n) { return lhs.count() >= n; } -template bool operator >= (int n, Metric const &rhs) { return n >= rhs.count(); } - -} // namespace -#endif // TS_METRIC_H From a58df67b20bf7f4b0c83e2d0f46f513e81d41251 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 25 Jan 2017 04:06:33 -0600 Subject: [PATCH 35/81] CacheTool: Check for size rounding in volume.config. --- cmd/traffic_cache_tool/CacheTool.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index bda3d5df366..e1cf1e0ae33 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -482,7 +482,10 @@ VolumeConfig::load(ts::FilePath const& path) if (text) { ts::StringView percent(text.end(), value.end()); // clip parsed number. if (!percent) { - v._size = n; + v._size = ts::scaled_up(v._size = n); + if (v._size.count() != n) { + zret.push(0, 0, "Line ", ln, " size ", n, " was rounded up to ", v._size.count()); + } } else if ('%' == *percent && percent.size() == 1) { v._percent = n; } else { @@ -616,7 +619,7 @@ main(int argc, char *argv[]) ts::Errata result = Commands.invoke(argc, argv); - if (!result) { + if (result.size()) { std::cerr << result; } return 0; From 2e178814bb559362ff6f4955f2133809f0fe08cd Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 29 Dec 2016 15:39:21 -0600 Subject: [PATCH 36/81] Scalar: Add Scalar (scaled value) support class. 
--- lib/ts/Makefile.am | 4 +- lib/ts/Scalar.h | 790 ++++++++++++++++++++++++++++++++++++++++++ lib/ts/test_Scalar.cc | 248 +++++++++++++ 3 files changed, 1041 insertions(+), 1 deletion(-) create mode 100644 lib/ts/Scalar.h create mode 100644 lib/ts/test_Scalar.cc diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am index 039db6e4b0b..14243c35f62 100644 --- a/lib/ts/Makefile.am +++ b/lib/ts/Makefile.am @@ -23,7 +23,7 @@ library_includedir=$(includedir)/ts library_include_HEADERS = apidefs.h noinst_PROGRAMS = mkdfa CompileParseRules -check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator +check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator test_Scalar TESTS_ENVIRONMENT = LSAN_OPTIONS=suppressions=suppression.txt @@ -243,6 +243,8 @@ test_tsutil_SOURCES = \ test_Regex.cc \ tests.cc +test_Scalar_SOURCES = test_Scalar.cc Scalar.h + CompileParseRules_SOURCES = CompileParseRules.cc clean-local: diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h new file mode 100644 index 00000000000..381b3ab8800 --- /dev/null +++ b/lib/ts/Scalar.h @@ -0,0 +1,790 @@ +/** @file + + Scaled integral values. + + In many situations it is desirable to define scaling factors or base units (a "metric"). This template + enables this to be done in a type and scaling safe manner where the defined factors carry their scaling + information as part of the type. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#if !defined(TS_SCALAR_H) +#define TS_SCALAR_H + +#include +#include + +namespace ApacheTrafficServer +{ +template class Scalar; + +namespace detail +{ + // Internal class to deal with operator overload issues. + // Because the type of integers with no explicit type is (int) that type is special in terms of overloads. + // To be convienet @c Scalar should support operators for its internal declared counter type and (int). + // This creates ambiguous overloads when C is (int). This class lets the (int) overloads be moved to a super + // class so conflict causes overridding rather than ambiguity. + template struct ScalarArithmetics { + typedef ApacheTrafficServer::Scalar S; + S &operator+=(int); + S &operator-=(int); + S &operator*=(int); + S &operator/=(int); + + protected: + // Only let subclasses construct, as this class only makes sense as an abstract superclass. + ScalarArithmetics(); + }; + template + auto + ScalarArithmetics::operator+=(int n) -> S & + { + return static_cast(this).operator+=(static_cast(n)); + } + template + auto + ScalarArithmetics::operator-=(int n) -> S & + { + return static_cast(this).operator-=(static_cast(n)); + } + template + auto + ScalarArithmetics::operator*=(int n) -> S & + { + return static_cast(this).operator*=(static_cast(n)); + } + template + auto + ScalarArithmetics::operator/=(int n) -> S & + { + return static_cast(this).operator/=(static_cast(n)); + } +} + +/** A class to hold scaled values. + + Instances of this class have a @a count and a @a scale. The "value" of the instance is @a + count * @a scale. 
The scale is stored in the compiler in the class symbol table and so only + the count is a run time value. An instance with a large scale can be assign to an instance + with a smaller scale and the conversion is done automatically. Conversions from a smaller to + larger scale must be explicit using @c scaled_up and @c scaled_down. This prevents + inadvertent changes in value. Because the scales are not the same these conversions can be + lossy and the two conversions determine whether, in such a case, the result should be rounded + up or down to the nearest scale value. + + @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. + + @note This is modeled somewhat on @c std::chrono and serves a similar function for different + and simpler cases (where the ratio is always an integer, never a fraction). + + @see scaled_up + @see scaled_down + */ +template class Scalar : public detail::ScalarArithmetics +{ + typedef Scalar self; ///< Self reference type. + +public: + /// Scaling factor for instances. + /// Make it externally accessible. + constexpr static intmax_t SCALE = N; + typedef C Count; ///< Type used to hold the count. + + constexpr Scalar(); ///< Default contructor. + ///< Construct to have @a n scaled units. + constexpr Scalar(Count n); + + /// Copy constructor for same scale. + template Scalar(Scalar const &that); + + /// Copy / conversion constructor. + /// @note Requires that @c S be an integer multiple of @c SCALE. + template Scalar(Scalar const &that); + + /// Direct assignment. + /// The count is set to @a n. + self &operator=(Count n); + + /// The number of scale units. + constexpr Count count() const; + /// The absolute value, scaled up. + constexpr Count units() const; + + /// Assignment operator. + /// The value is scaled appropriately. + /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. 
If this isn't the case then + /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + template self &operator=(Scalar const &that); + /// Assignment from same scale. + self &operator=(self const &that); + + /// Addition operator. + /// The value is scaled from @a that to @a this. + /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then + /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + template self &operator+=(Scalar const &that); + /// Addition - add @a n as a number of scaled units. + self &operator+=(C n); + /// Addition - add @a n as a number of scaled units. + self &operator+=(self const &that); + + /// Subtraction operator. + /// The value is scaled from @a that to @a this. + /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then + /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + template self &operator-=(Scalar const &that); + /// Subtraction - subtract @a n as a number of scaled units. + self &operator-=(C n); + /// Subtraction - subtract @a n as a number of scaled units. + self &operator-=(self const &that); + + /// Multiplication - multiple the count by @a n. + self &operator*=(C n); + + /// Division - divide (rounding down) the count by @a n. + self &operator/=(C n); + + /// Run time access to the scale (template arg @a N). + static constexpr intmax_t scale(); + +protected: + Count _n; ///< Number of scale units. 
+}; + +template constexpr Scalar::Scalar() : _n() +{ +} +template constexpr Scalar::Scalar(Count n) : _n(n) +{ +} +template +constexpr auto +Scalar::count() const -> Count +{ + return _n; +} +template +constexpr auto +Scalar::units() const -> Count +{ + return _n * SCALE; +} +template +inline auto +Scalar::operator=(Count n) -> self & +{ + _n = n; + return *this; +} +template +inline auto +Scalar::operator=(self const &that) -> self & +{ + _n = that._n; + return *this; +} +template +constexpr inline intmax_t +Scalar::scale() +{ + return SCALE; +} + +template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) +{ +} + +template template Scalar::Scalar(Scalar const &that) +{ + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; +} + +template +template +auto +Scalar::operator=(Scalar const &that) -> self & +{ + typedef std::ratio R; + static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; + return *this; +} + +// -- Free Functions -- + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. + The resulting count in the return value will be the smallest count that is not smaller than the unit + value of @a src. + + @code + typedef Scalar<16> Paragraphs; + typedef Scalar<1024> KiloBytes; + + Paragraphs src(37459); + auto size = scaled_up(src); // size.count() == 586 + @endcode + */ +template +M +scaled_up(Scalar const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (M::SCALE == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. 
+ } else { + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); + } +} + +/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. + The resulting count in the return value will be the largest count that is not larger than the unit + value of @a src. + + @code + typedef Scalar<16> Paragraphs; + typedef Scalar<1024> KiloBytes; + + Paragraphs src(37459); + auto size = scaled_up(src); // size.count() == 585 + @endcode + */ +template +M +scaled_down(Scalar const &src) +{ + typedef std::ratio R; + auto c = src.count(); + + if (R::den == 1) { + return c / R::num; // S is a multiple of N. + } else if (R::num == 1) { + return c * R::den; // N is a multiple of S. + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. + // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; + } +} + +/// Convert a unit value @a n to a Scalar, rounding down. +template +M +scaled_down(intmax_t n) +{ + return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. +} + +/// Convert a unit value @a n to a Scalar, rounding up. +template +M +scaled_up(intmax_t n) +{ + return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); +} + +// --- Compare operators + +// Try for a bit of performance boost - if the metrics have the same scale +// just comparing the counts is sufficient and scaling conversion is avoided. 
+template +bool +operator<(Scalar const &lhs, Scalar const &rhs) +{ + return lhs.count() < rhs.count(); +} + +template +bool +operator==(Scalar const &lhs, Scalar const &rhs) +{ + return lhs.count() == rhs.count(); +} + +// Could be derived but if we're optimizing let's avoid the extra negation. +// Or we could check if the compiler can optimize that out anyway. +template +bool +operator<=(Scalar const &lhs, Scalar const &rhs) +{ + return lhs.count() <= rhs.count(); +} + +// General base cases. + +template +bool +operator<(Scalar const &lhs, Scalar const &rhs) +{ + typedef std::ratio R; + // Based on tests with the GNU compiler, the fact that the conditionals are compile time + // constant causes the never taken paths to be dropped so there are no runtime conditional + // checks, even with no optimization at all. + if (R::den == 1) { + return lhs.count() < rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den < rhs.count(); + } else + return lhs.units() < rhs.units(); +} + +template +bool +operator==(Scalar const &lhs, Scalar const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() == rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den == rhs.count(); + } else + return lhs.units() == rhs.units(); +} + +template +bool +operator<=(Scalar const &lhs, Scalar const &rhs) +{ + typedef std::ratio R; + if (R::den == 1) { + return lhs.count() <= rhs.count() * R::num; + } else if (R::num == 1) { + return lhs.count() * R::den <= rhs.count(); + } else + return lhs.units() <= rhs.units(); +} + +// Derived compares. No narrowing optimization needed because if the scales +// are the same the nested call with be optimized. + +template +bool +operator>(Scalar const &lhs, Scalar const &rhs) +{ + return rhs < lhs; +} + +template +bool +operator>=(Scalar const &lhs, Scalar const &rhs) +{ + return rhs <= lhs; +} + +// Do the integer compares. 
+// A bit ugly to handle the issue that integers without explicit type are 'int'. Therefore suppport must be provided +// for comparison not just the counter type C but also explicitly 'int'. That makes the operators ambiguous if C is +// 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. + +template +bool +operator<(Scalar const &lhs, C n) +{ + return lhs.count() < n; +} +template +bool +operator<(C n, Scalar const &rhs) +{ + return n < rhs.count(); +} +template +bool +operator<(Scalar const &lhs, int n) +{ + return lhs.count() < static_cast(n); +} +template +bool +operator<(int n, Scalar const &rhs) +{ + return static_cast(n) < rhs.count(); +} +template +bool +operator<(Scalar const &lhs, int n) +{ + return lhs.count() < n; +} +template +bool +operator<(int n, Scalar const &rhs) +{ + return n < rhs.count(); +} + +template +bool +operator==(Scalar const &lhs, C n) +{ + return lhs.count() == n; +} +template +bool +operator==(C n, Scalar const &rhs) +{ + return n == rhs.count(); +} +template +bool +operator==(Scalar const &lhs, int n) +{ + return lhs.count() == static_cast(n); +} +template +bool +operator==(int n, Scalar const &rhs) +{ + return static_cast(n) == rhs.count(); +} +template +bool +operator==(Scalar const &lhs, int n) +{ + return lhs.count() == n; +} +template +bool +operator==(int n, Scalar const &rhs) +{ + return n == rhs.count(); +} + +template +bool +operator>(Scalar const &lhs, C n) +{ + return lhs.count() > n; +} +template +bool +operator>(C n, Scalar const &rhs) +{ + return n > rhs.count(); +} +template +bool +operator>(Scalar const &lhs, int n) +{ + return lhs.count() > static_cast(n); +} +template +bool +operator>(int n, Scalar const &rhs) +{ + return static_cast(n) > rhs.count(); +} +template +bool +operator>(Scalar const &lhs, int n) +{ + return lhs.count() > n; +} +template +bool +operator>(int n, Scalar const &rhs) +{ + return n > rhs.count(); +} + +template +bool +operator<=(Scalar const &lhs, C 
n) +{ + return lhs.count() <= n; +} +template +bool +operator<=(C n, Scalar const &rhs) +{ + return n <= rhs.count(); +} +template +bool +operator<=(Scalar const &lhs, int n) +{ + return lhs.count() <= static_cast(n); +} +template +bool +operator<=(int n, Scalar const &rhs) +{ + return static_cast(n) <= rhs.count(); +} +template +bool +operator<=(Scalar const &lhs, int n) +{ + return lhs.count() <= n; +} +template +bool +operator<=(int n, Scalar const &rhs) +{ + return n <= rhs.count(); +} + +template +bool +operator>=(Scalar const &lhs, C n) +{ + return lhs.count() >= n; +} +template +bool +operator>=(C n, Scalar const &rhs) +{ + return n >= rhs.count(); +} +template +bool +operator>=(Scalar const &lhs, int n) +{ + return lhs.count() >= static_cast(n); +} +template +bool +operator>=(int n, Scalar const &rhs) +{ + return static_cast(n) >= rhs.count(); +} +template +bool +operator>=(Scalar const &lhs, int n) +{ + return lhs.count() >= n; +} +template +bool +operator>=(int n, Scalar const &rhs) +{ + return n >= rhs.count(); +} + +// Arithmetic operators +template +template +auto +Scalar::operator+=(Scalar const &that) -> self & +{ + typedef std::ratio R; + static_assert(R::den == 1, "Addition not permitted - target scale is not an integral multiple of source scale."); + _n += that.count() * R::num; + return *this; +} +template +auto +Scalar::operator+=(self const &that) -> self & +{ + _n += that._n; + return *this; +} +template +auto +Scalar::operator+=(C n) -> self & +{ + _n += n; + return *this; +} + +template +Scalar +operator+(Scalar const &lhs, Scalar const &rhs) +{ + return Scalar(lhs) += rhs; +} +template +Scalar +operator+(Scalar const &lhs, C n) +{ + return Scalar(lhs) += n; +} +template +Scalar +operator+(C n, Scalar const &rhs) +{ + return Scalar(rhs) += n; +} +template +Scalar +operator+(Scalar const &lhs, int n) +{ + return Scalar(lhs) += n; +} +template +Scalar +operator+(int n, Scalar const &rhs) +{ + return Scalar(rhs) += n; +} +template +Scalar 
+operator+(Scalar const &lhs, int n) +{ + return Scalar(lhs) += n; +} +template +Scalar +operator+(int n, Scalar const &rhs) +{ + return Scalar(rhs) += n; +} + +template +template +auto +Scalar::operator-=(Scalar const &that) -> self & +{ + typedef std::ratio R; + static_assert(R::den == 1, "Subtraction not permitted - target scale is not an integral multiple of source scale."); + _n -= that.count() * R::num; + return *this; +} +template +auto +Scalar::operator-=(self const &that) -> self & +{ + _n -= that._n; + return *this; +} +template +auto +Scalar::operator-=(C n) -> self & +{ + _n -= n; + return *this; +} + +template +Scalar +operator-(Scalar const &lhs, Scalar const &rhs) +{ + return Scalar(lhs) -= rhs; +} +template +Scalar +operator-(Scalar const &lhs, C n) +{ + return Scalar(lhs) -= n; +} +template +Scalar +operator-(C n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} +template +Scalar +operator-(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} +template +Scalar +operator-(int n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} +template +Scalar +operator-(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} +template +Scalar +operator-(int n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} + +template +auto +Scalar::operator*=(C n) -> self & +{ + _n *= n; + return *this; +} + +template Scalar operator*(Scalar const &lhs, C n) +{ + return Scalar(lhs) -= n; +} +template Scalar operator*(C n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} +template Scalar operator*(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} +template Scalar operator*(int n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} +template Scalar operator*(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} +template Scalar operator*(int n, Scalar const &rhs) +{ + return Scalar(rhs) -= n; +} + +template +auto +Scalar::operator/=(C n) -> self & +{ + _n /= n; + return *this; +} + +template +Scalar +operator/(Scalar const &lhs, C n) +{ + return 
Scalar(lhs) -= n; +} +template +Scalar +operator/(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} +template +Scalar +operator/(Scalar const &lhs, int n) +{ + return Scalar(lhs) -= n; +} + +} // namespace +#endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc new file mode 100644 index 00000000000..c087a84fe4f --- /dev/null +++ b/lib/ts/test_Scalar.cc @@ -0,0 +1,248 @@ +/** @file + + Intrusive pointer test. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include +#include +#include +#include + +namespace ts +{ +using namespace ApacheTrafficServer; +} + +struct TestBox { + typedef TestBox self; ///< Self reference type. + + std::string _name; + + static int _count; + static int _fail; + + TestBox(char const *name) : _name(name) {} + TestBox(std::string const &name) : _name(name) {} + bool check(bool result, char const *fmt, ...) __attribute__((format(printf, 3, 4))); + + static void + print_summary() + { + printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? "FAIL" : "SUCCESS"); + } +}; + +int TestBox::_count = 0; +int TestBox::_fail = 0; + +bool +TestBox::check(bool result, char const *fmt, ...) 
+{ + ++_count; + + if (!result) { + static constexpr size_t N = 1 << 16; + size_t n = N; + size_t x; + char *s; + char buffer[N]; // just stack, go big. + + s = buffer; + x = snprintf(s, n, "%s: ", _name.c_str()); + n -= x; + s += x; + + va_list ap; + va_start(ap, fmt); + vsnprintf(s, n, fmt, ap); + va_end(ap); + printf("%s\n", buffer); + ++_fail; + } + return result; +} + +// Extremely simple test. +void +Test_1() +{ + constexpr static int SCALE = 4096; + typedef ts::Scalar PageSize; + + TestBox test("TS Scalar basic"); + PageSize pg1(1); + + test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); + test.check(pg1.units() == SCALE, "Units wrong, got %d expected %d", pg1.units(), SCALE); +} + +// Test multiples. +void +Test_2() +{ + constexpr static int SCALE_1 = 8192; + constexpr static int SCALE_2 = 512; + + typedef ts::Scalar Size_1; + typedef ts::Scalar Size_2; + + TestBox test("TS Scalar Conversion of scales of multiples"); + Size_2 sz_a(2); + Size_2 sz_b(57); + Size_2 sz_c(SCALE_1 / SCALE_2); + Size_2 sz_d(29 * SCALE_1 / SCALE_2); + + auto sz = ts::scaled_up(sz_a); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::scaled_down(sz_a); + test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::scaled_up(sz_b); + test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); + sz = ts::scaled_down(sz_b); + test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); + + sz = ts::scaled_up(sz_c); + test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::scaled_down(sz_c); + test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); + + sz = ts::scaled_up(sz_d); + test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); + sz = ts::scaled_down(sz_d); + test.check(sz.count() == 29, "Rounding down, got %d expected %d", sz.count(), 29); + + sz = 119; + 
sz_b = sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 + // sz = sz_b; // Should not compile. + test.check(sz_b.count() == 119 * (SCALE_1 / SCALE_2), "Integral conversion, got %d expected %d", sz_b.count(), + 119 * (SCALE_1 / SCALE_2)); +} + +// Test common factor. +void +Test_3() +{ + constexpr static int SCALE_1 = 30; + constexpr static int SCALE_2 = 20; + + typedef ts::Scalar Size_1; + typedef ts::Scalar Size_2; + + TestBox test("TS Scalar common factor conversions"); + Size_2 sz_a(2); + Size_2 sz_b(97); + + auto sz = ts::scaled_up(sz_a); + test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); + sz = ts::scaled_down(sz_a); + test.check(sz.count() == 1, "Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::scaled_up(sz_b); + test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); + sz = ts::scaled_down(sz_b); + test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); +} + +void +Test_4() +{ + TestBox test("TS Scalar: relatively prime tests"); + + ts::Scalar<9> m_9; + ts::Scalar<4> m_4, m_test; + + m_9 = 95; + // m_4 = m_9; // Should fail to compile with static assert. + // m_9 = m_4; // Should fail to compile with static assert. + + m_4 = ts::scaled_up(m_9); + test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); + m_4 = ts::scaled_down(m_9); + test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); + + m_4 = 213; + m_9 = ts::scaled_up(m_4); + test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); + m_9 = ts::scaled_down(m_4); + test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); + + m_test = m_4; // Verify assignment of identical scale values compiles. 
+ test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); +} + +void +Test_5() +{ + TestBox test("TS Scalar: arithmetic operator tests"); + + typedef ts::Scalar<1024> KBytes; + typedef ts::Scalar<1, int64_t> Bytes; + typedef ts::Scalar<1024 * KBytes::SCALE> MBytes; + + Bytes bytes(96); + KBytes kbytes(2); + MBytes mbytes(5); + + Bytes z1 = bytes + 128; + test.check(z1.count() == 224, "Addition got %ld expected %d", z1.count(), 224); + KBytes z2 = kbytes + 3; + test.check(z2.count() == 5, "Addition got %d expected %d", z2.count(), 5); + Bytes z3(bytes); + z3 += kbytes; + test.check(z3.units() == 2048 + 96, "Addition got %ld expected %d", z3.units(), 2048 + 96); + MBytes z4 = mbytes; + z4 += 5; + z2 += z4; + test.check(z2.units() == ((10 << 20) + (5 << 10)), "Addition got %d expected %d", z2.units(), (10 << 20) + (2 << 10)); + + z1 += 128; + test.check(z1.count() == 352, "Addition got %ld expected %d", z1.count(), 352); +} + +void +test_Compile() +{ + // These tests aren't normally run, they exist to detect compiler issues. + + typedef ts::Scalar<1024, long int> KBytes; + typedef ts::Scalar<1024, int> KiBytes; + + KBytes x(12); + KiBytes y(12); + + if (x > 12) + std::cout << "Operator > works" << std::endl; + if (y > 12) + std::cout << "Operator > works" << std::endl; +} + +int +main(int, char **) +{ + Test_1(); + Test_2(); + Test_3(); + Test_4(); + Test_5(); + TestBox::print_summary(); + return 0; +} From b850a79c1f5bdbf56742141009f33b24a2b5dadb Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 26 Jan 2017 11:03:21 -0600 Subject: [PATCH 37/81] Scalar: Fix constructor bug. --- lib/ts/Scalar.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 381b3ab8800..3868729ebfc 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -53,6 +53,8 @@ namespace detail // Only let subclasses construct, as this class only makes sense as an abstract superclass. 
ScalarArithmetics(); }; + + template ScalarArithmetics::ScalarArithmetics() {} template auto ScalarArithmetics::operator+=(int n) -> S & From 7238b2d816fb6b8e40e9689659d84c1e44936351 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 27 Jan 2017 04:15:50 -0600 Subject: [PATCH 38/81] CacheTool: Checkpoint. --- cmd/traffic_cache_tool/CacheTool.cc | 207 +++++++++++++++++----------- 1 file changed, 127 insertions(+), 80 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index e1cf1e0ae33..780700431a4 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -47,6 +47,8 @@ namespace ApacheTrafficServer const Bytes CacheSpan::OFFSET{CacheStoreBlocks{1}}; } +using ts::Bytes; + namespace { ts::FilePath SpanFile; @@ -59,11 +61,15 @@ int OPEN_RW_FLAGS = O_RDONLY; struct Span { Span(ts::FilePath const &path) : _path(path) {} + ts::Errata load(); + ts::Errata loadDevice(); + void clearPermanently(); ts::FilePath _path; ats_scoped_fd _fd; std::unique_ptr _header; + int _vol_idx = 0; }; struct Volume { @@ -101,7 +107,7 @@ struct VolumeConfig const_iterator end() const { return _volumes.end(); } }; -// All of these free functions need to be moved to the Cache class. +// All of these free functions need to be moved to the Cache class. Or the Span class? 
bool Validate_Stripe_Meta(ts::CacheStripeMeta const &stripe) @@ -132,6 +138,7 @@ Probe_For_Stripe(ts::StringView &mem) return zret; } +/* --------------------------------------------------------------------------------------- */ void Calc_Stripe_Data(ts::CacheStripeMeta const &header, ts::CacheStripeMeta const &footer, off_t delta, ts::StripeData &data) { @@ -265,40 +272,59 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) } } -// -------------------- +/* --------------------------------------------------------------------------------------- */ struct Cache { ~Cache(); - ts::Errata load(ts::FilePath const &path); - ts::Errata loadConfig(ts::FilePath const &path); - ts::Errata loadDevice(ts::FilePath const &path); + ts::Errata loadSpan(ts::FilePath const &path); + ts::Errata loadSpanConfig(ts::FilePath const &path); + ts::Errata loadSpanDirect(ts::FilePath const &path, int vol_idx = -1, Bytes size = -1); enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; void dumpSpans(SpanDumpDepth depth); void dumpVolumes(); + ts::CacheStripeBlocks calcTotalSpanSize(); + std::list _spans; std::map _volumes; + }; ts::Errata -Cache::load(ts::FilePath const &path) +Cache::loadSpan(ts::FilePath const &path) { ts::Errata zret; if (!path.is_readable()) zret = ts::Errata::Message(0, EPERM, path," is not readable."); -// throw(std::system_error(errno, std::system_category(), static_cast(path))); else if (path.is_regular_file()) - zret = this->loadConfig(path); - else if (path.is_char_device() || path.is_block_device()) - zret = this->loadDevice(path); + zret = this->loadSpanConfig(path); else - zret = ts::Errata::Message(0, EBADF, path, " is not a valid file type"); + zret = this->loadSpanDirect(path); + return zret; +} + +ts::Errata +Cache::loadSpanDirect(ts::FilePath const &path, int vol_idx, Bytes size) +{ + ts::Errata zret; + std::unique_ptr span(new Span(path)); + zret = span->load(); + if (zret) { + int nspb = span->_header->num_diskvol_blks; + for 
(auto i = 0; i < nspb; ++i) { + ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; + if (stripe.free == 0) + _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef{span.get(), i}); + } + span->_vol_idx = vol_idx; + _spans.push_back(span.release()); + } return zret; } ts::Errata -Cache::loadConfig(ts::FilePath const &path) +Cache::loadSpanConfig(ts::FilePath const &path) { static const ts::StringView TAG_ID("id"); static const ts::StringView TAG_VOL("volume"); @@ -315,18 +341,24 @@ Cache::loadConfig(ts::FilePath const &path) continue; ts::StringView path = line.extractPrefix(&isspace); if (path) { - // After this the line is [size] [id=string] [vol=#] + // After this the line is [size] [id=string] [volume=#] while (line) { ts::StringView value(line.extractPrefix(&isspace)); if (value) { ts::StringView tag(value.splitPrefix('=')); - if (!tag) { + if (!tag) { // must be the size } else if (0 == strcasecmp(tag, TAG_ID)) { } else if (0 == strcasecmp(tag, TAG_VOL)) { + ts::StringView text; + auto n = ts::svtoi(value, &text); + if (text == value && 0 < n && n < 256) { + } else { + zret.push(0,0, "Invalid volume index '", value, "'"); + } } } } - zret = this->load(ts::FilePath(path)); + zret = this->loadSpan(ts::FilePath(path)); } } } else { @@ -335,60 +367,6 @@ Cache::loadConfig(ts::FilePath const &path) return zret; } -ts::Errata -Cache::loadDevice(ts::FilePath const &path) -{ - ts::Errata zret; - int flags; - - flags = OPEN_RW_FLAGS -#if defined(O_DIRECT) - | O_DIRECT -#endif -#if defined(O_DSYNC) - | O_DSYNC -#endif - ; - - ats_scoped_fd fd(path.open(flags)); - - if (fd) { - off_t offset = ts::CacheSpan::OFFSET.units(); - alignas(512) char buff[8192]; - int64_t n = pread(fd, buff, sizeof(buff), offset); - if (n >= static_cast(sizeof(ts::SpanHeader))) { - ts::SpanHeader &span_hdr = reinterpret_cast(buff); - // See if it looks valid - if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { - 
int nspb = span_hdr.num_diskvol_blks; - size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); - Span *span = new Span(path); - span->_header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); - if (span_hdr_size <= sizeof(buff)) { - memcpy(span->_header.get(), buff, span_hdr_size); - } else { - // TODO - check the pread return - pread(fd, span->_header.get(), span_hdr_size, offset); - } - span->_fd = fd.release(); - _spans.push_back(span); - for (auto i = 0; i < nspb; ++i) { - ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; - if (stripe.free == 0) { - // Add to volume. - _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef{span, i}); - } - } - } - } else { - zret = ts::Errata::Message(0, errno, "Failed to read from ", path, '[', errno, ':', strerror(errno), ']'); - } - } else { - zret = ts::Errata::Message(0, errno, "Unable to open ", path); - } - return zret; -} - void Cache::dumpSpans(SpanDumpDepth depth) { @@ -422,11 +400,81 @@ Cache::dumpVolumes() } } +ts::CacheStripeBlocks Cache::calcTotalSpanSize() +{ + ts::CacheStripeBlocks zret(0); + + for ( auto span : _spans ) { + zret += ts::scaled_down(span->_size); + } +} + + Cache::~Cache() { for (auto *span : _spans) delete span; } +/* --------------------------------------------------------------------------------------- */ +ts::Errata +Span::load() +{ + ts::Errata zret; + if (!_path.is_readable()) + zret = ts::Errata::Message(0, EPERM, _path," is not readable."); + else if (_path.is_char_device() || _path.is_block_device()) + zret = this->loadDevice(); + else if (_path.is_dir()) + zret.push(0, 1, "Directory support not yet available"); + else + zret.push(0, EBADF, _path, " is not a valid file type"); + return zret; +} + +ts::Errata +Span::loadDevice() +{ + ts::Errata zret; + int flags; + + flags = OPEN_RW_FLAGS +#if defined(O_DIRECT) + | O_DIRECT +#endif +#if defined(O_DSYNC) + | O_DSYNC +#endif + ; + + ats_scoped_fd fd(_path.open(flags)); + + if 
(fd) { + off_t offset = ts::CacheSpan::OFFSET.units(); + alignas(512) char buff[8192]; + int64_t n = pread(fd, buff, sizeof(buff), offset); + if (n >= static_cast(sizeof(ts::SpanHeader))) { + ts::SpanHeader &span_hdr = reinterpret_cast(buff); + // See if it looks valid + if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { + int nspb = span_hdr.num_diskvol_blks; + size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); + _header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); + if (span_hdr_size <= sizeof(buff)) { + memcpy(_header.get(), buff, span_hdr_size); + } else { + // TODO - check the pread return + pread(fd, _header.get(), span_hdr_size, offset); + } + _fd = fd.release(); + } + } else { + zret = ts::Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); + } + } else { + zret = ts::Errata::Message(0, errno, "Unable to open ", _path); + } + return zret; +} void Span::clearPermanently() @@ -536,7 +584,7 @@ List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) ts::Errata zret; Cache cache; - if ((zret = cache.load(SpanFile))) { + if ((zret = cache.loadSpan(SpanFile))) { cache.dumpSpans(depth); cache.dumpVolumes(); } @@ -548,18 +596,17 @@ Simulate_Span_Allocation(int argc, char *argv[]) { ts::Errata zret; VolumeConfig vols; + Cache cache; - if (!VolumeFile) { - return zret.push(0, 9, "Volume config file not set"); - } + if (!VolumeFile) zret.push(0, 9, "Volume config file not set"); + if (!SpanFile) zret.push(0, 9, "Span file not set"); - zret = vols.load(VolumeFile); if (zret) { - for (VolumeConfig::VolData const& vd : vols) { - std::cout << "Volume " << vd._idx << " size "; - if (vd._percent) std::cout << vd._percent << '%'; - else std::cout << vd._size.count() << " megabytes"; - std::cout << std::endl; + zret = vols.load(VolumeFile); + if (zret) { + zret = cache.loadSpan(SpanFile); + if (zret) { + } } 
} return zret; @@ -572,7 +619,7 @@ Clear_Spans(int argc, char *argv[]) Cache cache; OPEN_RW_FLAGS = O_RDWR; - if ((zret = cache.load(SpanFile))) { + if ((zret = cache.loadSpan(SpanFile))) { for (auto *span : cache._spans) { span->clearPermanently(); } From 633a86058f47efe6e2394fd0aba933a18ba7915f Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 27 Jan 2017 08:27:52 -0600 Subject: [PATCH 39/81] Scalar: Fix operator bugs. --- lib/ts/Scalar.h | 18 +++++++++--------- lib/ts/test_Scalar.cc | 8 ++++++++ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 3868729ebfc..2e974d02126 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -738,27 +738,27 @@ Scalar::operator*=(C n) -> self & template Scalar operator*(Scalar const &lhs, C n) { - return Scalar(lhs) -= n; + return Scalar(lhs) *= n; } template Scalar operator*(C n, Scalar const &rhs) { - return Scalar(rhs) -= n; + return Scalar(rhs) *= n; } template Scalar operator*(Scalar const &lhs, int n) { - return Scalar(lhs) -= n; + return Scalar(lhs) *= n; } template Scalar operator*(int n, Scalar const &rhs) { - return Scalar(rhs) -= n; + return Scalar(rhs) *= n; } template Scalar operator*(Scalar const &lhs, int n) { - return Scalar(lhs) -= n; + return Scalar(lhs) *= n; } template Scalar operator*(int n, Scalar const &rhs) { - return Scalar(rhs) -= n; + return Scalar(rhs) *= n; } template @@ -773,19 +773,19 @@ template Scalar operator/(Scalar const &lhs, C n) { - return Scalar(lhs) -= n; + return Scalar(lhs) /= n; } template Scalar operator/(Scalar const &lhs, int n) { - return Scalar(lhs) -= n; + return Scalar(lhs) /= n; } template Scalar operator/(Scalar const &lhs, int n) { - return Scalar(lhs) -= n; + return Scalar(lhs) /= n; } } // namespace diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index c087a84fe4f..47c18f754e3 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -216,6 +216,14 @@ Test_5() z1 += 128; test.check(z1.count() == 
352, "Addition got %ld expected %d", z1.count(), 352); + + z2 = 2; + z1 = 3 * z2; + test.check(z1.count() == 6144, "Addition got %ld expected %d", z1.count(), 6144); + z1 *= 5; + test.check(z1.count() == 30720, "Addition got %ld expected %d", z1.count(), 30720); + z1 /= 3; + test.check(z1.count() == 10240, "Addition got %ld expected %d", z1.count(), 10240); } void From cfa2f5189746656483e50c1860ca63c67808a812 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 28 Jan 2017 13:41:23 -0600 Subject: [PATCH 40/81] Scalar: Add typing tag template argument. --- lib/ts/Scalar.h | 321 ++++++++++++++++++++++++------------------------ 1 file changed, 162 insertions(+), 159 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 2e974d02126..8396975a4ec 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -33,7 +33,9 @@ namespace ApacheTrafficServer { -template class Scalar; +template class Scalar; + +namespace tag { struct generic; } namespace detail { @@ -42,8 +44,8 @@ namespace detail // To be convienet @c Scalar should support operators for its internal declared counter type and (int). // This creates ambiguous overloads when C is (int). This class lets the (int) overloads be moved to a super // class so conflict causes overridding rather than ambiguity. 
- template struct ScalarArithmetics { - typedef ApacheTrafficServer::Scalar S; + template struct ScalarArithmetics { + typedef ApacheTrafficServer::Scalar S; S &operator+=(int); S &operator-=(int); S &operator*=(int); @@ -54,28 +56,28 @@ namespace detail ScalarArithmetics(); }; - template ScalarArithmetics::ScalarArithmetics() {} - template + template ScalarArithmetics::ScalarArithmetics() {} + template auto - ScalarArithmetics::operator+=(int n) -> S & + ScalarArithmetics::operator+=(int n) -> S & { return static_cast(this).operator+=(static_cast(n)); } - template + template auto - ScalarArithmetics::operator-=(int n) -> S & + ScalarArithmetics::operator-=(int n) -> S & { return static_cast(this).operator-=(static_cast(n)); } - template + template auto - ScalarArithmetics::operator*=(int n) -> S & + ScalarArithmetics::operator*=(int n) -> S & { return static_cast(this).operator*=(static_cast(n)); } - template + template auto - ScalarArithmetics::operator/=(int n) -> S & + ScalarArithmetics::operator/=(int n) -> S & { return static_cast(this).operator/=(static_cast(n)); } @@ -100,7 +102,7 @@ namespace detail @see scaled_up @see scaled_down */ -template class Scalar : public detail::ScalarArithmetics +template class Scalar : public detail::ScalarArithmetics { typedef Scalar self; ///< Self reference type. @@ -109,17 +111,18 @@ template class Scalar : public detail::ScalarArit /// Make it externally accessible. constexpr static intmax_t SCALE = N; typedef C Count; ///< Type used to hold the count. + typedef T Tag; ///< Make tag accessible. constexpr Scalar(); ///< Default contructor. ///< Construct to have @a n scaled units. constexpr Scalar(Count n); /// Copy constructor for same scale. - template Scalar(Scalar const &that); + template Scalar(Scalar const &that); /// Copy / conversion constructor. /// @note Requires that @c S be an integer multiple of @c SCALE. - template Scalar(Scalar const &that); + template Scalar(Scalar const &that); /// Direct assignment. 
/// The count is set to @a n. @@ -134,7 +137,7 @@ template class Scalar : public detail::ScalarArit /// The value is scaled appropriately. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. - template self &operator=(Scalar const &that); + template self &operator=(Scalar const &that); /// Assignment from same scale. self &operator=(self const &that); @@ -152,7 +155,7 @@ template class Scalar : public detail::ScalarArit /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. - template self &operator-=(Scalar const &that); + template self &operator-=(Scalar const &that); /// Subtraction - subtract @a n as a number of scaled units. self &operator-=(C n); /// Subtraction - subtract @a n as a number of scaled units. @@ -171,60 +174,60 @@ template class Scalar : public detail::ScalarArit Count _n; ///< Number of scale units. 
}; -template constexpr Scalar::Scalar() : _n() +template constexpr Scalar::Scalar() : _n() { } -template constexpr Scalar::Scalar(Count n) : _n(n) +template constexpr Scalar::Scalar(Count n) : _n(n) { } -template +template constexpr auto -Scalar::count() const -> Count +Scalar::count() const -> Count { return _n; } -template +template constexpr auto -Scalar::units() const -> Count +Scalar::units() const -> Count { return _n * SCALE; } -template +template inline auto -Scalar::operator=(Count n) -> self & +Scalar::operator=(Count n) -> self & { _n = n; return *this; } -template +template inline auto -Scalar::operator=(self const &that) -> self & +Scalar::operator=(self const &that) -> self & { _n = that._n; return *this; } -template +template constexpr inline intmax_t -Scalar::scale() +Scalar::scale() { return SCALE; } -template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) + template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) { } -template template Scalar::Scalar(Scalar const &that) + template template Scalar::Scalar(Scalar const &that) { typedef std::ratio R; static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); _n = that.count() * R::num; } -template +template template auto -Scalar::operator=(Scalar const &that) -> self & + Scalar::operator=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); @@ -248,7 +251,7 @@ Scalar::operator=(Scalar const &that) -> self & */ template M -scaled_up(Scalar const &src) +scaled_up(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -278,7 +281,7 @@ scaled_up(Scalar const &src) */ template M -scaled_down(Scalar const &src) +scaled_down(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -317,34 +320,34 @@ scaled_up(intmax_t n) // Try for a bit of performance boost - if 
the metrics have the same scale // just comparing the counts is sufficient and scaling conversion is avoided. -template +template bool -operator<(Scalar const &lhs, Scalar const &rhs) +operator<(Scalar const &lhs, Scalar const &rhs) { return lhs.count() < rhs.count(); } -template +template bool -operator==(Scalar const &lhs, Scalar const &rhs) +operator==(Scalar const &lhs, Scalar const &rhs) { return lhs.count() == rhs.count(); } // Could be derived but if we're optimizing let's avoid the extra negation. // Or we could check if the compiler can optimize that out anyway. -template +template bool -operator<=(Scalar const &lhs, Scalar const &rhs) +operator<=(Scalar const &lhs, Scalar const &rhs) { return lhs.count() <= rhs.count(); } // General base cases. -template +template bool -operator<(Scalar const &lhs, Scalar const &rhs) +operator<(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; // Based on tests with the GNU compiler, the fact that the conditionals are compile time @@ -358,9 +361,9 @@ operator<(Scalar const &lhs, Scalar const &rhs) return lhs.units() < rhs.units(); } -template + template bool -operator==(Scalar const &lhs, Scalar const &rhs) + operator==(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -371,9 +374,9 @@ operator==(Scalar const &lhs, Scalar const &rhs) return lhs.units() == rhs.units(); } -template + template bool -operator<=(Scalar const &lhs, Scalar const &rhs) + operator<=(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -406,27 +409,27 @@ operator>=(Scalar const &lhs, Scalar const &rhs) // for comparison not just the counter type C but also explicitly 'int'. That makes the operators ambiguous if C is // 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. 
-template +template bool -operator<(Scalar const &lhs, C n) +operator<(Scalar const &lhs, C n) { return lhs.count() < n; } -template +template bool -operator<(C n, Scalar const &rhs) +operator<(C n, Scalar const &rhs) { return n < rhs.count(); } -template +template bool -operator<(Scalar const &lhs, int n) +operator<(Scalar const &lhs, int n) { return lhs.count() < static_cast(n); } -template +template bool -operator<(int n, Scalar const &rhs) +operator<(int n, Scalar const &rhs) { return static_cast(n) < rhs.count(); } @@ -443,27 +446,27 @@ operator<(int n, Scalar const &rhs) return n < rhs.count(); } -template +template bool -operator==(Scalar const &lhs, C n) +operator==(Scalar const &lhs, C n) { return lhs.count() == n; } -template +template bool -operator==(C n, Scalar const &rhs) +operator==(C n, Scalar const &rhs) { return n == rhs.count(); } -template +template bool -operator==(Scalar const &lhs, int n) +operator==(Scalar const &lhs, int n) { return lhs.count() == static_cast(n); } -template +template bool -operator==(int n, Scalar const &rhs) +operator==(int n, Scalar const &rhs) { return static_cast(n) == rhs.count(); } @@ -480,27 +483,27 @@ operator==(int n, Scalar const &rhs) return n == rhs.count(); } -template +template bool -operator>(Scalar const &lhs, C n) +operator>(Scalar const &lhs, C n) { return lhs.count() > n; } -template +template bool -operator>(C n, Scalar const &rhs) +operator>(C n, Scalar const &rhs) { return n > rhs.count(); } -template +template bool -operator>(Scalar const &lhs, int n) +operator>(Scalar const &lhs, int n) { return lhs.count() > static_cast(n); } -template +template bool -operator>(int n, Scalar const &rhs) +operator>(int n, Scalar const &rhs) { return static_cast(n) > rhs.count(); } @@ -517,27 +520,27 @@ operator>(int n, Scalar const &rhs) return n > rhs.count(); } -template +template bool -operator<=(Scalar const &lhs, C n) +operator<=(Scalar const &lhs, C n) { return lhs.count() <= n; } -template +template bool 
-operator<=(C n, Scalar const &rhs) +operator<=(C n, Scalar const &rhs) { return n <= rhs.count(); } -template +template bool -operator<=(Scalar const &lhs, int n) +operator<=(Scalar const &lhs, int n) { return lhs.count() <= static_cast(n); } -template +template bool -operator<=(int n, Scalar const &rhs) +operator<=(int n, Scalar const &rhs) { return static_cast(n) <= rhs.count(); } @@ -554,27 +557,27 @@ operator<=(int n, Scalar const &rhs) return n <= rhs.count(); } -template +template bool -operator>=(Scalar const &lhs, C n) +operator>=(Scalar const &lhs, C n) { return lhs.count() >= n; } -template +template bool -operator>=(C n, Scalar const &rhs) +operator>=(C n, Scalar const &rhs) { return n >= rhs.count(); } -template +template bool -operator>=(Scalar const &lhs, int n) +operator>=(Scalar const &lhs, int n) { return lhs.count() >= static_cast(n); } -template +template bool -operator>=(int n, Scalar const &rhs) +operator>=(int n, Scalar const &rhs) { return static_cast(n) >= rhs.count(); } @@ -592,60 +595,60 @@ operator>=(int n, Scalar const &rhs) } // Arithmetic operators -template +template template auto -Scalar::operator+=(Scalar const &that) -> self & +Scalar::operator+=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Addition not permitted - target scale is not an integral multiple of source scale."); _n += that.count() * R::num; return *this; } -template +template auto -Scalar::operator+=(self const &that) -> self & +Scalar::operator+=(self const &that) -> self & { _n += that._n; return *this; } -template +template auto -Scalar::operator+=(C n) -> self & +Scalar::operator+=(C n) -> self & { _n += n; return *this; } -template -Scalar -operator+(Scalar const &lhs, Scalar const &rhs) +template +Scalar +operator+(Scalar const &lhs, Scalar const &rhs) { - return Scalar(lhs) += rhs; + return Scalar(lhs) += rhs; } -template -Scalar -operator+(Scalar const &lhs, C n) +template +Scalar +operator+(Scalar const &lhs, C n) { - 
return Scalar(lhs) += n; + return Scalar(lhs) += n; } -template -Scalar -operator+(C n, Scalar const &rhs) +template +Scalar +operator+(C n, Scalar const &rhs) { - return Scalar(rhs) += n; + return Scalar(rhs) += n; } -template -Scalar -operator+(Scalar const &lhs, int n) +template +Scalar +operator+(Scalar const &lhs, int n) { - return Scalar(lhs) += n; + return Scalar(lhs) += n; } -template -Scalar -operator+(int n, Scalar const &rhs) +template +Scalar +operator+(int n, Scalar const &rhs) { - return Scalar(rhs) += n; + return Scalar(rhs) += n; } template Scalar @@ -660,60 +663,60 @@ operator+(int n, Scalar const &rhs) return Scalar(rhs) += n; } -template +template template auto -Scalar::operator-=(Scalar const &that) -> self & + Scalar::operator-=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Subtraction not permitted - target scale is not an integral multiple of source scale."); _n -= that.count() * R::num; return *this; } -template +template auto -Scalar::operator-=(self const &that) -> self & +Scalar::operator-=(self const &that) -> self & { _n -= that._n; return *this; } -template +template auto -Scalar::operator-=(C n) -> self & +Scalar::operator-=(C n) -> self & { _n -= n; return *this; } -template -Scalar -operator-(Scalar const &lhs, Scalar const &rhs) +template +Scalar +operator-(Scalar const &lhs, Scalar const &rhs) { - return Scalar(lhs) -= rhs; + return Scalar(lhs) -= rhs; } -template -Scalar -operator-(Scalar const &lhs, C n) +template +Scalar +operator-(Scalar const &lhs, C n) { - return Scalar(lhs) -= n; + return Scalar(lhs) -= n; } -template -Scalar -operator-(C n, Scalar const &rhs) +template +Scalar +operator-(C n, Scalar const &rhs) { - return Scalar(rhs) -= n; + return Scalar(rhs) -= n; } -template -Scalar -operator-(Scalar const &lhs, int n) +template +Scalar +operator-(Scalar const &lhs, int n) { - return Scalar(lhs) -= n; + return Scalar(lhs) -= n; } -template -Scalar -operator-(int n, Scalar const &rhs) 
+template +Scalar +operator-(int n, Scalar const &rhs) { - return Scalar(rhs) -= n; + return Scalar(rhs) -= n; } template Scalar @@ -728,29 +731,29 @@ operator-(int n, Scalar const &rhs) return Scalar(rhs) -= n; } -template +template auto -Scalar::operator*=(C n) -> self & +Scalar::operator*=(C n) -> self & { _n *= n; return *this; } -template Scalar operator*(Scalar const &lhs, C n) +template Scalar operator*(Scalar const &lhs, C n) { - return Scalar(lhs) *= n; + return Scalar(lhs) *= n; } -template Scalar operator*(C n, Scalar const &rhs) +template Scalar operator*(C n, Scalar const &rhs) { - return Scalar(rhs) *= n; + return Scalar(rhs) *= n; } -template Scalar operator*(Scalar const &lhs, int n) +template Scalar operator*(Scalar const &lhs, int n) { - return Scalar(lhs) *= n; + return Scalar(lhs) *= n; } -template Scalar operator*(int n, Scalar const &rhs) +template Scalar operator*(int n, Scalar const &rhs) { - return Scalar(rhs) *= n; + return Scalar(rhs) *= n; } template Scalar operator*(Scalar const &lhs, int n) { @@ -761,25 +764,25 @@ template Scalar operator*(int n, Scalar const &rhs) return Scalar(rhs) *= n; } -template +template auto -Scalar::operator/=(C n) -> self & +Scalar::operator/=(C n) -> self & { _n /= n; return *this; } -template -Scalar -operator/(Scalar const &lhs, C n) +template +Scalar +operator/(Scalar const &lhs, C n) { - return Scalar(lhs) /= n; + return Scalar(lhs) /= n; } -template -Scalar -operator/(Scalar const &lhs, int n) +template +Scalar +operator/(Scalar const &lhs, int n) { - return Scalar(lhs) /= n; + return Scalar(lhs) /= n; } template Scalar From 89a307ed6a9772ec211466373c4c726435af66d7 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 28 Jan 2017 15:02:46 -0600 Subject: [PATCH 41/81] Scalar: Updated class description. Added increment/decrement operators. 
--- lib/ts/Scalar.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 8396975a4ec..00e911a5823 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -94,7 +94,13 @@ namespace detail lossy and the two conversions determine whether, in such a case, the result should be rounded up or down to the nearest scale value. - @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. + @a N sets the scale. @a C is the type used to hold the count, which is in units of @a N. + + @a T is a "tag" type which is used only to distinguish the base metric for the scale. Scalar + types that have different tags are not interoperable although they can be converted manually by + converting to units and then explicitly constructing a new Scalar instance. This is by + design. This can be ignored - if not specified then it defaults to a "generic" tag. The type can + be (and usually is) defined in name only). @note This is modeled somewhat on @c std::chrono and serves a similar function for different and simpler cases (where the ratio is always an integer, never a fraction). @@ -151,6 +157,15 @@ template class Scalar /// Addition - add @a n as a number of scaled units. self &operator+=(self const &that); + /// Increment - increase count by 1. + self &operator ++(); + /// Increment - increase count by 1. + self operator ++(int); + /// Decrement - decrease count by 1. + self &operator --(); + /// Decrement - decrease count by 1. + self operator --(int); + /// Subtraction operator. /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. 
If this isn't the case then @@ -731,6 +746,41 @@ operator-(int n, Scalar const &rhs) return Scalar(rhs) -= n; } +template +auto +Scalar::operator++() -> self & +{ + ++_n; + return *this; +} + +template +auto +Scalar::operator++(int) -> self +{ + self zret(*this); + ++_n; + return zret; +} + + +template +auto +Scalar::operator--() -> self & +{ + --_n; + return *this; +} + +template +auto +Scalar::operator--(int) -> self +{ + self zret(*this); + --_n; + return zret; +} + template auto Scalar::operator*=(C n) -> self & From 504b05edf93367cb00a89a2c7c303f8abb69d351 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 28 Jan 2017 20:28:26 -0600 Subject: [PATCH 42/81] Scalar: Move tag namesapce to global level. --- lib/ts/Scalar.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 00e911a5823..b1f96433665 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -31,12 +31,12 @@ #include #include +namespace tag { struct generic; } + namespace ApacheTrafficServer { template class Scalar; -namespace tag { struct generic; } - namespace detail { // Internal class to deal with operator overload issues. From 5f3c942904f71aacb2aefd58aea2b1cf465adb67 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sun, 29 Jan 2017 08:01:15 -0600 Subject: [PATCH 43/81] Scalar: clang-format --- lib/ts/Scalar.h | 50 +++++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index b1f96433665..05598900c7a 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -31,7 +31,10 @@ #include #include -namespace tag { struct generic; } +namespace tag +{ +struct generic; +} namespace ApacheTrafficServer { @@ -117,7 +120,7 @@ template class Scalar /// Make it externally accessible. constexpr static intmax_t SCALE = N; typedef C Count; ///< Type used to hold the count. - typedef T Tag; ///< Make tag accessible. + typedef T Tag; ///< Make tag accessible. 
constexpr Scalar(); ///< Default contructor. ///< Construct to have @a n scaled units. @@ -158,13 +161,13 @@ template class Scalar self &operator+=(self const &that); /// Increment - increase count by 1. - self &operator ++(); + self &operator++(); /// Increment - increase count by 1. - self operator ++(int); + self operator++(int); /// Decrement - decrease count by 1. - self &operator --(); + self &operator--(); /// Decrement - decrease count by 1. - self operator --(int); + self operator--(int); /// Subtraction operator. /// The value is scaled from @a that to @a this. @@ -228,11 +231,13 @@ Scalar::scale() return SCALE; } - template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) +template +template +Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) { } - template template Scalar::Scalar(Scalar const &that) +template template Scalar::Scalar(Scalar const &that) { typedef std::ratio R; static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); @@ -242,7 +247,7 @@ Scalar::scale() template template auto - Scalar::operator=(Scalar const &that) -> self & +Scalar::operator=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); @@ -376,9 +381,9 @@ operator<(Scalar const &lhs, Scalar const &rhs) return lhs.units() < rhs.units(); } - template +template bool - operator==(Scalar const &lhs, Scalar const &rhs) +operator==(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -389,9 +394,9 @@ bool return lhs.units() == rhs.units(); } - template +template bool - operator<=(Scalar const &lhs, Scalar const &rhs) +operator<=(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { @@ -681,7 +686,7 @@ operator+(int n, Scalar const &rhs) template template auto - Scalar::operator-=(Scalar const &that) -> self & 
+Scalar::operator-=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Subtraction not permitted - target scale is not an integral multiple of source scale."); @@ -746,35 +751,26 @@ operator-(int n, Scalar const &rhs) return Scalar(rhs) -= n; } -template -auto -Scalar::operator++() -> self & +template auto Scalar::operator++() -> self & { ++_n; return *this; } -template -auto -Scalar::operator++(int) -> self +template auto Scalar::operator++(int) -> self { self zret(*this); ++_n; return zret; } - -template -auto -Scalar::operator--() -> self & +template auto Scalar::operator--() -> self & { --_n; return *this; } -template -auto -Scalar::operator--(int) -> self +template auto Scalar::operator--(int) -> self { self zret(*this); --_n; From 933b4be09d39e2326ca33dc690f10a49196dc1a9 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sun, 29 Jan 2017 11:19:20 -0600 Subject: [PATCH 44/81] CacheTool: pre-merge --- lib/ts/Scalar.h | 420 ------------------------------------------ lib/ts/test_Scalar.cc | 247 ------------------------- 2 files changed, 667 deletions(-) delete mode 100644 lib/ts/Scalar.h delete mode 100644 lib/ts/test_Scalar.cc diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h deleted file mode 100644 index b9a5b86718c..00000000000 --- a/lib/ts/Scalar.h +++ /dev/null @@ -1,420 +0,0 @@ -/** @file - - Scaled integral values. - - In many situations it is desirable to define scaling factors or base units (a "metric"). This template - enables this to be done in a type and scaling safe manner where the defined factors carry their scaling - information as part of the type. - - @section license License - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. 
The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - */ - -#if !defined(TS_SCALAR_H) -#define TS_SCALAR_H - -#include -#include - -namespace ApacheTrafficServer -{ - template class Scalar; - - namespace detail { - // Internal class to deal with operator overload issues. - // Because the type of integers with no explicit type is (int) that type is special in terms of overloads. - // To be convienet @c Scalar should support operators for its internal declared counter type and (int). - // This creates ambiguous overloads when C is (int). This class lets the (int) overloads be moved to a super - // class so conflict causes overridding rather than ambiguity. I am a bit unclear on why no implementation is - // needed but there it is. - template < intmax_t N, typename C > - struct ScalarArithmetics - { - ApacheTrafficServer::Scalar operator += (int); - ApacheTrafficServer::Scalar operator -= (int); - }; - } - -/** A class to hold scaled values. - - Instances of this class have a @a count and a @a scale. The "value" of the instance is @a - count * @a scale. The scale is stored in the compiler in the class symbol table and so only - the count is a run time value. An instance with a large scale can be assign to an instance - with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c scaled_up and @c scaled_down. This prevents - inadvertent changes in value. 
Because the scales are not the same these conversions can be - lossy and the two conversions determine whether, in such a case, the result should be rounded - up or down to the nearest scale value. - - @a N sets the scale. @a T is the type used to hold the count, which is in units of @a N. - - @note This is modeled somewhat on @c std::chrono and serves a similar function for different - and simpler cases (where the ratio is always an integer, never a fraction). - - @see scaled_up - @see scaled_down - */ -template class Scalar : public detail::ScalarArithmetics -{ - typedef Scalar self; ///< Self reference type. - -public: - /// Scaling factor for instances. - /// Make it externally accessible. - constexpr static intmax_t SCALE = N; - typedef C Count; ///< Type used to hold the count. - - constexpr Scalar(); ///< Default contructor. - ///< Construct to have @a n scaled units. - constexpr Scalar(Count n); - - /// Copy constructor for same scale. - template Scalar(Scalar const &that); - - /// Copy / conversion constructor. - /// @note Requires that @c S be an integer multiple of @c SCALE. - template Scalar(Scalar const &that); - - /// Direct assignment. - /// The count is set to @a n. - self &operator=(Count n); - - /// The number of scale units. - constexpr Count count() const; - /// The absolute value, scaled up. - constexpr Count units() const; - - /// Assignment operator. - /// The value is scaled appropriately. - /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then - /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. - template self &operator=(Scalar const &that); - /// Assignment from same scale. - self &operator=(self const &that); - - /// Addition operator. - /// The value is scaled from @a that to @a this. - /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. 
If this isn't the case then - /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. - template self &operator += (Scalar const &that); - /// Addition - add @a n as a number of scaled units. - self& operator += (C n); - /// Addition - add @a n as a number of scaled units. - self& operator += (self const& that); - - /// Run time access to the scale (template arg @a N). - static constexpr intmax_t scale(); - -protected: - Count _n; ///< Number of scale units. -}; - -template constexpr Scalar::Scalar() : _n() -{ -} -template constexpr Scalar::Scalar(Count n) : _n(n) -{ -} -template -constexpr auto -Scalar::count() const -> Count -{ - return _n; -} -template -constexpr auto -Scalar::units() const -> Count -{ - return _n * SCALE; -} -template -inline auto -Scalar::operator=(Count n) -> self & -{ - _n = n; - return *this; -} -template -inline auto -Scalar::operator=(self const &that) -> self & -{ - _n = that._n; - return *this; -} -template -constexpr inline intmax_t -Scalar::scale() -{ - return SCALE; -} - -template template Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) -{ -} - -template template Scalar::Scalar(Scalar const &that) -{ - typedef std::ratio R; - static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; -} - -template -template -auto -Scalar::operator=(Scalar const &that) -> self & -{ - typedef std::ratio R; - static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; - return *this; -} - -// -- Free Functions -- - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. - The resulting count in the return value will be the smallest count that is not smaller than the unit - value of @a src. 
- - @code - typedef Scalar<16> Paragraphs; - typedef Scalar<1024> KiloBytes; - - Paragraphs src(37459); - auto size = scaled_up(src); // size.count() == 586 - @endcode - */ -template -M -scaled_up(Scalar const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (M::SCALE == S) { - return c; - } else if (R::den == 1) { - return c / R::num + (0 != c % R::num); // N is a multiple of S. - } else if (R::num == 1) { - return c * R::den; // S is a multiple of N. - } else { - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); - } -} - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. - The resulting count in the return value will be the largest count that is not larger than the unit - value of @a src. - - @code - typedef Scalar<16> Paragraphs; - typedef Scalar<1024> KiloBytes; - - Paragraphs src(37459); - auto size = scaled_up(src); // size.count() == 585 - @endcode - */ -template -M -scaled_down(Scalar const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (R::den == 1) { - return c / R::num; // S is a multiple of N. - } else if (R::num == 1) { - return c * R::den; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; - } -} - -/// Convert a unit value @a n to a Scalar, rounding down. -template -M -scaled_down(intmax_t n) -{ - return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. -} - -/// Convert a unit value @a n to a Scalar, rounding up. 
-template -M -scaled_up(intmax_t n) -{ - return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); -} - -// --- Compare operators - -// Try for a bit of performance boost - if the metrics have the same scale -// just comparing the counts is sufficient and scaling conversion is avoided. -template -bool -operator<(Scalar const &lhs, Scalar const &rhs) -{ - return lhs.count() < rhs.count(); -} - -template -bool -operator==(Scalar const &lhs, Scalar const &rhs) -{ - return lhs.count() == rhs.count(); -} - -// Could be derived but if we're optimizing let's avoid the extra negation. -// Or we could check if the compiler can optimize that out anyway. -template -bool -operator<=(Scalar const &lhs, Scalar const &rhs) -{ - return lhs.count() <= rhs.count(); -} - -// General base cases. - -template -bool -operator<(Scalar const &lhs, Scalar const &rhs) -{ - typedef std::ratio R; - // Based on tests with the GNU compiler, the fact that the conditionals are compile time - // constant causes the never taken paths to be dropped so there are no runtime conditional - // checks, even with no optimization at all. - if (R::den == 1) { - return lhs.count() < rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den < rhs.count(); - } else - return lhs.units() < rhs.units(); -} - -template -bool -operator==(Scalar const &lhs, Scalar const &rhs) -{ - typedef std::ratio R; - if (R::den == 1) { - return lhs.count() == rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den == rhs.count(); - } else - return lhs.units() == rhs.units(); -} - -template -bool -operator<=(Scalar const &lhs, Scalar const &rhs) -{ - typedef std::ratio R; - if (R::den == 1) { - return lhs.count() <= rhs.count() * R::num; - } else if (R::num == 1) { - return lhs.count() * R::den <= rhs.count(); - } else - return lhs.units() <= rhs.units(); -} - -// Derived compares. 
No narrowing optimization needed because if the scales -// are the same the nested call with be optimized. - -template -bool -operator>(Scalar const &lhs, Scalar const &rhs) -{ - return rhs < lhs; -} - -template -bool -operator>=(Scalar const &lhs, Scalar const &rhs) -{ - return rhs <= lhs; -} - -// Do the integer compares. -// A bit ugly to handle the issue that integers without explicit type are 'int'. Therefore suppport must be provided -// for comparison not just the counter type C but also explicitly 'int'. That makes the operators ambiguous if C is -// 'int'. The specializations for 'int' resolve this as their presence "covers" the generic cases. - -template bool operator < (Scalar const &lhs, C n) { return lhs.count() < n; } -template bool operator < (C n, Scalar const &rhs) { return n < rhs.count(); } -template bool operator < (Scalar const &lhs, int n) { return lhs.count() < static_cast(n); } -template bool operator < (int n, Scalar const &rhs) { return static_cast(n) < rhs.count(); } -template bool operator < (Scalar const &lhs, int n) { return lhs.count() < n; } -template bool operator < (int n, Scalar const &rhs) { return n < rhs.count(); } - -template bool operator == (Scalar const &lhs, C n) { return lhs.count() == n; } -template bool operator == (C n, Scalar const &rhs) { return n == rhs.count(); } -template bool operator == (Scalar const &lhs, int n) { return lhs.count() == static_cast(n); } -template bool operator == (int n, Scalar const &rhs) { return static_cast(n) == rhs.count(); } -template bool operator == (Scalar const &lhs, int n) { return lhs.count() == n; } -template bool operator == (int n, Scalar const &rhs) { return n == rhs.count(); } - -template bool operator > (Scalar const &lhs, C n) { return lhs.count() > n; } -template bool operator > (C n, Scalar const &rhs) { return n > rhs.count(); } -template bool operator > (Scalar const &lhs, int n) { return lhs.count() > static_cast(n); } -template bool operator > (int n, Scalar const &rhs) 
{ return static_cast(n) > rhs.count(); } -template bool operator > (Scalar const &lhs, int n) { return lhs.count() > n; } -template bool operator > (int n, Scalar const &rhs) { return n > rhs.count(); } - -template bool operator <= (Scalar const &lhs, C n) { return lhs.count() <= n; } -template bool operator <= (C n, Scalar const &rhs) { return n <= rhs.count(); } -template bool operator <= (Scalar const &lhs, int n) { return lhs.count() <= static_cast(n); } -template bool operator <= (int n, Scalar const &rhs) { return static_cast(n) <= rhs.count(); } -template bool operator <= (Scalar const &lhs, int n) { return lhs.count() <= n; } -template bool operator <= (int n, Scalar const &rhs) { return n <= rhs.count(); } - -template bool operator >= (Scalar const &lhs, C n) { return lhs.count() >= n; } -template bool operator >= (C n, Scalar const &rhs) { return n >= rhs.count(); } -template bool operator >= (Scalar const &lhs, int n) { return lhs.count() >= static_cast(n); } -template bool operator >= (int n, Scalar const &rhs) { return static_cast(n) >= rhs.count(); } -template bool operator >= (Scalar const &lhs, int n) { return lhs.count() >= n; } -template bool operator >= (int n, Scalar const &rhs) { return n >= rhs.count(); } - -// Arithmetic operators -template template auto Scalar::operator += (Scalar const& that) -> self& -{ - typedef std::ratio R; - static_assert(R::den == 1, "Assignment not permitted - target scale is not an integral multiple of source scale."); - _n += that.count() * R::num; - return *this; -} -template auto Scalar::operator += (self const& that) -> self& { _n += that._n; return *this; } -template auto Scalar::operator += (C n) -> self& { _n += n; return *this; } -//template auto Scalar::operator += (int n) -> self& { _n += static_cast(n); return *this; } -//template auto Scalar::operator += (int n) -> self& { _n += n; return *this; } - -template Scalar operator + (Scalar const &lhs, Scalar const& rhs) { return Scalar(lhs) += rhs; } 
-template Scalar operator + (Scalar const &lhs, C n) { return Scalar(lhs) += n; } -template Scalar operator + (C n, Scalar const& rhs) { return Scalar(rhs) += n; } -template Scalar operator + (Scalar const &lhs, int n) { return Scalar(lhs) += n; } -template Scalar operator + (int n, Scalar const& rhs) { return Scalar(rhs) += n; } -template Scalar operator + (Scalar const &lhs, int n) { return Scalar(lhs) += n; } -template Scalar operator + (int n, Scalar const& rhs) { return Scalar(rhs) += n; } - - -} // namespace -#endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc deleted file mode 100644 index e3b6d888705..00000000000 --- a/lib/ts/test_Scalar.cc +++ /dev/null @@ -1,247 +0,0 @@ -/** @file - - Intrusive pointer test. - - @section license License - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -#include -#include -#include -#include - -namespace ts -{ -using namespace ApacheTrafficServer; -} - -struct TestBox { - typedef TestBox self; ///< Self reference type. - - std::string _name; - - static int _count; - static int _fail; - - TestBox(char const *name) : _name(name) {} - TestBox(std::string const &name) : _name(name) {} - bool check(bool result, char const *fmt, ...) 
__attribute__((format(printf, 3, 4))); - - static void - print_summary() - { - printf("Tests: %d of %d passed - %s\n", (_count - _fail), _count, _fail ? "FAIL" : "SUCCESS"); - } -}; - -int TestBox::_count = 0; -int TestBox::_fail = 0; - -bool -TestBox::check(bool result, char const *fmt, ...) -{ - ++_count; - - if (!result) { - static constexpr size_t N = 1 << 16; - size_t n = N; - size_t x; - char *s; - char buffer[N]; // just stack, go big. - - s = buffer; - x = snprintf(s, n, "%s: ", _name.c_str()); - n -= x; - s += x; - - va_list ap; - va_start(ap, fmt); - vsnprintf(s, n, fmt, ap); - va_end(ap); - printf("%s\n", buffer); - ++_fail; - } - return result; -} - -// Extremely simple test. -void -Test_1() -{ - constexpr static int SCALE = 4096; - typedef ts::Scalar PageSize; - - TestBox test("TS Scalar basic"); - PageSize pg1(1); - - test.check(pg1.count() == 1, "Count wrong, got %d expected %d", pg1.count(), 1); - test.check(pg1.units() == SCALE, "Units wrong, got %d expected %d", pg1.units(), SCALE); -} - -// Test multiples. 
-void -Test_2() -{ - constexpr static int SCALE_1 = 8192; - constexpr static int SCALE_2 = 512; - - typedef ts::Scalar Size_1; - typedef ts::Scalar Size_2; - - TestBox test("TS Scalar Conversion of scales of multiples"); - Size_2 sz_a(2); - Size_2 sz_b(57); - Size_2 sz_c(SCALE_1 / SCALE_2); - Size_2 sz_d(29 * SCALE_1 / SCALE_2); - - auto sz = ts::scaled_up(sz_a); - test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scaled_down(sz_a); - test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); - - sz = ts::scaled_up(sz_b); - test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); - sz = ts::scaled_down(sz_b); - test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); - - sz = ts::scaled_up(sz_c); - test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scaled_down(sz_c); - test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); - - sz = ts::scaled_up(sz_d); - test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); - sz = ts::scaled_down(sz_d); - test.check(sz.count() == 29, "Rounding down, got %d expected %d", sz.count(), 29); - - sz = 119; - sz_b = sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 - // sz = sz_b; // Should not compile. - test.check(sz_b.count() == 119 * (SCALE_1 / SCALE_2), "Integral conversion, got %d expected %d", sz_b.count(), - 119 * (SCALE_1 / SCALE_2)); -} - -// Test common factor. 
-void -Test_3() -{ - constexpr static int SCALE_1 = 30; - constexpr static int SCALE_2 = 20; - - typedef ts::Scalar Size_1; - typedef ts::Scalar Size_2; - - TestBox test("TS Scalar common factor conversions"); - Size_2 sz_a(2); - Size_2 sz_b(97); - - auto sz = ts::scaled_up(sz_a); - test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); - sz = ts::scaled_down(sz_a); - test.check(sz.count() == 1, "Rounding down: got %d expected %d", sz.count(), 0); - - sz = ts::scaled_up(sz_b); - test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); - sz = ts::scaled_down(sz_b); - test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); -} - -void -Test_4() -{ - TestBox test("TS Scalar: relatively prime tests"); - - ts::Scalar<9> m_9; - ts::Scalar<4> m_4, m_test; - - m_9 = 95; - // m_4 = m_9; // Should fail to compile with static assert. - // m_9 = m_4; // Should fail to compile with static assert. - - m_4 = ts::scaled_up(m_9); - test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); - m_4 = ts::scaled_down(m_9); - test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); - - m_4 = 213; - m_9 = ts::scaled_up(m_4); - test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); - m_9 = ts::scaled_down(m_4); - test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); - - m_test = m_4; // Verify assignment of identical scale values compiles. 
- test.check(m_test.count() == 213, "Assignment got %d expected %d", m_4.count(), 213); -} - -void -Test_5() -{ - TestBox test("TS Scalar: arithmetic operator tests"); - - typedef ts::Scalar<1024> KBytes; - typedef ts::Scalar<1, int64_t> Bytes; - typedef ts::Scalar<1024 * KBytes::SCALE> MBytes; - - Bytes bytes(96); - KBytes kbytes(2); - MBytes mbytes(5); - - Bytes z1 = bytes + 128; - test.check(z1.count() == 224, "Addition got %ld expected %d", z1.count(), 224); - KBytes z2 = kbytes + 3; - test.check(z2.count() == 5, "Addition got %d expected %d", z2.count(), 5); - Bytes z3(bytes); - z3 += kbytes; - test.check(z3.units() == 2048+96, "Addition got %ld expected %d", z3.units(), 2048+96); - MBytes z4 = mbytes; - z4 += 5; - z2 += z4; - test.check(z2.units() == ((10<<20) + (5<<10)), "Addition got %d expected %d", z2.units(), (10<<20) + (2<<10)); - - z1 += 128; - test.check(z1.count() == 352, "Addition got %ld expected %d", z1.count(), 352); -} - - -void -test_Compile() -{ - // These tests aren't normally run, they exist to detect compiler issues. - - typedef ts::Scalar<1024, long int> KBytes; - typedef ts::Scalar<1024, int> KiBytes; - - KBytes x(12); - KiBytes y(12); - - if (x > 12) std::cout << "Operator > works" << std::endl; - if (y > 12) std::cout << "Operator > works" << std::endl; -} - -int -main(int, char **) -{ - Test_1(); - Test_2(); - Test_3(); - Test_4(); - Test_5(); - TestBox::print_summary(); - return 0; -} From a92303d5c4e1f87b8e7bfcb7f5b8b461800a3967 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 30 Jan 2017 08:20:22 -0600 Subject: [PATCH 45/81] CacheTool: Checkpoint. 
--- cmd/traffic_cache_tool/CacheDefs.h | 14 ++++++++------ cmd/traffic_cache_tool/File.cc | 2 +- cmd/traffic_cache_tool/File.h | 17 +++++++++++++---- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index fb6bf5f4f07..81f9766aaac 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -26,21 +26,23 @@ #include #include +namespace tag { struct bytes; } + namespace ApacheTrafficServer { constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; -typedef Scalar<1, int64_t> Bytes; -typedef Scalar<1024, int64_t> Kilobytes; -typedef Scalar<1024 * Kilobytes::SCALE, int64_t> Megabytes; +typedef Scalar<1, int64_t, tag::bytes> Bytes; +typedef Scalar<1024, int64_t, tag::bytes> Kilobytes; +typedef Scalar<1024 * Kilobytes::SCALE, int64_t, tag::bytes> Megabytes; // Units of allocation for stripes. -typedef Scalar<128 * Megabytes::SCALE, int64_t> CacheStripeBlocks; + typedef Scalar<128 * Megabytes::SCALE, int64_t, tag::bytes> CacheStripeBlocks; // Size measurement of cache storage. // Also size of meta data storage units. -typedef Scalar<8 * Kilobytes::SCALE, int64_t> CacheStoreBlocks; + typedef Scalar<8 * Kilobytes::SCALE, int64_t, tag::bytes> CacheStoreBlocks; // Size unit for content stored in cache. -typedef Scalar<512, int64_t> CacheDataBlocks; + typedef Scalar<512, int64_t, tag::bytes> CacheDataBlocks; /** A cache span is a representation of raw storage. It corresponds to a raw disk, disk partition, file, or directory. 
diff --git a/cmd/traffic_cache_tool/File.cc b/cmd/traffic_cache_tool/File.cc index 776f1706c39..c3c038e2ee4 100644 --- a/cmd/traffic_cache_tool/File.cc +++ b/cmd/traffic_cache_tool/File.cc @@ -31,7 +31,7 @@ FilePath & FilePath::operator=(char const *path) { _path = ats_strdup(path); - _stat_p = false; + _stat_p = STAT_P::UNDEF; return *this; } diff --git a/cmd/traffic_cache_tool/File.h b/cmd/traffic_cache_tool/File.h index 82ec9a69abd..ac5e0823e51 100644 --- a/cmd/traffic_cache_tool/File.h +++ b/cmd/traffic_cache_tool/File.h @@ -70,6 +70,8 @@ class FilePath /// Return the file type value. int file_type() const; + /// Size of the file or block device. + off_t physical_size() const; bool is_char_device() const; bool is_block_device() const; @@ -81,8 +83,10 @@ class FilePath protected: ats_scoped_str _path; ///< File path. + + enum class STAT_P : int8_t { INVALID = -1, UNDEF = 0, VALID = 1}; + mutable STAT_P _stat_p = STAT_P::UNDEF; ///< Whether _stat is valid. mutable struct stat _stat; ///< File information. - mutable bool _stat_p = false; ///< Whether _stat is valid. }; /** A file support class for handling files as bulk content. @@ -162,9 +166,9 @@ FilePath::is_relative() const inline struct stat const * FilePath::stat() const { - if (!_stat_p) - _stat_p = ::stat(_path, &_stat) >= 0; - return _stat_p ? &_stat : nullptr; + if (STAT_P::UNDEF == _stat_p) + _stat_p = ::stat(_path, &_stat) >= 0 ? STAT_P::VALID : STAT_P::INVALID; + return _stat_p == STAT_P::VALID ? &_stat : nullptr; } FilePath operator/(FilePath const &lhs, FilePath const &rhs); @@ -197,6 +201,11 @@ FilePath::is_regular_file() const return this->file_type() == S_IFREG; } + inline off_t + FilePath::physical_size() const + { + return this->stat() ? _stat. + inline BulkFile::BulkFile(super &&that) : super(that) { } From 7885a54fab832bcc1f2cd3ed066989abeabd33a8 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 31 Jan 2017 03:58:41 -0600 Subject: [PATCH 46/81] CacheTool: Checkpoint. 
--- cmd/traffic_cache_tool/CacheDefs.h | 2 +- cmd/traffic_cache_tool/CacheTool.cc | 21 +++++++++++++++++---- cmd/traffic_cache_tool/File.h | 24 ++++++++++++------------ 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index 81f9766aaac..ac0719da5f9 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -80,7 +80,7 @@ struct SpanHeader { uint32_t num_free; /* number of disk volume blocks free */ uint32_t num_used; /* number of disk volume blocks in use */ uint32_t num_diskvol_blks; /* number of disk volume blocks */ - uint64_t num_blocks; + CacheStoreBlocks num_blocks; /// Serialized stripe descriptors. This is treated as a variable sized array. CacheStripeDescriptor stripes[1]; }; diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 780700431a4..c211cf6fe7a 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -284,7 +284,8 @@ struct Cache { void dumpSpans(SpanDumpDepth depth); void dumpVolumes(); - ts::CacheStripeBlocks calcTotalSpanSize(); + ts::CacheStripeBlocks calcTotalSpanPhysicalSize(); + ts::CacheStripeBlocks calcTotalSpanConfiguredSize(); std::list _spans; std::map _volumes; @@ -374,7 +375,7 @@ Cache::dumpSpans(SpanDumpDepth depth) for (auto span : _spans) { std::cout << "Span: " << span->_path << " " << span->_header->num_volumes << " Volumes " << span->_header->num_used << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " - << span->_header->num_blocks << " blocks" << std::endl; + << span->_header->num_blocks.units() << " blocks" << std::endl; for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() @@ -400,15 +401,26 @@ 
Cache::dumpVolumes() } } -ts::CacheStripeBlocks Cache::calcTotalSpanSize() +ts::CacheStripeBlocks Cache::calcTotalSpanConfiguredSize() { ts::CacheStripeBlocks zret(0); for ( auto span : _spans ) { - zret += ts::scaled_down(span->_size); + zret += ts::scaled_down(span->_header->num_blocks); } + return zret; } +ts::CacheStripeBlocks Cache::calcTotalSpanPhysicalSize() +{ + ts::CacheStripeBlocks zret(0); + + for ( auto span : _spans ) { + // This is broken, physical_size doesn't work for devices, need to fix that. + zret += ts::scaled_down(span->_path.physical_size()); + } + return zret; +} Cache::~Cache() { @@ -606,6 +618,7 @@ Simulate_Span_Allocation(int argc, char *argv[]) if (zret) { zret = cache.loadSpan(SpanFile); if (zret) { + std::cout << "Total physical span size is " << cache.calcTotalSpanConfiguredSize().count() << " stripe blocks" << std::endl; } } } diff --git a/cmd/traffic_cache_tool/File.h b/cmd/traffic_cache_tool/File.h index ac5e0823e51..a03b7b8da9a 100644 --- a/cmd/traffic_cache_tool/File.h +++ b/cmd/traffic_cache_tool/File.h @@ -64,10 +64,6 @@ class FilePath /// Access the path explicitly. char const *path() const; - /// Get the stat buffer. - /// @return A valid stat buffer or @c nullptr if the system call failed. - struct stat const *stat() const; - /// Return the file type value. int file_type() const; /// Size of the file or block device. @@ -82,6 +78,10 @@ class FilePath ats_scoped_fd open(int flags) const; protected: + /// Get the stat buffer. + /// @return A valid stat buffer or @c nullptr if the system call failed. + template T stat(T (*f)(struct stat const*)) const; + ats_scoped_str _path; ///< File path. enum class STAT_P : int8_t { INVALID = -1, UNDEF = 0, VALID = 1}; @@ -163,12 +163,11 @@ FilePath::is_relative() const return !this->is_absolute(); } -inline struct stat const * -FilePath::stat() const + template T FilePath::stat(T (*f)(struct stat const*)) const { if (STAT_P::UNDEF == _stat_p) _stat_p = ::stat(_path, &_stat) >= 0 ? 
STAT_P::VALID : STAT_P::INVALID; - return _stat_p == STAT_P::VALID ? &_stat : nullptr; + return _stat_p == STAT_P::VALID ? f(&_stat) : T(); } FilePath operator/(FilePath const &lhs, FilePath const &rhs); @@ -177,7 +176,7 @@ FilePath operator/(char const *lhs, FilePath const &rhs); inline int FilePath::file_type() const { - return this->stat() ? (_stat.st_mode & S_IFMT) : 0; + return this->stat([](struct stat const* s) -> int { return s->st_mode & S_IFMT; }); } inline bool @@ -201,10 +200,11 @@ FilePath::is_regular_file() const return this->file_type() == S_IFREG; } - inline off_t - FilePath::physical_size() const - { - return this->stat() ? _stat. +inline off_t +FilePath::physical_size() const +{ + return this->stat([](struct stat const* s) { return s->st_size; }); +} inline BulkFile::BulkFile(super &&that) : super(that) { From 32b4dd16ea044f625bddec49116cbed234208a58 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 31 Jan 2017 08:33:42 -0600 Subject: [PATCH 47/81] Scalar: Add label to tag class. Add class based scale_up / scale_down. --- lib/ts/Scalar.h | 80 ++++++++++++++++++++++++++++++++++++++----- lib/ts/test_Scalar.cc | 52 +++++++++++++++++++--------- 2 files changed, 107 insertions(+), 25 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 05598900c7a..9ebc3c6dd7b 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -30,6 +30,7 @@ #include #include +#include namespace tag { @@ -145,7 +146,7 @@ template class Scalar /// Assignment operator. /// The value is scaled appropriately. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then - /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + /// the @c scale_up or @c scale_down casts must be used to indicate the rounding direction. template self &operator=(Scalar const &that); /// Assignment from same scale. 
self &operator=(self const &that); @@ -153,7 +154,7 @@ template class Scalar /// Addition operator. /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then - /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + /// the @c scale_up or @c scale_down casts must be used to indicate the rounding direction. template self &operator+=(Scalar const &that); /// Addition - add @a n as a number of scaled units. self &operator+=(C n); @@ -172,7 +173,7 @@ template class Scalar /// Subtraction operator. /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then - /// the @c scaled_up or @c scaled_down casts must be used to indicate the rounding direction. + /// the @c scale_up or @c scale_down casts must be used to indicate the rounding direction. template self &operator-=(Scalar const &that); /// Subtraction - subtract @a n as a number of scaled units. self &operator-=(C n); @@ -185,6 +186,12 @@ template class Scalar /// Division - divide (rounding down) the count by @a n. self &operator/=(C n); + /// Scale value @a x to this type, rounding up. + template self scale_up(Scalar const &x); + + /// Scale value @a x to this type, rounding down. + template self scale_down(Scalar const &x); + /// Run time access to the scale (template arg @a N). 
static constexpr intmax_t scale(); @@ -266,12 +273,12 @@ Scalar::operator=(Scalar const &that) -> self & typedef Scalar<1024> KiloBytes; Paragraphs src(37459); - auto size = scaled_up(src); // size.count() == 586 + auto size = scale_up(src); // size.count() == 586 @endcode */ template M -scaled_up(Scalar const &src) +scale_up(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -296,12 +303,12 @@ scaled_up(Scalar const &src) typedef Scalar<1024> KiloBytes; Paragraphs src(37459); - auto size = scaled_up(src); // size.count() == 585 + auto size = scale_up(src); // size.count() == 585 @endcode */ template M -scaled_down(Scalar const &src) +scale_down(Scalar const &src) { typedef std::ratio R; auto c = src.count(); @@ -323,7 +330,7 @@ scaled_down(Scalar const &src) /// Convert a unit value @a n to a Scalar, rounding down. template M -scaled_down(intmax_t n) +scale_down(intmax_t n) { return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. } @@ -331,7 +338,7 @@ scaled_down(intmax_t n) /// Convert a unit value @a n to a Scalar, rounding up. template M -scaled_up(intmax_t n) +scale_up(intmax_t n) { return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); } @@ -837,5 +844,60 @@ operator/(Scalar const &lhs, int n) return Scalar(lhs) /= n; } +template +template +auto +Scalar::scale_up(Scalar const &that) -> self +{ + return ApacheTrafficServer::scale_up(that); +} + +template +template +auto +Scalar::scale_down(Scalar const &that) -> self +{ + return ApacheTrafficServer::scale_down(that); +} + +namespace detail +{ + // These classes exist only to create distinguishable overloads. + struct tag_label_A { + }; + struct tag_label_B : public tag_label_A { + }; + // The purpose is to print a label for a tagged type only if the tag class defines a member that + // is the label. This creates a base function that always works and does nothing. 
The second + // function creates an overload if the tag class has a member named 'label' that has an stream IO + // output operator. When invoked with a second argument of B then the second overload exists and + // is used, otherwise only the first exists and that is used. The critical technology is the use + // of 'auto' and 'decltype' which effectively checks if the code inside 'decltype' compiles. + template + inline std::ostream & + tag_label(std::ostream &s, tag_label_A const &) + { + return s; + } + template + inline auto + tag_label(std::ostream &s, tag_label_B const &) -> decltype(s << T::label) + { + return s << T::label; + } +} // detail + } // namespace + +namespace std +{ +template +ostream & +operator<<(ostream &s, ApacheTrafficServer::Scalar const &x) +{ + static ApacheTrafficServer::detail::tag_label_B const b; + s << x.count(); + return ApacheTrafficServer::detail::tag_label(s, b); +} +} #endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index 47c18f754e3..09db13b316d 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -110,24 +110,24 @@ Test_2() Size_2 sz_c(SCALE_1 / SCALE_2); Size_2 sz_d(29 * SCALE_1 / SCALE_2); - auto sz = ts::scaled_up(sz_a); + auto sz = ts::scale_up(sz_a); test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scaled_down(sz_a); + sz = ts::scale_down(sz_a); test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); - sz = ts::scaled_up(sz_b); + sz = ts::scale_up(sz_b); test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); - sz = ts::scaled_down(sz_b); + sz = ts::scale_down(sz_b); test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); - sz = ts::scaled_up(sz_c); + sz = ts::scale_up(sz_c); test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scaled_down(sz_c); + sz = ts::scale_down(sz_c); test.check(sz.count() == 1, "Rounding down, got %d 
expected %d", sz.count(), 1); - sz = ts::scaled_up(sz_d); + sz = ts::scale_up(sz_d); test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); - sz = ts::scaled_down(sz_d); + sz = ts::scale_down(sz_d); test.check(sz.count() == 29, "Rounding down, got %d expected %d", sz.count(), 29); sz = 119; @@ -151,14 +151,14 @@ Test_3() Size_2 sz_a(2); Size_2 sz_b(97); - auto sz = ts::scaled_up(sz_a); + auto sz = ts::scale_up(sz_a); test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); - sz = ts::scaled_down(sz_a); + sz = ts::scale_down(sz_a); test.check(sz.count() == 1, "Rounding down: got %d expected %d", sz.count(), 0); - sz = ts::scaled_up(sz_b); + sz = ts::scale_up(sz_b); test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); - sz = ts::scaled_down(sz_b); + sz = ts::scale_down(sz_b); test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); } @@ -174,15 +174,15 @@ Test_4() // m_4 = m_9; // Should fail to compile with static assert. // m_9 = m_4; // Should fail to compile with static assert. - m_4 = ts::scaled_up(m_9); + m_4 = ts::scale_up(m_9); test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); - m_4 = ts::scaled_down(m_9); + m_4 = ts::scale_down(m_9); test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); m_4 = 213; - m_9 = ts::scaled_up(m_4); + m_9 = ts::scale_up(m_4); test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); - m_9 = ts::scaled_down(m_4); + m_9 = ts::scale_down(m_4); test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); m_test = m_4; // Verify assignment of identical scale values compiles. 
@@ -226,6 +226,25 @@ Test_5() test.check(z1.count() == 10240, "Addition got %ld expected %d", z1.count(), 10240); } +struct KBytes_tag { + static std::string const label; +}; +std::string const KBytes_tag::label(" KBytes"); + +void +Test_IO() +{ + typedef ts::Scalar<1024, long int, KBytes_tag> KBytes; + typedef ts::Scalar<1024, int> KiBytes; + + KBytes x(12); + KiBytes y(12); + + std::cout << "Testing" << std::endl; + std::cout << "x is " << x << std::endl; + std::cout << "y is " << y << std::endl; +} + void test_Compile() { @@ -252,5 +271,6 @@ main(int, char **) Test_4(); Test_5(); TestBox::print_summary(); + Test_IO(); return 0; } From 463ec574a8a22ba927a58217fbb77d931f602049 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 31 Jan 2017 14:42:24 -0600 Subject: [PATCH 48/81] Scalar: Minor testing tweaks. --- lib/ts/Scalar.h | 2 +- lib/ts/test_Scalar.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 9ebc3c6dd7b..5f1f813d675 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -896,7 +896,7 @@ ostream & operator<<(ostream &s, ApacheTrafficServer::Scalar const &x) { static ApacheTrafficServer::detail::tag_label_B const b; - s << x.count(); + s << x.units(); return ApacheTrafficServer::detail::tag_label(s, b); } } diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index 09db13b316d..aa9bb8a7e35 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -229,7 +229,7 @@ Test_5() struct KBytes_tag { static std::string const label; }; -std::string const KBytes_tag::label(" KBytes"); +std::string const KBytes_tag::label(" bytes"); void Test_IO() @@ -270,7 +270,7 @@ main(int, char **) Test_3(); Test_4(); Test_5(); - TestBox::print_summary(); Test_IO(); + TestBox::print_summary(); return 0; } From 4c63fa098e7709ee241ee7b01d9f422e0abf50e2 Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Wed, 1 Feb 2017 02:09:02 -0600 Subject: [PATCH 49/81] CacheTool: First approximation of stripe allocation. --- cmd/traffic_cache_tool/CacheTool.cc | 63 ++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index c211cf6fe7a..f2221deec87 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -91,6 +91,7 @@ struct VolumeConfig int _idx = 0; ///< Volume index. int _percent = 0; ///< Size if specified as a percent. ts::Megabytes _size = 0; ///< Size if specified as an absolute. + ts::CacheStripeBlocks _alloc; ///< Allocation size. // Methods handy for parsing bool hasSize() const { return _percent > 0 || _size > 0; } @@ -105,8 +106,30 @@ struct VolumeConfig iterator end() { return _volumes.end(); } const_iterator begin() const { return _volumes.begin(); } const_iterator end() const { return _volumes.end(); } + + ts::Errata validatePercentAllocation(); + void convertToAbsolute(ts::CacheStripeBlocks total_span_size); }; + ts::Errata VolumeConfig::validatePercentAllocation() { + ts::Errata zret; + int n = 0; + for ( VolData& vol : _volumes ) n += vol._percent; + if (n > 100) zret.push(0, 10, "Volume percent allocation ", n, " is more than 100%"); + return zret; + } + + void VolumeConfig::convertToAbsolute(ts::CacheStripeBlocks n) + { + for ( VolData& vol : _volumes ) { + if (vol._percent) { + vol._alloc = (n * vol._percent + 99) / 100; + } else { + vol._alloc = ts::scaled_up(vol._size); + } + } + } + // All of these free functions need to be moved to the Cache class. Or the Span class? 
bool @@ -618,7 +641,45 @@ Simulate_Span_Allocation(int argc, char *argv[]) if (zret) { zret = cache.loadSpan(SpanFile); if (zret) { - std::cout << "Total physical span size is " << cache.calcTotalSpanConfiguredSize().count() << " stripe blocks" << std::endl; + ts::CacheStripeBlocks total = cache.calcTotalSpanConfiguredSize(); + struct V { + int idx; + ts::CacheStripeBlocks alloc; + ts::CacheStripeBlocks size; + int64_t shares; + }; + std::vector av; + vols.convertToAbsolute(total); + for ( auto& vol : vols ) { + av.push_back({ vol._idx, vol._alloc, 0, 0}); + } + for ( auto span : cache._spans ) { + static const int64_t SCALE = 1000; + int64_t total_shares = 0; + for ( auto& v : av ) { + auto delta = v.alloc - v.size; + if (delta > 0) { + v.shares = delta.count() * ((delta.count() * SCALE) / v.alloc.count()); + total_shares += v.shares; + } else { + v.shares = 0; + } + } + // Now allocate blocks. + ts::CacheStripeBlocks span_blocks = ts::scaled_down(span->_header->num_blocks); + ts::CacheStripeBlocks span_used(0); + std::cout << "Allocation from span of size " << span_blocks.count() << std::endl; + for ( auto& v : av ) { + if (v.shares) { + auto n = (span_blocks * v.shares) / total_shares; + v.size += n; + span_used += n; + std::cout << "Volume " << v.idx << " allocated " << n.count() << " stripe blocks for a total of " << v.size.count() << " of " << v.alloc.count() << std::endl; + std::cout << " with " << v.shares << " of " << total_shares << " " << static_cast((v.shares * SCALE) / total_shares)/10.0 << "%" << std::endl; + } + } + std::cout << "Span allocated " << span_used.count() << " of " << span_blocks.count() << std::endl; + } } } } From 6a31740bc1d0f70690b5df68559d54c2fd75877f Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 1 Feb 2017 05:47:27 -0600 Subject: [PATCH 50/81] CacheTool: Improved stream operators for scalar types. 
--- cmd/traffic_cache_tool/CacheDefs.h | 12 ++++++++++++ cmd/traffic_cache_tool/CacheTool.cc | 10 +++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index ac0719da5f9..6ab9deda881 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -35,6 +35,14 @@ constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; typedef Scalar<1, int64_t, tag::bytes> Bytes; typedef Scalar<1024, int64_t, tag::bytes> Kilobytes; typedef Scalar<1024 * Kilobytes::SCALE, int64_t, tag::bytes> Megabytes; +typedef Scalar<1024 * Megabytes::SCALE, int64_t, tag::bytes> Gigabytes; +typedef Scalar<1024 * Gigabytes::SCALE, int64_t, tag::bytes> Terabytes; + +std::ostream& operator<<(std::ostream& s, Bytes const& n) { return s << n.count() << " bytes"; } +std::ostream& operator<<(std::ostream& s, Kilobytes const& n) { return s << n.count() << " KB"; } +std::ostream& operator<<(std::ostream& s, Megabytes const& n) { return s << n.count() << " MB"; } +std::ostream& operator<<(std::ostream& s, Gigabytes const& n) { return s << n.count() << " GB"; } +std::ostream& operator<<(std::ostream& s, Terabytes const& n) { return s << n.count() << " TB"; } // Units of allocation for stripes. typedef Scalar<128 * Megabytes::SCALE, int64_t, tag::bytes> CacheStripeBlocks; @@ -44,6 +52,10 @@ typedef Scalar<1024 * Kilobytes::SCALE, int64_t, tag::bytes> Megabytes; // Size unit for content stored in cache. typedef Scalar<512, int64_t, tag::bytes> CacheDataBlocks; +std::ostream& operator<<(std::ostream& s, CacheStripeBlocks const& n) { return s << n.count() << " stripe blocks"; } +std::ostream& operator<<(std::ostream& s, CacheStoreBlocks const& n) { return s << n.count() << " store blocks"; } +std::ostream& operator<<(std::ostream& s, CacheDataBlocks const& n) { return s << n.count() << " data blocks"; } + /** A cache span is a representation of raw storage.
It corresponds to a raw disk, disk partition, file, or directory. */ diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index f2221deec87..56b88724fe2 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -567,7 +567,7 @@ VolumeConfig::load(ts::FilePath const& path) if (!percent) { v._size = ts::scaled_up(v._size = n); if (v._size.count() != n) { - zret.push(0, 0, "Line ", ln, " size ", n, " was rounded up to ", v._size.count()); + zret.push(0, 0, "Line ", ln, " size ", n, " was rounded up to ", v._size); } } else if ('%' == *percent && percent.size() == 1) { v._percent = n; @@ -668,17 +668,17 @@ Simulate_Span_Allocation(int argc, char *argv[]) // Now allocate blocks. ts::CacheStripeBlocks span_blocks = ts::scaled_down(span->_header->num_blocks); ts::CacheStripeBlocks span_used(0); - std::cout << "Allocation from span of size " << span_blocks.count() << std::endl; + std::cout << "Allocation from span of " << span_blocks << std::endl; for ( auto& v : av ) { if (v.shares) { auto n = (span_blocks * v.shares) / total_shares; v.size += n; span_used += n; - std::cout << "Volume " << v.idx << " allocated " << n.count() << " stripe blocks for a total of " << v.size.count() << " of " << v.alloc.count() << std::endl; - std::cout << " with " << v.shares << " of " << total_shares << " " << static_cast((v.shares * SCALE) / total_shares)/10.0 << "%" << std::endl; + std::cout << "Volume " << v.idx << " allocated " << n << " for a total of " << v.size << " of " << v.alloc << std::endl; + std::cout << " with " << v.shares << " shares of " << total_shares << " total - " << static_cast((v.shares * SCALE) / total_shares)/10.0 << "%" << std::endl; } } - std::cout << "Span allocated " << span_used.count() << " of " << span_blocks.count() << std::endl; + std::cout << "Span allocated " << span_used << " of " << span_blocks << std::endl; } } } From 2372eb0357d8a0e41439d13f083a1b3e7193f629 Mon Sep 17 00:00:00 2001 
From: "Alan M. Carroll" Date: Thu, 2 Feb 2017 01:06:33 -0600 Subject: [PATCH 51/81] CacheTool: Free space allocation with existing volumes / spans working. --- cmd/traffic_cache_tool/CacheTool.cc | 38 +++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 56b88724fe2..ccdf98b50b1 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -70,6 +70,7 @@ struct Span { ats_scoped_fd _fd; std::unique_ptr _header; int _vol_idx = 0; + ts::CacheStoreBlocks _free_space; }; struct Volume { @@ -78,6 +79,7 @@ struct Volume { int _idx; ///< Stripe index in span. }; int _idx; ///< Volume index. + ts::CacheStoreBlocks _size; ///< Amount of storage allocated. std::vector _stripes; }; @@ -338,8 +340,12 @@ Cache::loadSpanDirect(ts::FilePath const &path, int vol_idx, Bytes size) int nspb = span->_header->num_diskvol_blks; for (auto i = 0; i < nspb; ++i) { ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; - if (stripe.free == 0) + if (stripe.free == 0) { _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef{span.get(), i}); + _volumes[stripe.vol_idx]._size += stripe.len; + } else { + span->_free_space += stripe.len; + } } span->_vol_idx = vol_idx; _spans.push_back(span.release()); @@ -644,38 +650,54 @@ Simulate_Span_Allocation(int argc, char *argv[]) ts::CacheStripeBlocks total = cache.calcTotalSpanConfiguredSize(); struct V { int idx; - ts::CacheStripeBlocks alloc; - ts::CacheStripeBlocks size; + ts::CacheStripeBlocks alloc; // target allocation + ts::CacheStripeBlocks size; // actually allocated space + int64_t deficit; int64_t shares; }; std::vector av; vols.convertToAbsolute(total); for ( auto& vol : vols ) { - av.push_back({ vol._idx, vol._alloc, 0, 0}); + ts::CacheStripeBlocks size(0); + auto spot = cache._volumes.find(vol._idx); + if (spot != cache._volumes.end()) + size = 
ts::scaled_down(spot->second._size); + av.push_back({ vol._idx, vol._alloc, size, 0, 0}); } for ( auto span : cache._spans ) { + if (span->_free_space <= 0) continue; static const int64_t SCALE = 1000; int64_t total_shares = 0; for ( auto& v : av ) { auto delta = v.alloc - v.size; if (delta > 0) { - v.shares = delta.count() * ((delta.count() * SCALE) / v.alloc.count()); + v.deficit = (delta.count() * SCALE) / v.alloc.count(); + v.shares = delta.count() * v.deficit; total_shares += v.shares; + std::cout << "Volume " << v.idx << " allocated " << v.alloc << " has " << v.size << " needs " << (v.alloc - v.size) << " deficit " << v.deficit << std::endl; } else { v.shares = 0; } } // Now allocate blocks. - ts::CacheStripeBlocks span_blocks = ts::scaled_down(span->_header->num_blocks); + ts::CacheStripeBlocks span_blocks = ts::scaled_down(span->_free_space); ts::CacheStripeBlocks span_used(0); std::cout << "Allocation from span of " << span_blocks << std::endl; + // sort by deficit so least relatively full volumes go first. + std::sort(av.begin(), av.end(), [](V const& lhs, V const& rhs) { return lhs.deficit > rhs.deficit; }); for ( auto& v : av ) { if (v.shares) { - auto n = (span_blocks * v.shares) / total_shares; + auto n = (((span_blocks - span_used) * v.shares) + total_shares -1) / total_shares; + auto delta = v.alloc - v.size; + // Not sure why this is needed. But a large and empty volume can dominate the shares + // enough to get more than it actually needs if the other volume are relative small or full. + // I need to do more math to see if the weighting can be adjusted to not have this happen. 
+ n = std::min(n, delta); v.size += n; span_used += n; - std::cout << "Volume " << v.idx << " allocated " << n << " for a total of " << v.size << " of " << v.alloc << std::endl; + std::cout << "Volume " << v.idx << " allocated " << n << " of " << delta << " needed to total of " << v.size << " of " << v.alloc << std::endl; std::cout << " with " << v.shares << " shares of " << total_shares << " total - " << static_cast((v.shares * SCALE) / total_shares)/10.0 << "%" << std::endl; + total_shares -= v.shares; } } std::cout << "Span allocated " << span_used << " of " << span_blocks << std::endl; From e0f5e07255f55fea7313277606fd9ad097f8e06c Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 2 Feb 2017 15:47:11 -0600 Subject: [PATCH 52/81] Scalar: Fix template brokenness in generic comparisons. --- lib/ts/Scalar.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 5f1f813d675..29a21b39b96 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -417,16 +417,16 @@ operator<=(Scalar const &lhs, Scalar const &rhs) // Derived compares. No narrowing optimization needed because if the scales // are the same the nested call with be optimized. -template +template bool -operator>(Scalar const &lhs, Scalar const &rhs) + operator>(Scalar const &lhs, Scalar const &rhs) { return rhs < lhs; } -template +template bool -operator>=(Scalar const &lhs, Scalar const &rhs) + operator>=(Scalar const &lhs, Scalar const &rhs) { return rhs <= lhs; } From 59ba80dface61ae8c6faa2c52d19e16c6a1a2d1f Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 3 Feb 2017 09:47:10 -0600 Subject: [PATCH 53/81] Scalar: Clang format. Tweak label detection to be sure of the method return type. 
--- lib/ts/Scalar.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 29a21b39b96..58b41c2f311 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -419,14 +419,14 @@ operator<=(Scalar const &lhs, Scalar const &rhs) template bool - operator>(Scalar const &lhs, Scalar const &rhs) +operator>(Scalar const &lhs, Scalar const &rhs) { return rhs < lhs; } template bool - operator>=(Scalar const &lhs, Scalar const &rhs) +operator>=(Scalar const &lhs, Scalar const &rhs) { return rhs <= lhs; } @@ -881,7 +881,7 @@ namespace detail } template inline auto - tag_label(std::ostream &s, tag_label_B const &) -> decltype(s << T::label) + tag_label(std::ostream &s, tag_label_B const &) -> decltype(s << T::label, s) { return s << T::label; } From e77cbf0371666be13fa4d106b4f7c7cfdbe2e40b Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 7 Feb 2017 12:26:03 -0600 Subject: [PATCH 54/81] Scalar: Added common type and addition between arbitrary scalar types. --- lib/ts/Scalar.h | 31 ++++++++++++++++++++++++++++--- lib/ts/test_Scalar.cc | 5 +++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 58b41c2f311..94b1c401d3f 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -155,7 +155,7 @@ template class Scalar /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then /// the @c scale_up or @c scale_down casts must be used to indicate the rounding direction. - template self &operator+=(Scalar const &that); + template self &operator+=(Scalar const &that); /// Addition - add @a n as a number of scaled units. self &operator+=(C n); /// Addition - add @a n as a number of scaled units. 
@@ -240,7 +240,7 @@ Scalar::scale() template template -Scalar::Scalar(Scalar const &that) : _n(static_cast(that._n)) +Scalar::Scalar(Scalar const &that) : _n(static_cast(that.count())) { } @@ -625,7 +625,7 @@ operator>=(int n, Scalar const &rhs) template template auto -Scalar::operator+=(Scalar const &that) -> self & + Scalar::operator+=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Addition not permitted - target scale is not an integral multiple of source scale."); @@ -647,6 +647,13 @@ Scalar::operator+=(C n) -> self & return *this; } +template +auto +operator + (Scalar lhs, Scalar const &rhs) -> typename std::common_type,Scalar>::type +{ + return typename std::common_type,Scalar>::type(lhs) += rhs; +} + template Scalar operator+(Scalar const &lhs, Scalar const &rhs) @@ -715,6 +722,13 @@ Scalar::operator-=(C n) -> self & return *this; } +template +auto +operator - (Scalar lhs, Scalar const &rhs) -> typename std::common_type,Scalar>::type +{ + return typename std::common_type,Scalar>::type(lhs) -= rhs; +} + template Scalar operator-(Scalar const &lhs, Scalar const &rhs) @@ -899,5 +913,16 @@ operator<<(ostream &s, ApacheTrafficServer::Scalar const &x) s << x.units(); return ApacheTrafficServer::detail::tag_label(s, b); } + + +/// Compute common type of two scalars. +/// In `std` to overload the base definition. This yields a type that has the common type of the +/// counter type and a scale that is the GCF of the input scales. 
+template < intmax_t N, typename C, intmax_t S, typename I, typename T > +struct common_type, ApacheTrafficServer::Scalar> +{ + typedef std::ratio R; + typedef ApacheTrafficServer::Scalar::type, T> type; +}; } #endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index aa9bb8a7e35..63a3162d76e 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -224,6 +224,11 @@ Test_5() test.check(z1.count() == 30720, "Addition got %ld expected %d", z1.count(), 30720); z1 /= 3; test.check(z1.count() == 10240, "Addition got %ld expected %d", z1.count(), 10240); + + z2 = 3148; + auto x = z2 + MBytes(1); + test.check(x.scale() == z2.scale(), "Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); + test.check(x.count() == 4172, "Common type addition yielded bad count %d - expected %d", x.count(), 4172); } struct KBytes_tag { From 21059073f1380d2ae06180a2b789842e5aaf8cd8 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 8 Feb 2017 00:22:48 -0600 Subject: [PATCH 55/81] CacheTool: checkpoint before Scalar update. --- cmd/traffic_cache_tool/CacheDefs.h | 2 + cmd/traffic_cache_tool/CacheTool.cc | 452 ++++++++++++++++++++++------ lib/tsconfig/Errata.h | 3 + 3 files changed, 357 insertions(+), 100 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index 6ab9deda881..b91260bb30d 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -31,6 +31,8 @@ namespace tag { struct bytes; } namespace ApacheTrafficServer { constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; +/// Maximum allowed volume index. 
+constexpr static int MAX_VOLUME_IDX = 255; typedef Scalar<1, int64_t, tag::bytes> Bytes; typedef Scalar<1024, int64_t, tag::bytes> Kilobytes; diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index ccdf98b50b1..7a4edefee23 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -26,9 +26,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include "File.h" @@ -48,90 +50,273 @@ const Bytes CacheSpan::OFFSET{CacheStoreBlocks{1}}; } using ts::Bytes; +using ts::Megabytes; +using ts::CacheStoreBlocks; +using ts::CacheStripeBlocks; +using ts::CacheStripeDescriptor; +using ts::Errata; +using ts::FilePath; + +using ts::scale_up; +using ts::scale_down; namespace { -ts::FilePath SpanFile; -ts::FilePath VolumeFile; +FilePath SpanFile; +FilePath VolumeFile; ts::CommandTable Commands; // Default this to read only, only enable write if specifically required. int OPEN_RW_FLAGS = O_RDONLY; +struct Stripe; + struct Span { - Span(ts::FilePath const &path) : _path(path) {} - ts::Errata load(); - ts::Errata loadDevice(); + Span(FilePath const &path) : _path(path) {} + Errata load(); + Errata loadDevice(); + + /// No allocated stripes on this span. + bool isEmpty() const; void clearPermanently(); - ts::FilePath _path; + ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); + Errata updateHeader(); + + FilePath _path; ats_scoped_fd _fd; - std::unique_ptr _header; int _vol_idx = 0; - ts::CacheStoreBlocks _free_space; + CacheStoreBlocks _len; ///< Total length of span. + CacheStoreBlocks _free_space; + /// A copy of the data on the disk. + std::unique_ptr _header; + /// Live information about stripes. 
+ std::list _stripes; }; +/* --------------------------------------------------------------------------------------- */ +struct Stripe +{ + Stripe(Span* span, Bytes start, CacheStoreBlocks len); + bool isFree() const { return 0 == _vol_idx; } + + Span* _span; ///< Hosting span. + Bytes _start; ///< Offset of first byte of stripe. + Bytes _content; ///< Start of content. + CacheStoreBlocks _len; ///< Length of stripe. + uint8_t _vol_idx; ///< Volume index. +}; + +Stripe::Stripe(Span* span, Bytes start, CacheStoreBlocks len) + : _span(span), _start(start), _len(len) +{ +} +/* --------------------------------------------------------------------------------------- */ +/// A live volume. +/// Volume data based on data from loaded spans. struct Volume { - struct StripeRef { - Span *_span; ///< Span with stripe. - int _idx; ///< Stripe index in span. - }; int _idx; ///< Volume index. - ts::CacheStoreBlocks _size; ///< Amount of storage allocated. - std::vector _stripes; + CacheStoreBlocks _size; ///< Amount of storage allocated. + std::vector _stripes; }; - -// Data parsed from the volume config file. +/* --------------------------------------------------------------------------------------- */ +/// Data parsed from the volume config file. struct VolumeConfig { - ts::Errata load(ts::FilePath const& path); + Errata load(FilePath const& path); - struct VolData + /// Data direct from the config file. + struct Data { int _idx = 0; ///< Volume index. int _percent = 0; ///< Size if specified as a percent. - ts::Megabytes _size = 0; ///< Size if specified as an absolute. - ts::CacheStripeBlocks _alloc; ///< Allocation size. + Megabytes _size = 0; ///< Size if specified as an absolute. + CacheStripeBlocks _alloc; ///< Allocation size. 
// Methods handy for parsing bool hasSize() const { return _percent > 0 || _size > 0; } bool hasIndex() const { return _idx > 0; } }; - std::vector _volumes; - typedef std::vector::iterator iterator; - typedef std::vector::const_iterator const_iterator; + std::vector _volumes; + typedef std::vector::iterator iterator; + typedef std::vector::const_iterator const_iterator; iterator begin() { return _volumes.begin(); } iterator end() { return _volumes.end(); } const_iterator begin() const { return _volumes.begin(); } const_iterator end() const { return _volumes.end(); } - ts::Errata validatePercentAllocation(); + Errata validatePercentAllocation(); void convertToAbsolute(ts::CacheStripeBlocks total_span_size); }; - ts::Errata VolumeConfig::validatePercentAllocation() { - ts::Errata zret; - int n = 0; - for ( VolData& vol : _volumes ) n += vol._percent; - if (n > 100) zret.push(0, 10, "Volume percent allocation ", n, " is more than 100%"); - return zret; +Errata +VolumeConfig::validatePercentAllocation() { + Errata zret; + int n = 0; + for ( auto& vol : _volumes ) n += vol._percent; + if (n > 100) zret.push(0, 10, "Volume percent allocation ", n, " is more than 100%"); + return zret; +} + +void +VolumeConfig::convertToAbsolute(ts::CacheStripeBlocks n) +{ + for ( auto& vol : _volumes ) { + if (vol._percent) { + vol._alloc = (n * vol._percent + 99) / 100; + } else { + vol._alloc = ts::scale_up(vol._size); + } } +} +/* --------------------------------------------------------------------------------------- */ +struct Cache { + ~Cache(); + + Errata loadSpan(FilePath const &path); + Errata loadSpanConfig(FilePath const &path); + Errata loadSpanDirect(FilePath const &path, int vol_idx = -1, Bytes size = -1); + + enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; + void dumpSpans(SpanDumpDepth depth); + void dumpVolumes(); + + ts::CacheStripeBlocks calcTotalSpanPhysicalSize(); + ts::CacheStripeBlocks calcTotalSpanConfiguredSize(); - void 
VolumeConfig::convertToAbsolute(ts::CacheStripeBlocks n) + std::list _spans; + std::map _volumes; + +}; +/* --------------------------------------------------------------------------------------- */ +/// Temporary structure used for doing allocation computations. +class VolumeAllocator +{ + /// Working struct that tracks allocation information. + struct V { - for ( VolData& vol : _volumes ) { - if (vol._percent) { - vol._alloc = (n * vol._percent + 99) / 100; - } else { - vol._alloc = ts::scaled_up(vol._size); + VolumeConfig::Data const& _config; ///< Configuration instance. + CacheStripeBlocks _size; ///< Current actual size. + int64_t _deficit; + int64_t _shares; + + V(VolumeConfig::Data const& config, CacheStripeBlocks size, int64_t deficit = 0, int64_t shares = 0) + : _config(config), _size(size), _deficit(deficit), _shares(shares) + { + } + V& operator = (V const& that) { + new(this) V(that._config, that._size, that._deficit, that._shares); + return *this; + } + }; + + typedef std::vector AV; + AV _av; ///< Working vector of volume data. + + Cache _cache; ///< Current state. + VolumeConfig _vols; ///< Configuration state. 
+ +public: + + VolumeAllocator(); + + Errata load(FilePath const& spanFile, FilePath const& volumeFile); + Errata fillEmptySpans(); +}; + +VolumeAllocator::VolumeAllocator() { } + +Errata +VolumeAllocator::load(FilePath const& spanFile, FilePath const& volumeFile) +{ + Errata zret; + + if (!volumeFile) zret.push(0, 9, "Volume config file not set"); + if (!spanFile) zret.push(0, 9, "Span file not set"); + + if (zret) { + zret = _vols.load(volumeFile); + if (zret) { + zret = _cache.loadSpan(spanFile); + if (zret) { + CacheStripeBlocks total = _cache.calcTotalSpanConfiguredSize(); + _vols.convertToAbsolute(total); + for ( auto& vol : _vols ) { + CacheStripeBlocks size(0); + auto spot = _cache._volumes.find(vol._idx); + if (spot != _cache._volumes.end()) + size = scale_down(spot->second._size); + _av.push_back({ vol, size, 0, 0}); + } } } } + return zret; +} + +Errata +VolumeAllocator::fillEmptySpans() +{ + Errata zret; + + /// Scaling factor for shares, effectively the accuracy. + static const int64_t SCALE = 1000; + + // Walk the spans, skipping ones that are not empty. + for ( auto span : _cache._spans ) { + int64_t total_shares = 0; + + if (!span->isEmpty()) continue; + std::cout << "Allocating " << scale_down(span->_len) << " from span " << span->_path << std::endl; + + // Walk the volumes and get the relative allocations. + for ( auto& v : _av ) { + auto delta = v._config._alloc - v._size; + if (delta > 0) { + v._deficit = (delta.count() * SCALE) / v._config._alloc.count(); + v._shares = delta.count() * v._deficit; + total_shares += v._shares; + } else { + v._shares = 0; + } + } + // Now allocate blocks. + ts::CacheStripeBlocks span_blocks = ts::scale_down(span->_free_space); + ts::CacheStripeBlocks span_used(0); + + // sort by deficit so least relatively full volumes go first. 
+ std::sort(_av.begin(), _av.end(), [](V const& lhs, V const& rhs) { return lhs._deficit > rhs._deficit; }); + for ( auto& v : _av ) { + if (v._shares) { + auto n = (((span_blocks - span_used) * v._shares) + total_shares -1) / total_shares; + auto delta = v._config._alloc - v._size; + // Not sure why this is needed. But a large and empty volume can dominate the shares + // enough to get more than it actually needs if the other volume are relative small or full. + // I need to do more math to see if the weighting can be adjusted to not have this happen. + n = std::min(n, delta); + v._size += n; + span_used += n; + total_shares -= v._shares; + span->allocStripe(v._config._idx, n); + std::cout << " " << n << " to volume " << v._config._idx << std::endl; + } + } + std::cout << " Total " << span_used << std::endl; + std::cout << " Updating Header ... "; + zret = span->updateHeader(); + if (zret) + std::cout << " Done" << std::endl; + else + std::cout << " Error" << std::endl << zret; + } + return zret; +} +/* --------------------------------------------------------------------------------------- */ // All of these free functions need to be moved to the Cache class. Or the Span class? 
bool @@ -162,7 +347,6 @@ Probe_For_Stripe(ts::StringView &mem) } return zret; } - /* --------------------------------------------------------------------------------------- */ void Calc_Stripe_Data(ts::CacheStripeMeta const &header, ts::CacheStripeMeta const &footer, off_t delta, ts::StripeData &data) @@ -298,31 +482,12 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) } /* --------------------------------------------------------------------------------------- */ -struct Cache { - ~Cache(); - - ts::Errata loadSpan(ts::FilePath const &path); - ts::Errata loadSpanConfig(ts::FilePath const &path); - ts::Errata loadSpanDirect(ts::FilePath const &path, int vol_idx = -1, Bytes size = -1); - - enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; - void dumpSpans(SpanDumpDepth depth); - void dumpVolumes(); - - ts::CacheStripeBlocks calcTotalSpanPhysicalSize(); - ts::CacheStripeBlocks calcTotalSpanConfiguredSize(); - - std::list _spans; - std::map _volumes; - -}; - -ts::Errata -Cache::loadSpan(ts::FilePath const &path) +Errata +Cache::loadSpan(FilePath const &path) { - ts::Errata zret; + Errata zret; if (!path.is_readable()) - zret = ts::Errata::Message(0, EPERM, path," is not readable."); + zret = Errata::Message(0, EPERM, path," is not readable."); else if (path.is_regular_file()) zret = this->loadSpanConfig(path); else @@ -330,22 +495,25 @@ Cache::loadSpan(ts::FilePath const &path) return zret; } -ts::Errata -Cache::loadSpanDirect(ts::FilePath const &path, int vol_idx, Bytes size) +Errata +Cache::loadSpanDirect(FilePath const &path, int vol_idx, Bytes size) { - ts::Errata zret; + Errata zret; std::unique_ptr span(new Span(path)); zret = span->load(); if (zret) { int nspb = span->_header->num_diskvol_blks; for (auto i = 0; i < nspb; ++i) { - ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; - if (stripe.free == 0) { - _volumes[stripe.vol_idx]._stripes.push_back(Volume::StripeRef{span.get(), i}); - _volumes[stripe.vol_idx]._size 
+= stripe.len; + ts::CacheStripeDescriptor &raw = span->_header->stripes[i]; + Stripe* stripe = new Stripe(span.get(), raw.offset, raw.len); + if (raw.free == 0) { + stripe->_vol_idx = raw.vol_idx; + _volumes[stripe->_vol_idx]._stripes.push_back(stripe); + _volumes[stripe->_vol_idx]._size += stripe->_len; } else { - span->_free_space += stripe.len; + span->_free_space += stripe->_len; } + span->_stripes.push_back(stripe); } span->_vol_idx = vol_idx; _spans.push_back(span.release()); @@ -353,13 +521,13 @@ Cache::loadSpanDirect(ts::FilePath const &path, int vol_idx, Bytes size) return zret; } -ts::Errata -Cache::loadSpanConfig(ts::FilePath const &path) +Errata +Cache::loadSpanConfig(FilePath const &path) { static const ts::StringView TAG_ID("id"); static const ts::StringView TAG_VOL("volume"); - ts::Errata zret; + Errata zret; ts::BulkFile cfile(path); if (0 == cfile.load()) { @@ -388,11 +556,11 @@ Cache::loadSpanConfig(ts::FilePath const &path) } } } - zret = this->loadSpan(ts::FilePath(path)); + zret = this->loadSpan(FilePath(path)); } } } else { - zret = ts::Errata::Message(0, EBADF, "Unable to load ", path); + zret = Errata::Message(0, EBADF, "Unable to load ", path); } return zret; } @@ -423,7 +591,7 @@ Cache::dumpVolumes() for (auto const &elt : _volumes) { size_t size = 0; for (auto const &r : elt.second._stripes) - size += r._span->_header->stripes[r._idx].len.units(); + size += r->_len.units(); std::cout << "Volume " << elt.first << " has " << elt.second._stripes.size() << " stripes and " << size << " bytes" << std::endl; @@ -435,7 +603,7 @@ ts::CacheStripeBlocks Cache::calcTotalSpanConfiguredSize() ts::CacheStripeBlocks zret(0); for ( auto span : _spans ) { - zret += ts::scaled_down(span->_header->num_blocks); + zret += ts::scale_down(span->_header->num_blocks); } return zret; } @@ -446,7 +614,7 @@ ts::CacheStripeBlocks Cache::calcTotalSpanPhysicalSize() for ( auto span : _spans ) { // This is broken, physical_size doesn't work for devices, need to fix 
that. - zret += ts::scaled_down(span->_path.physical_size()); + zret += ts::scale_down(span->_path.physical_size()); } return zret; } @@ -457,12 +625,12 @@ Cache::~Cache() delete span; } /* --------------------------------------------------------------------------------------- */ -ts::Errata +Errata Span::load() { - ts::Errata zret; + Errata zret; if (!_path.is_readable()) - zret = ts::Errata::Message(0, EPERM, _path," is not readable."); + zret = Errata::Message(0, EPERM, _path," is not readable."); else if (_path.is_char_device() || _path.is_block_device()) zret = this->loadDevice(); else if (_path.is_dir()) @@ -472,10 +640,10 @@ Span::load() return zret; } -ts::Errata +Errata Span::loadDevice() { - ts::Errata zret; + Errata zret; int flags; flags = OPEN_RW_FLAGS @@ -491,7 +659,7 @@ Span::loadDevice() if (fd) { off_t offset = ts::CacheSpan::OFFSET.units(); - alignas(512) char buff[8192]; + alignas(512) char buff[CacheStoreBlocks::SCALE]; int64_t n = pread(fd, buff, sizeof(buff), offset); if (n >= static_cast(sizeof(ts::SpanHeader))) { ts::SpanHeader &span_hdr = reinterpret_cast(buff); @@ -507,20 +675,87 @@ Span::loadDevice() pread(fd, _header.get(), span_hdr_size, offset); } _fd = fd.release(); + _len = _header->num_blocks; } } else { - zret = ts::Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); + zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); } } else { - zret = ts::Errata::Message(0, errno, "Unable to open ", _path); + zret = Errata::Message(0, errno, "Unable to open ", _path); } return zret; } +ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) +{ + for (auto spot = _stripes.begin(), limit = _stripes.end() ; spot != limit ; ++spot ) { + Stripe* stripe = *spot; + if (stripe->isFree()) { + // Exact match, or if the remains after allocating are less than a stripe block, take it all. 
+ if (stripe->_len <= len && len < (stripe->_len + CacheStripeBlocks(1)) { + stripe->_vol_idx = vol_idx; + return stripe; + } else if (stripe->_len > len) { + Stripe* ns = new Stripe(this, stripe->_start, len); + stripe->_start += len; + stripe->_len -= len; + ns->_vol_idx = vol_idx; + _stripes.insert(spot, ns); + return ns; + } + } + } + return ts::Rv(nullptr, Errata::Message(0,15,"Failed to allocate stripe of size ", len, " - no free block large enough")); +} + +bool Span::isEmpty() const { return std::all_of(_stripes.begin(), _stripes.end(), [] (Stripe* s) { return s->_vol_idx == 0; });} + +Errata Span::updateHeader() +{ + Errata zret; + int n = _stripes.size(); + CacheStripeDescriptor* sd; + CacheStoreBlocks hdr_size = scale_up(sizeof(ts::SpanHeader) + ( n - 1 ) * sizeof(ts::CacheStripeDescriptor)); + void* raw = ats_memalign(512, hdr_size.units()); + ts::SpanHeader* hdr = static_cast(raw); + std::bitset volume_mask; + + hdr->magic = ts::SpanHeader::MAGIC; + hdr->num_free = 0; + hdr->num_used = 0; + hdr->num_diskvol_blks = n; + hdr->num_blocks = _len; + + sd = hdr->stripes; + for ( auto stripe : _stripes ) { + sd->offset = stripe->_start; + sd->len = stripe->_len; + sd->vol_idx = stripe->_vol_idx; + volume_mask[sd->vol_idx] = true; + sd->type = 0; + if (sd->vol_idx == 0) { + sd->free = true; + ++(hdr->num_free); + } else { + sd->free = false; + ++(hdr->num_used); + } + + ++sd; + } + volume_mask[0] = false; // don't include free stripes in distinct volume count. + hdr->num_volumes = volume_mask.count(); + _header.reset(hdr); + ssize_t r = pwrite(_fd, hdr, hdr_size.units(), ts::CacheSpan::OFFSET.units()); + if (r < ts::CacheSpan::OFFSET.units()) + zret.push(0,errno,"Failed to update span - ", strerror(errno)); + return zret; +} + void Span::clearPermanently() { - alignas(512) static char zero[ts::CacheStoreBlocks::SCALE]; // should be all zero, it's static. + alignas(512) static char zero[CacheStoreBlocks::SCALE]; // should be all zero, it's static. 
std::cout << "Clearing " << _path << " permanently on disk "; ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); if (n == sizeof(zero)) @@ -535,13 +770,13 @@ Span::clearPermanently() std::cout << std::endl; } /* --------------------------------------------------------------------------------------- */ -ts::Errata -VolumeConfig::load(ts::FilePath const& path) +Errata +VolumeConfig::load(FilePath const& path) { static const ts::StringView TAG_SIZE("size"); static const ts::StringView TAG_VOL("volume"); - ts::Errata zret; + Errata zret; int ln = 0; @@ -549,7 +784,7 @@ VolumeConfig::load(ts::FilePath const& path) if (0 == cfile.load()) { ts::StringView content = cfile.content(); while (content) { - VolData v; + Data v; ++ln; ts::StringView line = content.splitPrefix('\n'); @@ -571,7 +806,7 @@ VolumeConfig::load(ts::FilePath const& path) if (text) { ts::StringView percent(text.end(), value.end()); // clip parsed number. if (!percent) { - v._size = ts::scaled_up(v._size = n); + v._size = ts::scale_up(v._size = n); if (v._size.count() != n) { zret.push(0, 0, "Line ", ln, " size ", n, " was rounded up to ", v._size); } @@ -606,7 +841,7 @@ VolumeConfig::load(ts::FilePath const& path) } } } else { - zret = ts::Errata::Message(0, EBADF, "Unable to load ", path); + zret = Errata::Message(0, EBADF, "Unable to load ", path); } return zret; } @@ -619,10 +854,10 @@ struct option Options[] = { }; } -ts::Errata +Errata List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) { - ts::Errata zret; + Errata zret; Cache cache; if ((zret = cache.loadSpan(SpanFile))) { @@ -632,10 +867,25 @@ List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) return zret; } -ts::Errata +Errata +Cmd_Allocate_Empty_Spans(int argc, char *argv[]) +{ + Errata zret; + VolumeAllocator va; + + OPEN_RW_FLAGS = O_RDWR; + zret = va.load(SpanFile, VolumeFile); + if (zret) { + va.fillEmptySpans(); + } + + return zret; +} + +Errata Simulate_Span_Allocation(int argc, char 
*argv[]) { - ts::Errata zret; + Errata zret; VolumeConfig vols; Cache cache; @@ -661,7 +911,7 @@ Simulate_Span_Allocation(int argc, char *argv[]) ts::CacheStripeBlocks size(0); auto spot = cache._volumes.find(vol._idx); if (spot != cache._volumes.end()) - size = ts::scaled_down(spot->second._size); + size = ts::scale_down(spot->second._size); av.push_back({ vol._idx, vol._alloc, size, 0, 0}); } for ( auto span : cache._spans ) { @@ -680,7 +930,7 @@ Simulate_Span_Allocation(int argc, char *argv[]) } } // Now allocate blocks. - ts::CacheStripeBlocks span_blocks = ts::scaled_down(span->_free_space); + ts::CacheStripeBlocks span_blocks = ts::scale_down(span->_free_space); ts::CacheStripeBlocks span_used(0); std::cout << "Allocation from span of " << span_blocks << std::endl; // sort by deficit so least relatively full volumes go first. @@ -708,10 +958,10 @@ Simulate_Span_Allocation(int argc, char *argv[]) return zret; } -ts::Errata +Errata Clear_Spans(int argc, char *argv[]) { - ts::Errata zret; + Errata zret; Cache cache; OPEN_RW_FLAGS = O_RDWR; @@ -752,6 +1002,8 @@ main(int argc, char *argv[]) [](int argc, char *argv[]) { return List_Stripes(Cache::SpanDumpDepth::STRIPE, argc, argv); }); Commands.add(std::string("clear"), std::string("Clear spans"), &Clear_Spans); Commands.add(std::string("volumes"), std::string("Volumes"), &Simulate_Span_Allocation); + Commands.add(std::string("alloc"), std::string("Storage allocation")) + .subCommand(std::string("free"), std::string("Allocate storage on free (empty) spans"), &Cmd_Allocate_Empty_Spans); Commands.setArgIndex(optind); @@ -760,7 +1012,7 @@ main(int argc, char *argv[]) exit(1); } - ts::Errata result = Commands.invoke(argc, argv); + Errata result = Commands.invoke(argc, argv); if (result.size()) { std::cerr << result; diff --git a/lib/tsconfig/Errata.h b/lib/tsconfig/Errata.h index 7c959416512..278697ac103 100644 --- a/lib/tsconfig/Errata.h +++ b/lib/tsconfig/Errata.h @@ -816,6 +816,9 @@ inline Errata::Errata(Id id, Code 
code, std::string const& text) { inline Errata::Errata(Message const& msg) { this->push(msg); } +inline Errata::Errata(Message && msg) { + this->push(std::move(msg)); +} inline Errata::operator bool() const { return this->isOK(); } From e04bd22ed972c71b85aafb24c7d7a22bfa9c620e Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 8 Feb 2017 03:15:23 -0600 Subject: [PATCH 56/81] CacheTool: Updated Scalar to handle a compile problem. Almost working, need to tweak it to reset a span to a single free stripe. --- cmd/traffic_cache_tool/CacheTool.cc | 48 +++++++++++++++++------------ cmd/traffic_cache_tool/Makefile.am | 1 + 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 7a4edefee23..a9950d86743 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -90,6 +90,7 @@ struct Span { int _vol_idx = 0; CacheStoreBlocks _len; ///< Total length of span. CacheStoreBlocks _free_space; + ink_device_geometry _geometry; ///< Geometry of span. /// A copy of the data on the disk. std::unique_ptr _header; /// Live information about stripes. 
@@ -658,27 +659,36 @@ Span::loadDevice() ats_scoped_fd fd(_path.open(flags)); if (fd) { - off_t offset = ts::CacheSpan::OFFSET.units(); - alignas(512) char buff[CacheStoreBlocks::SCALE]; - int64_t n = pread(fd, buff, sizeof(buff), offset); - if (n >= static_cast(sizeof(ts::SpanHeader))) { - ts::SpanHeader &span_hdr = reinterpret_cast(buff); - // See if it looks valid - if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { - int nspb = span_hdr.num_diskvol_blks; - size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); - _header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); - if (span_hdr_size <= sizeof(buff)) { - memcpy(_header.get(), buff, span_hdr_size); + if (ink_file_get_geometry(_fd, &_geometry)) { + off_t offset = ts::CacheSpan::OFFSET.units(); + alignas(512) char buff[CacheStoreBlocks::SCALE]; + + + int64_t n = pread(fd, buff, sizeof(buff), offset); + if (n >= static_cast(sizeof(ts::SpanHeader))) { + ts::SpanHeader &span_hdr = reinterpret_cast(buff); + // See if it looks valid + if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { + int nspb = span_hdr.num_diskvol_blks; + size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); + _header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); + if (span_hdr_size <= sizeof(buff)) { + memcpy(_header.get(), buff, span_hdr_size); + } else { + // TODO - check the pread return + pread(fd, _header.get(), span_hdr_size, offset); + } + _fd = fd.release(); + _len = _header->num_blocks; + } else { - // TODO - check the pread return - pread(fd, _header.get(), span_hdr_size, offset); + zret = Errata::Message(0, 22, "Span header for ", _path, " is invalid"); } - _fd = fd.release(); - _len = _header->num_blocks; + } else { + zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), 
']'); } } else { - zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); + zret = Errata::Message(0, 23, "Unable to get device geometry for ", _path); } } else { zret = Errata::Message(0, errno, "Unable to open ", _path); @@ -692,7 +702,7 @@ ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) Stripe* stripe = *spot; if (stripe->isFree()) { // Exact match, or if the remains after allocating are less than a stripe block, take it all. - if (stripe->_len <= len && len < (stripe->_len + CacheStripeBlocks(1)) { + if (stripe->_len <= len && len < (stripe->_len + CacheStripeBlocks(1))) { stripe->_vol_idx = vol_idx; return stripe; } else if (stripe->_len > len) { @@ -705,7 +715,7 @@ ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) } } } - return ts::Rv(nullptr, Errata::Message(0,15,"Failed to allocate stripe of size ", len, " - no free block large enough")); + return ts::Rv(nullptr, Errata::Message(0,15,"Failed to allocate stripe of size ", len, " - no free block large enough")); } bool Span::isEmpty() const { return std::all_of(_stripes.begin(), _stripes.end(), [] (Stripe* s) { return s->_vol_idx == 0; });} diff --git a/cmd/traffic_cache_tool/Makefile.am b/cmd/traffic_cache_tool/Makefile.am index 79ddab352d7..b1805b4b390 100644 --- a/cmd/traffic_cache_tool/Makefile.am +++ b/cmd/traffic_cache_tool/Makefile.am @@ -26,6 +26,7 @@ traffic_cache_tool_SOURCES = CacheDefs.h CacheTool.cc File.h File.cc Command.h C traffic_cache_tool_LDADD = \ $(top_builddir)/lib/ts/.libs/MemView.o \ $(top_builddir)/lib/ts/.libs/ink_memory.o \ + $(top_builddir)/lib/ts/.libs/ink_file.o \ $(top_builddir)/lib/ts/.libs/ink_error.o \ $(top_builddir)/lib/tsconfig/.libs/Errata.o From d40116d8c65b7c3c31b95e0c0ebdea62ab41037e Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Wed, 8 Feb 2017 21:23:51 -0600 Subject: [PATCH 57/81] Scalar: Removed base class, experimentally it doesn't seem needed. 
--- lib/ts/Scalar.h | 70 +++++++------------------------------------ lib/ts/test_Scalar.cc | 18 ++++++++--- 2 files changed, 25 insertions(+), 63 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 94b1c401d3f..bc73906c8da 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -41,52 +41,6 @@ namespace ApacheTrafficServer { template class Scalar; -namespace detail -{ - // Internal class to deal with operator overload issues. - // Because the type of integers with no explicit type is (int) that type is special in terms of overloads. - // To be convienet @c Scalar should support operators for its internal declared counter type and (int). - // This creates ambiguous overloads when C is (int). This class lets the (int) overloads be moved to a super - // class so conflict causes overridding rather than ambiguity. - template struct ScalarArithmetics { - typedef ApacheTrafficServer::Scalar S; - S &operator+=(int); - S &operator-=(int); - S &operator*=(int); - S &operator/=(int); - - protected: - // Only let subclasses construct, as this class only makes sense as an abstract superclass. - ScalarArithmetics(); - }; - - template ScalarArithmetics::ScalarArithmetics() {} - template - auto - ScalarArithmetics::operator+=(int n) -> S & - { - return static_cast(this).operator+=(static_cast(n)); - } - template - auto - ScalarArithmetics::operator-=(int n) -> S & - { - return static_cast(this).operator-=(static_cast(n)); - } - template - auto - ScalarArithmetics::operator*=(int n) -> S & - { - return static_cast(this).operator*=(static_cast(n)); - } - template - auto - ScalarArithmetics::operator/=(int n) -> S & - { - return static_cast(this).operator/=(static_cast(n)); - } -} - /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. 
The "value" of the instance is @a @@ -112,7 +66,7 @@ namespace detail @see scaled_up @see scaled_down */ -template class Scalar : public detail::ScalarArithmetics +template class Scalar { typedef Scalar self; ///< Self reference type. @@ -187,10 +141,10 @@ template class Scalar self &operator/=(C n); /// Scale value @a x to this type, rounding up. - template self scale_up(Scalar const &x); + template static self scale_up(Scalar const &x); /// Scale value @a x to this type, rounding down. - template self scale_down(Scalar const &x); + template static self scale_down(Scalar const &x); /// Run time access to the scale (template arg @a N). static constexpr intmax_t scale(); @@ -625,7 +579,7 @@ operator>=(int n, Scalar const &rhs) template template auto - Scalar::operator+=(Scalar const &that) -> self & +Scalar::operator+=(Scalar const &that) -> self & { typedef std::ratio R; static_assert(R::den == 1, "Addition not permitted - target scale is not an integral multiple of source scale."); @@ -649,9 +603,9 @@ Scalar::operator+=(C n) -> self & template auto -operator + (Scalar lhs, Scalar const &rhs) -> typename std::common_type,Scalar>::type +operator+(Scalar lhs, Scalar const &rhs) -> typename std::common_type, Scalar>::type { - return typename std::common_type,Scalar>::type(lhs) += rhs; + return typename std::common_type, Scalar>::type(lhs) += rhs; } template @@ -724,9 +678,9 @@ Scalar::operator-=(C n) -> self & template auto -operator - (Scalar lhs, Scalar const &rhs) -> typename std::common_type,Scalar>::type +operator-(Scalar lhs, Scalar const &rhs) -> typename std::common_type, Scalar>::type { - return typename std::common_type,Scalar>::type(lhs) -= rhs; + return typename std::common_type, Scalar>::type(lhs) -= rhs; } template @@ -914,15 +868,13 @@ operator<<(ostream &s, ApacheTrafficServer::Scalar const &x) return ApacheTrafficServer::detail::tag_label(s, b); } - /// Compute common type of two scalars. /// In `std` to overload the base definition. 
This yields a type that has the common type of the /// counter type and a scale that is the GCF of the input scales. -template < intmax_t N, typename C, intmax_t S, typename I, typename T > -struct common_type, ApacheTrafficServer::Scalar> -{ +template +struct common_type, ApacheTrafficServer::Scalar> { typedef std::ratio R; - typedef ApacheTrafficServer::Scalar::type, T> type; + typedef ApacheTrafficServer::Scalar::type, T> type; }; } #endif // TS_SCALAR_H diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index 63a3162d76e..e3bcb57537a 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -225,10 +225,10 @@ Test_5() z1 /= 3; test.check(z1.count() == 10240, "Addition got %ld expected %d", z1.count(), 10240); - z2 = 3148; + z2 = 3148; auto x = z2 + MBytes(1); - test.check(x.scale() == z2.scale(), "Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); - test.check(x.count() == 4172, "Common type addition yielded bad count %d - expected %d", x.count(), 4172); + test.check(x.scale() == z2.scale(), "Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); + test.check(x.count() == 4172, "Common type addition yielded bad count %d - expected %d", x.count(), 4172); } struct KBytes_tag { @@ -255,8 +255,9 @@ test_Compile() { // These tests aren't normally run, they exist to detect compiler issues. 
- typedef ts::Scalar<1024, long int> KBytes; + typedef ts::Scalar<1024, short> KBytes; typedef ts::Scalar<1024, int> KiBytes; + int delta = 10; KBytes x(12); KiBytes y(12); @@ -265,6 +266,15 @@ test_Compile() std::cout << "Operator > works" << std::endl; if (y > 12) std::cout << "Operator > works" << std::endl; + + (void)(x += 10); + (void)(x += static_cast(10)); + (void)(x += static_cast(10)); + (void)(x += delta); + (void)(y += 10); + (void)(y += static_cast(10)); + (void)(y += static_cast(10)); + (void)(y += delta); } int From aa2eb9342f60e433c1f004c1d4b9fb3facaccad0 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 9 Feb 2017 13:53:30 -0600 Subject: [PATCH 58/81] Scalar: Comparison operator bug fixes. --- lib/ts/Scalar.h | 89 ++++++++++++++++++++++++++++++++++++++++--- lib/ts/test_Scalar.cc | 30 +++++++++++++++ 2 files changed, 113 insertions(+), 6 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index bc73906c8da..0b2342a30c3 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -41,6 +41,39 @@ namespace ApacheTrafficServer { template class Scalar; +/** Helper class for @c Scalar. + + This is used to wrap a value for the @c Scalar constructor to indicate the value is in units (not scaled) and should be rounded + up. +*/ +template struct unit_ceil_t { + C _n; + template operator unit_ceil_t() { return static_cast(_n); } +}; + +template +constexpr unit_ceil_t +unit_ceil(C n) +{ + return unit_ceil_t{n}; +} + +/** Helper class for @c Scalar. + + This is used to wrap a value for the @c Scalar constructor to indicate the value is in units (not scaled) and should be rounded + down. +*/ +template struct unit_floor_t { + C _n; + template operator unit_floor_t() { return static_cast(_n); } +}; + +template +constexpr unit_floor_t +unit_floor(C n) +{ + return unit_floor_t{n}; +} /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. 
The "value" of the instance is @a @@ -80,6 +113,10 @@ template class Scalar constexpr Scalar(); ///< Default contructor. ///< Construct to have @a n scaled units. constexpr Scalar(Count n); + /// Scale units value @a x to this type, rounding down. + constexpr Scalar(unit_ceil_t x); + /// Scale units value @a x to this type, rounding down. + constexpr Scalar(unit_floor_t x); /// Copy constructor for same scale. template Scalar(Scalar const &that); @@ -91,6 +128,8 @@ template class Scalar /// Direct assignment. /// The count is set to @a n. self &operator=(Count n); + self &operator=(unit_ceil_t n); + self &operator=(unit_floor_t n); /// The number of scale units. constexpr Count count() const; @@ -142,9 +181,13 @@ template class Scalar /// Scale value @a x to this type, rounding up. template static self scale_up(Scalar const &x); + /// Scale value units value @a x to this type, rounding up. + static self scale_up(C x); /// Scale value @a x to this type, rounding down. template static self scale_down(Scalar const &x); + /// Scale value units value @a x to this type, rounding down. + static self scale_down(C x); /// Run time access to the scale (template arg @a N). 
static constexpr intmax_t scale(); @@ -159,6 +202,12 @@ template constexpr Scalar::Scalar( template constexpr Scalar::Scalar(Count n) : _n(n) { } +template constexpr Scalar::Scalar(unit_ceil_t n) : _n(scale_up(n._n)._n) +{ +} +template constexpr Scalar::Scalar(unit_floor_t n) : _n(scale_down(n._n)._n) +{ +} template constexpr auto Scalar::count() const -> Count @@ -186,6 +235,20 @@ Scalar::operator=(self const &that) -> self & return *this; } template +inline auto +Scalar::operator=(unit_ceil_t n) -> self & +{ + *this = scale_up(n._n); + return *this; +} +template +inline auto +Scalar::operator=(unit_floor_t n) -> self & +{ + *this = scale_down(n._n); + return *this; +} +template constexpr inline intmax_t Scalar::scale() { @@ -335,9 +398,9 @@ operator<(Scalar const &lhs, Scalar const &rhs) // constant causes the never taken paths to be dropped so there are no runtime conditional // checks, even with no optimization at all. if (R::den == 1) { - return lhs.count() < rhs.count() * R::num; + return lhs.count() * R::num < rhs.count(); } else if (R::num == 1) { - return lhs.count() * R::den < rhs.count(); + return lhs.count() < rhs.count() * R::den; } else return lhs.units() < rhs.units(); } @@ -348,9 +411,9 @@ operator==(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { - return lhs.count() == rhs.count() * R::num; + return lhs.count() * R::num == rhs.count(); } else if (R::num == 1) { - return lhs.count() * R::den == rhs.count(); + return lhs.count() == rhs.count() * R::den; } else return lhs.units() == rhs.units(); } @@ -361,9 +424,9 @@ operator<=(Scalar const &lhs, Scalar const &rhs) { typedef std::ratio R; if (R::den == 1) { - return lhs.count() <= rhs.count() * R::num; + return lhs.count() * R::num <= rhs.count(); } else if (R::num == 1) { - return lhs.count() * R::den <= rhs.count(); + return lhs.count() <= rhs.count() * R::den; } else return lhs.units() <= rhs.units(); } @@ -828,6 +891,20 @@ Scalar::scale_down(Scalar const &that) -> 
self return ApacheTrafficServer::scale_down(that); } +template +auto +Scalar::scale_up(C x) -> self +{ + return ApacheTrafficServer::scale_up(x); +} + +template +auto +Scalar::scale_down(C x) -> self +{ + return ApacheTrafficServer::scale_down(x); +} + namespace detail { // These classes exist only to create distinguishable overloads. diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index e3bcb57537a..fb6f1a9c0dc 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -229,6 +229,35 @@ Test_5() auto x = z2 + MBytes(1); test.check(x.scale() == z2.scale(), "Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); test.check(x.count() == 4172, "Common type addition yielded bad count %d - expected %d", x.count(), 4172); + + z2 = z2.scale_down(262150); + test.check(z2.count() == 256, "Scale down bad count %d - expected %d", z2.count(), 256); + + z2 = ts::unit_ceil(262150); + test.check(z2.count() == 257, "Scale down bad count %d - expected %d", z2.count(), 257); + + KBytes q(ts::unit_floor(262150)); + test.check(q.count() == 256, "Scale down bad count %d - expected %d", q.count(), 256); +} + +// test comparisons +void +Test_6() +{ + using ts::Scalar; + typedef Scalar<1024, ssize_t> KB; + typedef Scalar MB; + typedef Scalar<8 * KB::SCALE, ssize_t> StoreBlocks; + typedef Scalar<127 * MB::SCALE, ssize_t> SpanBlocks; + + TestBox test("TS Scalar: comparison operator tests"); + + StoreBlocks a(80759700); + SpanBlocks b(4968); + SpanBlocks delta(1); + + test.check(a < b, "[1] Less than incorrect %ld < %ld", a.units(), b.units()); + test.check(b < (a + delta), "[2] Less than incorrect %ld < %ld", b.units(), (a + delta).units()); } struct KBytes_tag { @@ -285,6 +314,7 @@ main(int, char **) Test_3(); Test_4(); Test_5(); + Test_6(); Test_IO(); TestBox::print_summary(); return 0; From bfacf3654d2b84a8a3929207f000900f2f5e0ddf Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Fri, 10 Feb 2017 01:50:04 -0600 Subject: [PATCH 59/81] Checkpoint: Almost working may need some Scalar fixes too. --- cmd/traffic_cache_tool/CacheTool.cc | 111 ++++++++++++++++++---------- cmd/traffic_cache_tool/Makefile.am | 2 + 2 files changed, 76 insertions(+), 37 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index a9950d86743..9385e5efa1f 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -80,6 +81,10 @@ struct Span { /// No allocated stripes on this span. bool isEmpty() const; + /// Replace all existing stripes with a single unallocated stripe covering the span. + Errata clear(); + + /// This is broken and needs to be cleaned up. void clearPermanently(); ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); @@ -88,6 +93,9 @@ struct Span { FilePath _path; ats_scoped_fd _fd; int _vol_idx = 0; + CacheStoreBlocks _base; ///< Offset to first usable byte. + CacheStoreBlocks _offset; ///< Offset to first content byte. + // The space between _base and _offset is where the span information is stored. CacheStoreBlocks _len; ///< Total length of span. CacheStoreBlocks _free_space; ink_device_geometry _geometry; ///< Geometry of span. @@ -182,6 +190,9 @@ struct Cache { Errata loadSpanConfig(FilePath const &path); Errata loadSpanDirect(FilePath const &path, int vol_idx = -1, Bytes size = -1); + /// Change the @a span to have a single, unused stripe occupying the entire @a span. + Errata clearSpan(Span* span); + enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; void dumpSpans(SpanDumpDepth depth); void dumpVolumes(); @@ -287,7 +298,7 @@ VolumeAllocator::fillEmptySpans() } } // Now allocate blocks. 
- ts::CacheStripeBlocks span_blocks = ts::scale_down(span->_free_space); + ts::CacheStripeBlocks span_blocks = ts::scale_up(span->_free_space); ts::CacheStripeBlocks span_used(0); // sort by deficit so least relatively full volumes go first. @@ -503,20 +514,24 @@ Cache::loadSpanDirect(FilePath const &path, int vol_idx, Bytes size) std::unique_ptr span(new Span(path)); zret = span->load(); if (zret) { - int nspb = span->_header->num_diskvol_blks; - for (auto i = 0; i < nspb; ++i) { - ts::CacheStripeDescriptor &raw = span->_header->stripes[i]; - Stripe* stripe = new Stripe(span.get(), raw.offset, raw.len); - if (raw.free == 0) { - stripe->_vol_idx = raw.vol_idx; - _volumes[stripe->_vol_idx]._stripes.push_back(stripe); - _volumes[stripe->_vol_idx]._size += stripe->_len; - } else { - span->_free_space += stripe->_len; + if (span->_header) { + int nspb = span->_header->num_diskvol_blks; + for (auto i = 0; i < nspb; ++i) { + ts::CacheStripeDescriptor &raw = span->_header->stripes[i]; + Stripe* stripe = new Stripe(span.get(), raw.offset, raw.len); + if (raw.free == 0) { + stripe->_vol_idx = raw.vol_idx; + _volumes[stripe->_vol_idx]._stripes.push_back(stripe); + _volumes[stripe->_vol_idx]._size += stripe->_len; + } else { + span->_free_space += stripe->_len; + } + span->_stripes.push_back(stripe); } - span->_stripes.push_back(stripe); + span->_vol_idx = vol_idx; + } else { + span->clear(); } - span->_vol_idx = vol_idx; _spans.push_back(span.release()); } return zret; @@ -571,15 +586,19 @@ Cache::dumpSpans(SpanDumpDepth depth) { if (depth >= SpanDumpDepth::SPAN) { for (auto span : _spans) { - std::cout << "Span: " << span->_path << " " << span->_header->num_volumes << " Volumes " << span->_header->num_used - << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " - << span->_header->num_blocks.units() << " blocks" << std::endl; - for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { - ts::CacheStripeDescriptor 
&stripe = span->_header->stripes[i]; - std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() - << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; - if (depth >= SpanDumpDepth::STRIPE) { - Open_Stripe(span->_fd, stripe); + if (nullptr == span->_header) { + std::cout << "Span: " << span->_path << " is uninitialized" << std::endl; + } else { + std::cout << "Span: " << span->_path << " " << span->_header->num_volumes << " Volumes " << span->_header->num_used + << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " + << span->_header->num_blocks.units() << " blocks" << std::endl; + for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { + ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; + std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() + << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; + if (depth >= SpanDumpDepth::STRIPE) { + Open_Stripe(span->_fd, stripe); + } } } } @@ -604,7 +623,7 @@ ts::CacheStripeBlocks Cache::calcTotalSpanConfiguredSize() ts::CacheStripeBlocks zret(0); for ( auto span : _spans ) { - zret += ts::scale_down(span->_header->num_blocks); + zret += ts::scale_down(span->_len); } return zret; } @@ -659,31 +678,34 @@ Span::loadDevice() ats_scoped_fd fd(_path.open(flags)); if (fd) { - if (ink_file_get_geometry(_fd, &_geometry)) { + if (ink_file_get_geometry(fd, _geometry)) { off_t offset = ts::CacheSpan::OFFSET.units(); - alignas(512) char buff[CacheStoreBlocks::SCALE]; - - - int64_t n = pread(fd, buff, sizeof(buff), offset); - if (n >= static_cast(sizeof(ts::SpanHeader))) { + CacheStoreBlocks span_hdr_size(1); // default. 
+ static const ssize_t BUFF_SIZE = CacheStoreBlocks::SCALE; // match default span_hdr_size + alignas(512) char buff[BUFF_SIZE]; + ssize_t n = pread(fd, buff, BUFF_SIZE, offset); + if (n >= BUFF_SIZE) { ts::SpanHeader &span_hdr = reinterpret_cast(buff); // See if it looks valid if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { int nspb = span_hdr.num_diskvol_blks; - size_t span_hdr_size = sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor); - _header.reset(new (malloc(span_hdr_size)) ts::SpanHeader); - if (span_hdr_size <= sizeof(buff)) { - memcpy(_header.get(), buff, span_hdr_size); + span_hdr_size = span_hdr_size.scale_up(Bytes(sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor))); + _header.reset(new (malloc(span_hdr_size.units())) ts::SpanHeader); + if (span_hdr_size.units() <= BUFF_SIZE) { + memcpy(_header.get(), buff, span_hdr_size.units()); } else { // TODO - check the pread return - pread(fd, _header.get(), span_hdr_size, offset); + pread(fd, _header.get(), span_hdr_size.units(), offset); } - _fd = fd.release(); _len = _header->num_blocks; - } else { - zret = Errata::Message(0, 22, "Span header for ", _path, " is invalid"); + zret = Errata::Message(0, 0, "Span header for ", _path, " is invalid"); + _len = _len.scale_down(Bytes(_geometry.totalsz)); } + // valid FD means the device is accessible and has enough storage to be configured. 
+ _fd = fd.release(); + _base = _base.scale_up(Bytes(offset)); + _offset = _base + span_hdr_size; } else { zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); } @@ -720,6 +742,21 @@ ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) bool Span::isEmpty() const { return std::all_of(_stripes.begin(), _stripes.end(), [] (Stripe* s) { return s->_vol_idx == 0; });} +Errata +Span::clear() +{ + Stripe* stripe; + std::for_each(_stripes.begin(), _stripes.end(), [](Stripe* s) { delete s; }); + _stripes.clear(); + + stripe = new Stripe(this, _offset, _len - _offset); + _stripes.push_back(stripe); + _free_space = stripe->_len; + + return Errata(); +} + + Errata Span::updateHeader() { Errata zret; diff --git a/cmd/traffic_cache_tool/Makefile.am b/cmd/traffic_cache_tool/Makefile.am index b1805b4b390..b6c02bb5c0c 100644 --- a/cmd/traffic_cache_tool/Makefile.am +++ b/cmd/traffic_cache_tool/Makefile.am @@ -28,6 +28,8 @@ traffic_cache_tool_LDADD = \ $(top_builddir)/lib/ts/.libs/ink_memory.o \ $(top_builddir)/lib/ts/.libs/ink_file.o \ $(top_builddir)/lib/ts/.libs/ink_error.o \ + $(top_builddir)/lib/ts/.libs/ink_string.o \ + $(top_builddir)/lib/ts/.libs/ink_assert.o \ $(top_builddir)/lib/tsconfig/.libs/Errata.o all-am: Makefile $(PROGRAMS) From d262a3267704d4a589ea2b4574808032b771f622 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 04:04:51 -0600 Subject: [PATCH 60/81] CacheTool; First successful recovery of a span. --- cmd/traffic_cache_tool/CacheTool.cc | 42 ++++++++++++++++++----------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 9385e5efa1f..15f1d7b36b7 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -115,7 +115,8 @@ struct Stripe Bytes _start; ///< Offset of first byte of stripe. Bytes _content; ///< Start of content. 
CacheStoreBlocks _len; ///< Length of stripe. - uint8_t _vol_idx; ///< Volume index. + uint8_t _vol_idx = 0; ///< Volume index. + uint8_t _type = 0; ///< Stripe type. }; Stripe::Stripe(Span* span, Bytes start, CacheStoreBlocks len) @@ -521,6 +522,7 @@ Cache::loadSpanDirect(FilePath const &path, int vol_idx, Bytes size) Stripe* stripe = new Stripe(span.get(), raw.offset, raw.len); if (raw.free == 0) { stripe->_vol_idx = raw.vol_idx; + stripe->_type = raw.type; _volumes[stripe->_vol_idx]._stripes.push_back(stripe); _volumes[stripe->_vol_idx]._size += stripe->_len; } else { @@ -686,6 +688,7 @@ Span::loadDevice() ssize_t n = pread(fd, buff, BUFF_SIZE, offset); if (n >= BUFF_SIZE) { ts::SpanHeader &span_hdr = reinterpret_cast(buff); + _base = _base.scale_up(Bytes(offset)); // See if it looks valid if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { int nspb = span_hdr.num_diskvol_blks; @@ -700,11 +703,10 @@ Span::loadDevice() _len = _header->num_blocks; } else { zret = Errata::Message(0, 0, "Span header for ", _path, " is invalid"); - _len = _len.scale_down(Bytes(_geometry.totalsz)); + _len = _len.scale_down(Bytes(_geometry.totalsz)) - _base; } // valid FD means the device is accessible and has enough storage to be configured. _fd = fd.release(); - _base = _base.scale_up(Bytes(offset)); _offset = _base + span_hdr_size; } else { zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); @@ -723,17 +725,21 @@ ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) for (auto spot = _stripes.begin(), limit = _stripes.end() ; spot != limit ; ++spot ) { Stripe* stripe = *spot; if (stripe->isFree()) { - // Exact match, or if the remains after allocating are less than a stripe block, take it all. 
- if (stripe->_len <= len && len < (stripe->_len + CacheStripeBlocks(1))) { - stripe->_vol_idx = vol_idx; - return stripe; - } else if (stripe->_len > len) { - Stripe* ns = new Stripe(this, stripe->_start, len); - stripe->_start += len; - stripe->_len -= len; - ns->_vol_idx = vol_idx; - _stripes.insert(spot, ns); - return ns; + if (len < stripe->_len) { + // If the remains would be less than a stripe block, just take it all. + if (stripe->_len <= (len + CacheStripeBlocks(1))) { + stripe->_vol_idx = vol_idx; + stripe->_type = 1; + return stripe; + } else { + Stripe* ns = new Stripe(this, stripe->_start, len); + stripe->_start += len; + stripe->_len -= len; + ns->_vol_idx = vol_idx; + ns->_type = 1; + _stripes.insert(spot, ns); + return ns; + } } } } @@ -749,6 +755,12 @@ Span::clear() std::for_each(_stripes.begin(), _stripes.end(), [](Stripe* s) { delete s; }); _stripes.clear(); + // Gah, due to lack of anything better, TS depends on the number of usable blocks to be consistent + // with internal calculations so have to match that here. Yay. + CacheStoreBlocks eff = _len - _base; // starting # of usable blocks. + // The maximum number of volumes that can store stored, accounting for the space used to store the descriptors. + int n = (eff.units() - sizeof(ts::SpanHeader)) / (CacheStripeBlocks::SCALE + sizeof(CacheStripeDescriptor)); + _offset = _base + _offset.scale_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(CacheStripeDescriptor)); stripe = new Stripe(this, _offset, _len - _offset); _stripes.push_back(stripe); _free_space = stripe->_len; @@ -778,8 +790,8 @@ Errata Span::updateHeader() sd->offset = stripe->_start; sd->len = stripe->_len; sd->vol_idx = stripe->_vol_idx; + sd->type = stripe->_type; volume_mask[sd->vol_idx] = true; - sd->type = 0; if (sd->vol_idx == 0) { sd->free = true; ++(hdr->num_free); From 9210857d590a20cdfd7d23996d41966d50cdcf6c Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Fri, 10 Feb 2017 10:53:54 -0600 Subject: [PATCH 61/81] Scalar: Reworked how rounding / scaling is done. More template boilerplate but overall simpler in implementation and use. Consolidation of scaling logic to a single location. --- lib/ts/Scalar.h | 466 +++++++++++++++++++++++++----------------- lib/ts/test_Scalar.cc | 92 +++++---- 2 files changed, 326 insertions(+), 232 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 0b2342a30c3..2f82debbcdb 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -41,46 +41,134 @@ namespace ApacheTrafficServer { template class Scalar; -/** Helper class for @c Scalar. - - This is used to wrap a value for the @c Scalar constructor to indicate the value is in units (not scaled) and should be rounded - up. -*/ -template struct unit_ceil_t { - C _n; - template operator unit_ceil_t() { return static_cast(_n); } -}; - -template -constexpr unit_ceil_t -unit_ceil(C n) +namespace detail { - return unit_ceil_t{n}; -} + /// Convert a count @a c that is scale @s S to scale @c N + template + intmax_t + scale_conversion_round_up(intmax_t c) + { + typedef std::ratio R; + if (N == S) { + return c; + } else if (R::den == 1) { + return c / R::num + (0 != c % R::num); // N is a multiple of S. + } else if (R::num == 1) { + return c * R::den; // S is a multiple of N. + } else { + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); + } + } -/** Helper class for @c Scalar. + /// Convert a count @a c that is scale @s S to scale @c N + template + intmax_t + scale_conversion_round_down(intmax_t c) + { + typedef std::ratio R; + if (N == S) { + return c; + } else if (R::den == 1) { + return c / R::num; // N = k S + } else if (R::num == 1) { + return c * R::den; // S = k N + } else { + // General case where neither N nor S are a multiple of the other. + // Yes, a bit odd, but this minimizes the risk of integer overflow. 
+ // I need to validate that under -O2 the compiler will only do 1 division to get + // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are + // powers of 2 I have verified recent GNU compilers will optimize to bit operations. + return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; + } + } - This is used to wrap a value for the @c Scalar constructor to indicate the value is in units (not scaled) and should be rounded - down. -*/ -template struct unit_floor_t { - C _n; - template operator unit_floor_t() { return static_cast(_n); } -}; + /* Helper classes for @c Scalar + + These wrap values to capture extra information for @c Scalar methods. This includes whether to + round up or down when converting and, when the wrapped data is also a @c Scalar, the scale. + + These are not intended for direct use but by the @c round_up and @c round_down free functions + which capture the information about the argument and construct an instance of one of these + classes to pass it on to a @c Scalar method. + + Scale conversions between @c Scalar instances are handled in these classes via the templated + methods @c scale_conversion_round_up and @c scale_conversion_round_down. Scale conversions from + units are sufficiently simple to be done directly in the methods. + + Much of this is driven by the fact that the assignment operator can not be templated and + therefore to have a nice interace for assignment this split is needed. + */ + + // Unit value, to be rounded up. + template struct scalar_unit_round_up_t { + C _n; + // template constexpr operator scalar_unit_round_up_t() { return static_cast(_n); } + template + constexpr I + scale() + { + return static_cast(_n / N + (0 != (_n % N))); + } + }; + // Unit value, to be rounded down. 
+ template struct scalar_unit_round_down_t { + C _n; + // template constexpr operator scalar_unit_round_down_t() { return static_cast(_n); } + template + constexpr I + scale() + { + return static_cast(_n / N); + } + }; + // Scalar value, to be rounded up. + template struct scalar_round_up_t { + C _n; + template constexpr operator Scalar() { return scale_conversion_round_up(_n); } + }; + // Scalar value, to be rounded down. + template struct scalar_round_down_t { + C _n; + template constexpr operator Scalar() { return scale_conversion_round_down(_n); } + }; +} +/// Mark a unit value to be scaled, rounding down. +template +constexpr detail::scalar_unit_round_up_t +round_up(C n) +{ + return detail::scalar_unit_round_up_t{n}; +} +/// Mark a @c Scalar value to be scaled, rounding up. +template +constexpr detail::scalar_round_up_t +round_up(Scalar v) +{ + return detail::scalar_round_up_t{v.count()}; +} +/// Mark a unit value to be scaled, rounding down. template -constexpr unit_floor_t -unit_floor(C n) +constexpr detail::scalar_unit_round_down_t +round_down(C n) +{ + return detail::scalar_unit_round_down_t{n}; +} +/// Mark a @c Scalar value, to be rounded down. +template +constexpr detail::scalar_round_down_t +round_down(Scalar v) { - return unit_floor_t{n}; + return detail::scalar_round_down_t{v.count()}; } + /** A class to hold scaled values. Instances of this class have a @a count and a @a scale. The "value" of the instance is @a count * @a scale. The scale is stored in the compiler in the class symbol table and so only the count is a run time value. An instance with a large scale can be assign to an instance with a smaller scale and the conversion is done automatically. Conversions from a smaller to - larger scale must be explicit using @c scaled_up and @c scaled_down. This prevents + larger scale must be explicit using @c round_up and @c round_down. This prevents inadvertent changes in value. 
Because the scales are not the same these conversions can be lossy and the two conversions determine whether, in such a case, the result should be rounded up or down to the nearest scale value. @@ -96,16 +184,15 @@ unit_floor(C n) @note This is modeled somewhat on @c std::chrono and serves a similar function for different and simpler cases (where the ratio is always an integer, never a fraction). - @see scaled_up - @see scaled_down + @see round_up + @see round_down */ template class Scalar { typedef Scalar self; ///< Self reference type. public: - /// Scaling factor for instances. - /// Make it externally accessible. + /// Scaling factor - make it external accessible. constexpr static intmax_t SCALE = N; typedef C Count; ///< Type used to hold the count. typedef T Tag; ///< Make tag accessible. @@ -114,36 +201,45 @@ template class Scalar ///< Construct to have @a n scaled units. constexpr Scalar(Count n); /// Scale units value @a x to this type, rounding down. - constexpr Scalar(unit_ceil_t x); - /// Scale units value @a x to this type, rounding down. - constexpr Scalar(unit_floor_t x); - + constexpr Scalar(detail::scalar_unit_round_up_t x); /// Copy constructor for same scale. - template Scalar(Scalar const &that); + template constexpr Scalar(Scalar const &that); - /// Copy / conversion constructor. + /// Copy constructor. + constexpr Scalar(self const &that); /// Copy constructor. + /// Conversion constructor. /// @note Requires that @c S be an integer multiple of @c SCALE. template Scalar(Scalar const &that); + /// Scaling constructor. + Scalar(detail::scalar_round_up_t const &that); + /// Scaling constructor. + Scalar(detail::scalar_round_down_t const &that); + /// Scaling constructor. + constexpr Scalar(detail::scalar_unit_round_up_t const &that); + /// Scaling constructor. + constexpr Scalar(detail::scalar_unit_round_down_t const &that); + /// Assignment operator. + /// The value is scaled appropriately. 
+ /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then + /// the @c round_up or @c round_down must be used to indicate the rounding direction. + template self &operator=(Scalar const &that); + /// Assignment from same scale. + self &operator=(self const &that); /// Direct assignment. /// The count is set to @a n. self &operator=(Count n); - self &operator=(unit_ceil_t n); - self &operator=(unit_floor_t n); + // Scaling assignments. + self &operator=(detail::scalar_unit_round_up_t n); + self &operator=(detail::scalar_unit_round_down_t n); + self &operator=(detail::scalar_round_up_t v); + self &operator=(detail::scalar_round_down_t v); /// The number of scale units. constexpr Count count() const; - /// The absolute value, scaled up. + /// The scaled value. constexpr Count units() const; - /// Assignment operator. - /// The value is scaled appropriately. - /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then - /// the @c scale_up or @c scale_down casts must be used to indicate the rounding direction. - template self &operator=(Scalar const &that); - /// Assignment from same scale. - self &operator=(self const &that); - /// Addition operator. /// The value is scaled from @a that to @a this. /// @note Requires the scale of @a that be an integer multiple of the scale of @a this. If this isn't the case then @@ -153,6 +249,10 @@ template class Scalar self &operator+=(C n); /// Addition - add @a n as a number of scaled units. self &operator+=(self const &that); + self &operator+=(detail::scalar_unit_round_up_t n); + self &operator+=(detail::scalar_unit_round_down_t n); + self &operator+=(detail::scalar_round_up_t v); + self &operator+=(detail::scalar_round_down_t v); /// Increment - increase count by 1. self &operator++(); @@ -172,6 +272,10 @@ template class Scalar self &operator-=(C n); /// Subtraction - subtract @a n as a number of scaled units. 
self &operator-=(self const &that); + self &operator-=(detail::scalar_unit_round_up_t n); + self &operator-=(detail::scalar_unit_round_down_t n); + self &operator-=(detail::scalar_round_up_t v); + self &operator-=(detail::scalar_round_down_t v); /// Multiplication - multiple the count by @a n. self &operator*=(C n); @@ -179,16 +283,6 @@ template class Scalar /// Division - divide (rounding down) the count by @a n. self &operator/=(C n); - /// Scale value @a x to this type, rounding up. - template static self scale_up(Scalar const &x); - /// Scale value units value @a x to this type, rounding up. - static self scale_up(C x); - - /// Scale value @a x to this type, rounding down. - template static self scale_down(Scalar const &x); - /// Scale value units value @a x to this type, rounding down. - static self scale_down(C x); - /// Run time access to the scale (template arg @a N). static constexpr intmax_t scale(); @@ -202,12 +296,35 @@ template constexpr Scalar::Scalar( template constexpr Scalar::Scalar(Count n) : _n(n) { } -template constexpr Scalar::Scalar(unit_ceil_t n) : _n(scale_up(n._n)._n) +template constexpr Scalar::Scalar(self const &that) : _n(that._n) { } -template constexpr Scalar::Scalar(unit_floor_t n) : _n(scale_down(n._n)._n) +template +template +constexpr Scalar::Scalar(Scalar const &that) : _n(static_cast(that.count())) +{ +} +template template Scalar::Scalar(Scalar const &that) +{ + typedef std::ratio R; + static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); + _n = that.count() * R::num; +} +template Scalar::Scalar(detail::scalar_round_up_t const &v) : _n(v._n) +{ +} +template Scalar::Scalar(detail::scalar_round_down_t const &v) : _n(v._n) +{ +} +template +constexpr Scalar::Scalar(detail::scalar_unit_round_up_t const &v) : _n(v.template scale()) { } +template +constexpr Scalar::Scalar(detail::scalar_unit_round_down_t const &v) : _n(v.template scale()) +{ +} + template constexpr auto 
Scalar::count() const -> Count @@ -220,6 +337,7 @@ Scalar::units() const -> Count { return _n * SCALE; } + template inline auto Scalar::operator=(Count n) -> self & @@ -236,38 +354,32 @@ Scalar::operator=(self const &that) -> self & } template inline auto -Scalar::operator=(unit_ceil_t n) -> self & +Scalar::operator=(detail::scalar_round_up_t v) -> self & { - *this = scale_up(n._n); + _n = v._n; return *this; } template inline auto -Scalar::operator=(unit_floor_t n) -> self & +Scalar::operator=(detail::scalar_round_down_t v) -> self & { - *this = scale_down(n._n); + _n = v._n; return *this; } template -constexpr inline intmax_t -Scalar::scale() +inline auto +Scalar::operator=(detail::scalar_unit_round_up_t v) -> self & { - return SCALE; + _n = v.template scale(); + return *this; } - template -template -Scalar::Scalar(Scalar const &that) : _n(static_cast(that.count())) -{ -} - -template template Scalar::Scalar(Scalar const &that) +inline auto +Scalar::operator=(detail::scalar_unit_round_down_t v) -> self & { - typedef std::ratio R; - static_assert(R::den == 1, "Construction not permitted - target scale is not an integral multiple of source scale."); - _n = that.count() * R::num; + _n = v.template scale(); + return *this; } - template template auto @@ -279,85 +391,11 @@ Scalar::operator=(Scalar const &that) -> self & return *this; } -// -- Free Functions -- - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding up. - The resulting count in the return value will be the smallest count that is not smaller than the unit - value of @a src. 
- - @code - typedef Scalar<16> Paragraphs; - typedef Scalar<1024> KiloBytes; - - Paragraphs src(37459); - auto size = scale_up(src); // size.count() == 586 - @endcode - */ -template -M -scale_up(Scalar const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (M::SCALE == S) { - return c; - } else if (R::den == 1) { - return c / R::num + (0 != c % R::num); // N is a multiple of S. - } else if (R::num == 1) { - return c * R::den; // S is a multiple of N. - } else { - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num + (0 != (c % R::num)); - } -} - -/** Convert a metric @a src to a different scale, keeping the unit value as close as possible, rounding down. - The resulting count in the return value will be the largest count that is not larger than the unit - value of @a src. - - @code - typedef Scalar<16> Paragraphs; - typedef Scalar<1024> KiloBytes; - - Paragraphs src(37459); - auto size = scale_up(src); // size.count() == 585 - @endcode - */ -template -M -scale_down(Scalar const &src) -{ - typedef std::ratio R; - auto c = src.count(); - - if (R::den == 1) { - return c / R::num; // S is a multiple of N. - } else if (R::num == 1) { - return c * R::den; // N is a multiple of S. - } else { - // General case where neither N nor S are a multiple of the other. - // Yes, a bit odd, but this minimizes the risk of integer overflow. - // I need to validate that under -O2 the compiler will only do 1 division to get - // both the quotient and remainder for (n/N) and (n%N). In cases where N,S are - // powers of 2 I have verified recent GNU compilers will optimize to bit operations. - return (c / R::num) * R::den + ((c % R::num) * R::den) / R::num; - } -} - -/// Convert a unit value @a n to a Scalar, rounding down. -template -M -scale_down(intmax_t n) -{ - return n / M::SCALE; // assuming compiler will optimize out dividing by 1 if needed. -} - -/// Convert a unit value @a n to a Scalar, rounding up. 
-template -M -scale_up(intmax_t n) +template +constexpr inline intmax_t +Scalar::scale() { - return M::SCALE == 1 ? n : (n / M::SCALE + (0 != (n % M::SCALE))); + return SCALE; } // --- Compare operators @@ -663,6 +701,34 @@ Scalar::operator+=(C n) -> self & _n += n; return *this; } +template +auto +Scalar::operator+=(detail::scalar_unit_round_up_t v) -> self & +{ + _n += v.template scale(); + return *this; +} +template +auto +Scalar::operator+=(detail::scalar_unit_round_down_t v) -> self & +{ + _n += v.template scale(); + return *this; +} +template +auto +Scalar::operator+=(detail::scalar_round_up_t v) -> self & +{ + _n += v._n; + return *this; +} +template +auto +Scalar::operator+=(detail::scalar_round_down_t v) -> self & +{ + _n += v._n; + return *this; +} template auto @@ -701,11 +767,11 @@ operator+(int n, Scalar const &rhs) { return Scalar(rhs) += n; } -template +template Scalar -operator+(Scalar const &lhs, int n) +operator+(Scalar const &lhs, int n) { - return Scalar(lhs) += n; + return Scalar(lhs) += n; } template Scalar @@ -713,6 +779,30 @@ operator+(int n, Scalar const &rhs) { return Scalar(rhs) += n; } +template +Scalar +operator+(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) += lhs.template scale(); +} +template +Scalar +operator+(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) +{ + return Scalar(lhs) += rhs.template scale(); +} +template +Scalar +operator+(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) += lhs.template scale(); +} +template +Scalar +operator+(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) +{ + return Scalar(lhs) += rhs.template scale(); +} template template @@ -738,6 +828,34 @@ Scalar::operator-=(C n) -> self & _n -= n; return *this; } +template +auto +Scalar::operator-=(detail::scalar_unit_round_up_t v) -> self & +{ + _n -= v.template scale(); + return *this; +} +template +auto +Scalar::operator-=(detail::scalar_unit_round_down_t v) -> self & +{ + _n -= 
v.template scale(); + return *this; +} +template +auto +Scalar::operator-=(detail::scalar_round_up_t v) -> self & +{ + _n -= v._n; + return *this; +} +template +auto +Scalar::operator-=(detail::scalar_round_down_t v) -> self & +{ + _n -= v._n; + return *this; +} template auto @@ -875,36 +993,6 @@ operator/(Scalar const &lhs, int n) return Scalar(lhs) /= n; } -template -template -auto -Scalar::scale_up(Scalar const &that) -> self -{ - return ApacheTrafficServer::scale_up(that); -} - -template -template -auto -Scalar::scale_down(Scalar const &that) -> self -{ - return ApacheTrafficServer::scale_down(that); -} - -template -auto -Scalar::scale_up(C x) -> self -{ - return ApacheTrafficServer::scale_up(x); -} - -template -auto -Scalar::scale_down(C x) -> self -{ - return ApacheTrafficServer::scale_down(x); -} - namespace detail { // These classes exist only to create distinguishable overloads. diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index fb6f1a9c0dc..ff9bd0e1bdc 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -110,25 +110,25 @@ Test_2() Size_2 sz_c(SCALE_1 / SCALE_2); Size_2 sz_d(29 * SCALE_1 / SCALE_2); - auto sz = ts::scale_up(sz_a); - test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scale_down(sz_a); - test.check(sz.count() == 0, "Rounding down: got %d expected %d", sz.count(), 0); - - sz = ts::scale_up(sz_b); - test.check(sz.count() == 4, "Rounding up, got %d expected %d", sz.count(), 4); - sz = ts::scale_down(sz_b); - test.check(sz.count() == 3, "Rounding down, got %d expected %d", sz.count(), 3); - - sz = ts::scale_up(sz_c); - test.check(sz.count() == 1, "Rounding up, got %d expected %d", sz.count(), 1); - sz = ts::scale_down(sz_c); - test.check(sz.count() == 1, "Rounding down, got %d expected %d", sz.count(), 1); - - sz = ts::scale_up(sz_d); - test.check(sz.count() == 29, "Rounding up, got %d expected %d", sz.count(), 29); - sz = ts::scale_down(sz_d); - test.check(sz.count() == 29, 
"Rounding down, got %d expected %d", sz.count(), 29); + Size_1 sz = ts::round_up(sz_a); + test.check(sz.count() == 1, "[1] Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::round_down(sz_a); + test.check(sz.count() == 0, "[2] Rounding down: got %d expected %d", sz.count(), 0); + + sz = ts::round_up(sz_b); + test.check(sz.count() == 4, "[3] Rounding up, got %d expected %d", sz.count(), 4); + sz = ts::round_down(sz_b); + test.check(sz.count() == 3, "[4] Rounding down, got %d expected %d", sz.count(), 3); + + sz = ts::round_up(sz_c); + test.check(sz.count() == 1, "[5] Rounding up, got %d expected %d", sz.count(), 1); + sz = ts::round_down(sz_c); + test.check(sz.count() == 1, "[6] Rounding down, got %d expected %d", sz.count(), 1); + + sz = ts::round_up(sz_d); + test.check(sz.count() == 29, "[7] Rounding up, got %d expected %d", sz.count(), 29); + sz = ts::round_down(sz_d); + test.check(sz.count() == 29, "[8] Rounding down, got %d expected %d", sz.count(), 29); sz = 119; sz_b = sz; // Should be OK because SCALE_1 is an integer multiple of SCALE_2 @@ -151,14 +151,14 @@ Test_3() Size_2 sz_a(2); Size_2 sz_b(97); - auto sz = ts::scale_up(sz_a); + Size_1 sz = round_up(sz_a); test.check(sz.count() == 2, "Rounding up, got %d expected %d", sz.count(), 2); - sz = ts::scale_down(sz_a); + sz = round_down(sz_a); test.check(sz.count() == 1, "Rounding down: got %d expected %d", sz.count(), 0); - sz = ts::scale_up(sz_b); + sz = ts::round_up(sz_b); test.check(sz.count() == 65, "Rounding up, got %d expected %d", sz.count(), 65); - sz = ts::scale_down(sz_b); + sz = ts::round_down(sz_b); test.check(sz.count() == 64, "Rounding down, got %d expected %d", sz.count(), 64); } @@ -174,15 +174,15 @@ Test_4() // m_4 = m_9; // Should fail to compile with static assert. // m_9 = m_4; // Should fail to compile with static assert. 
- m_4 = ts::scale_up(m_9); + m_4 = ts::round_up(m_9); test.check(m_4.count() == 214, "Rounding down, got %d expected %d", m_4.count(), 214); - m_4 = ts::scale_down(m_9); + m_4 = ts::round_down(m_9); test.check(m_4.count() == 213, "Rounding down, got %d expected %d", m_4.count(), 213); m_4 = 213; - m_9 = ts::scale_up(m_4); + m_9 = ts::round_up(m_4); test.check(m_9.count() == 95, "Rounding down, got %d expected %d", m_9.count(), 95); - m_9 = ts::scale_down(m_4); + m_9 = ts::round_down(m_4); test.check(m_9.count() == 94, "Rounding down, got %d expected %d", m_9.count(), 94); m_test = m_4; // Verify assignment of identical scale values compiles. @@ -203,41 +203,47 @@ Test_5() MBytes mbytes(5); Bytes z1 = bytes + 128; - test.check(z1.count() == 224, "Addition got %ld expected %d", z1.count(), 224); + test.check(z1.count() == 224, "[1] Addition got %ld expected %d", z1.count(), 224); KBytes z2 = kbytes + 3; - test.check(z2.count() == 5, "Addition got %d expected %d", z2.count(), 5); + test.check(z2.count() == 5, "[2] Addition got %d expected %d", z2.count(), 5); Bytes z3(bytes); z3 += kbytes; - test.check(z3.units() == 2048 + 96, "Addition got %ld expected %d", z3.units(), 2048 + 96); + test.check(z3.units() == 2048 + 96, "[3] Addition got %ld expected %d", z3.units(), 2048 + 96); MBytes z4 = mbytes; z4 += 5; z2 += z4; - test.check(z2.units() == ((10 << 20) + (5 << 10)), "Addition got %d expected %d", z2.units(), (10 << 20) + (2 << 10)); + test.check(z2.units() == ((10 << 20) + (5 << 10)), "[4] Addition got %d expected %d", z2.units(), (10 << 20) + (2 << 10)); z1 += 128; - test.check(z1.count() == 352, "Addition got %ld expected %d", z1.count(), 352); + test.check(z1.count() == 352, "[5] Addition got %ld expected %d", z1.count(), 352); z2 = 2; z1 = 3 * z2; - test.check(z1.count() == 6144, "Addition got %ld expected %d", z1.count(), 6144); + test.check(z1.count() == 6144, "[6] Addition got %ld expected %d", z1.count(), 6144); z1 *= 5; - test.check(z1.count() == 30720, 
"Addition got %ld expected %d", z1.count(), 30720); + test.check(z1.count() == 30720, "[7] Addition got %ld expected %d", z1.count(), 30720); z1 /= 3; - test.check(z1.count() == 10240, "Addition got %ld expected %d", z1.count(), 10240); + test.check(z1.count() == 10240, "[8] Addition got %ld expected %d", z1.count(), 10240); z2 = 3148; auto x = z2 + MBytes(1); - test.check(x.scale() == z2.scale(), "Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); - test.check(x.count() == 4172, "Common type addition yielded bad count %d - expected %d", x.count(), 4172); + test.check(x.scale() == z2.scale(), "[9] Common type addition yielded bad scale %ld - expected %ld", x.scale(), z2.scale()); + test.check(x.count() == 4172, "[10] Common type addition yielded bad count %d - expected %d", x.count(), 4172); - z2 = z2.scale_down(262150); - test.check(z2.count() == 256, "Scale down bad count %d - expected %d", z2.count(), 256); + z2 = ts::round_down(262150); + test.check(z2.count() == 256, "[11] Unit scale down assignment bad count %d - expected %d", z2.count(), 256); - z2 = ts::unit_ceil(262150); - test.check(z2.count() == 257, "Scale down bad count %d - expected %d", z2.count(), 257); + z2 = ts::round_up(262150); + test.check(z2.count() == 257, "[12] Unit scale up assignment bad count %d - expected %d", z2.count(), 257); - KBytes q(ts::unit_floor(262150)); - test.check(q.count() == 256, "Scale down bad count %d - expected %d", q.count(), 256); + KBytes q(ts::round_down(262150)); + test.check(q.count() == 256, "[13] Unit scale down constructor bad count %d - expected %d", q.count(), 256); + + z2 += ts::round_up(97384); + test.check(z2.count() == 353, "[14] Unit scale down += bad count %d - expected %d", z2.count(), 353); + + decltype(z2) a = z2 + ts::round_down(167229); + test.check(a.count() == 516, "[15] Unit scale down += bad count %d - expected %d", a.count(), 516); } // test comparisons From 782ce3632f496906b49a44f345ae16adaaf47046 Mon Sep 17 
00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 11:44:48 -0600 Subject: [PATCH 62/81] Scalar: Fill out addition and subtraction operators for new rounding mechanism. --- lib/ts/Scalar.h | 73 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 2f82debbcdb..d273fdd43dc 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -803,6 +803,30 @@ operator+(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) { return Scalar(lhs) += rhs.template scale(); } +template +Scalar +operator+(detail::scalar_round_up_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) += lhs._n; +} +template +Scalar +operator+(Scalar const &lhs, detail::scalar_round_up_t rhs) +{ + return Scalar(lhs) += rhs._n; +} +template +Scalar +operator+(detail::scalar_round_down_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) += lhs._n; +} +template +Scalar +operator+(Scalar const &lhs, detail::scalar_round_down_t rhs) +{ + return Scalar(lhs) += rhs._n; +} template template @@ -906,6 +930,55 @@ operator-(int n, Scalar const &rhs) { return Scalar(rhs) -= n; } +template +Scalar +operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) -= lhs.template scale(); +} +template +Scalar +operator-(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) +{ + return Scalar(lhs) -= rhs.template scale(); +} +template +Scalar +operator-(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) -= lhs.template scale(); +} +template +Scalar +operator-(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) +{ + return Scalar(lhs) -= rhs.template scale(); +} +template +Scalar +operator-(detail::scalar_round_up_t lhs, Scalar const &rhs) +{ + return Scalar(rhs) -= lhs._n; +} +template +Scalar +operator-(Scalar const &lhs, detail::scalar_round_up_t rhs) +{ + return Scalar(lhs) -= rhs._n; +} +template +Scalar +operator-(detail::scalar_round_down_t lhs, Scalar const &rhs) +{ + 
return Scalar(rhs) -= lhs._n; +} +template +Scalar +operator-(Scalar const &lhs, detail::scalar_round_down_t rhs) +{ + return Scalar(lhs) -= rhs._n; +} + template auto Scalar::operator++() -> self & { From 6d8ef9dc026d098ba80becbcb46cefe567899c6c Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 14:11:28 -0600 Subject: [PATCH 63/81] Scalar: More tweaks, working on cross type case for round up/down. --- lib/ts/Scalar.h | 34 +++++++++++++++++++--------------- lib/ts/test_Scalar.cc | 21 +++++++++++++++++++++ 2 files changed, 40 insertions(+), 15 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index d273fdd43dc..5c6ea3fc3e1 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -95,6 +95,11 @@ namespace detail methods @c scale_conversion_round_up and @c scale_conversion_round_down. Scale conversions from units are sufficiently simple to be done directly in the methods. + The self conversion operator that just changes the internal storage type is needed for direct + assignments so there is a conversion from the type of the expression to the type needed by the + @c Scalar instance. I think this could also be done by adding a template parameter to the @c + Scalar methods which I may look at in the future. + Much of this is driven by the fact that the assignment operator can not be templated and therefore to have a nice interace for assignment this split is needed. */ @@ -102,7 +107,7 @@ namespace detail // Unit value, to be rounded up. template struct scalar_unit_round_up_t { C _n; - // template constexpr operator scalar_unit_round_up_t() { return static_cast(_n); } + template constexpr operator scalar_unit_round_up_t() { return {static_cast(_n)}; } template constexpr I scale() @@ -113,7 +118,7 @@ namespace detail // Unit value, to be rounded down. 
template struct scalar_unit_round_down_t { C _n; - // template constexpr operator scalar_unit_round_down_t() { return static_cast(_n); } + template operator scalar_unit_round_down_t() { return {static_cast(_n)}; } template constexpr I scale() @@ -138,28 +143,28 @@ template constexpr detail::scalar_unit_round_up_t round_up(C n) { - return detail::scalar_unit_round_up_t{n}; + return {n}; } /// Mark a @c Scalar value to be scaled, rounding up. template constexpr detail::scalar_round_up_t round_up(Scalar v) { - return detail::scalar_round_up_t{v.count()}; + return {v.count()}; } /// Mark a unit value to be scaled, rounding down. template constexpr detail::scalar_unit_round_down_t round_down(C n) { - return detail::scalar_unit_round_down_t{n}; + return {n}; } /// Mark a @c Scalar value, to be rounded down. template constexpr detail::scalar_round_down_t round_down(Scalar v) { - return detail::scalar_round_down_t{v.count()}; + return {v.count()}; } /** A class to hold scaled values. @@ -791,15 +796,15 @@ operator+(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) { return Scalar(lhs) += rhs.template scale(); } -template +template Scalar -operator+(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) +operator+(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) { return Scalar(rhs) += lhs.template scale(); } -template +template Scalar -operator+(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) +operator+(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) { return Scalar(lhs) += rhs.template scale(); } @@ -930,15 +935,15 @@ operator-(int n, Scalar const &rhs) { return Scalar(rhs) -= n; } -template +template Scalar -operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) +operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) { return Scalar(rhs) -= lhs.template scale(); } -template +template Scalar -operator-(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) +operator-(Scalar const &lhs, detail::scalar_unit_round_up_t 
rhs) { return Scalar(lhs) -= rhs.template scale(); } @@ -979,7 +984,6 @@ operator-(Scalar const &lhs, detail::scalar_round_down_t rhs) return Scalar(lhs) -= rhs._n; } - template auto Scalar::operator++() -> self & { ++_n; diff --git a/lib/ts/test_Scalar.cc b/lib/ts/test_Scalar.cc index ff9bd0e1bdc..588f0ecce41 100644 --- a/lib/ts/test_Scalar.cc +++ b/lib/ts/test_Scalar.cc @@ -195,6 +195,7 @@ Test_5() TestBox test("TS Scalar: arithmetic operator tests"); typedef ts::Scalar<1024> KBytes; + typedef ts::Scalar<1025, long int> KiBytes; typedef ts::Scalar<1, int64_t> Bytes; typedef ts::Scalar<1024 * KBytes::SCALE> MBytes; @@ -244,6 +245,26 @@ Test_5() decltype(z2) a = z2 + ts::round_down(167229); test.check(a.count() == 516, "[15] Unit scale down += bad count %d - expected %d", a.count(), 516); + + KiBytes k = 3148; + auto kx = k + MBytes(1); + test.check(kx.scale() == k.scale(), "[9] Common type addition yielded bad scale %ld - expected %ld", kx.scale(), k.scale()); + test.check(kx.count() == 4172, "[10] Common type addition yielded bad count %ld - expected %d", kx.count(), 4172); + + k = ts::round_down(262150); + test.check(k.count() == 256, "[11] Unit scale down assignment bad count %ld - expected %d", k.count(), 256); + + k = ts::round_up(262150); + test.check(k.count() == 257, "[12] Unit scale up assignment bad count %ld - expected %d", k.count(), 257); + + KBytes kq(ts::round_down(262150)); + test.check(kq.count() == 256, "[13] Unit scale down constructor bad count %d - expected %d", kq.count(), 256); + + k += ts::round_up(97384); + test.check(k.count() == 353, "[14] Unit scale down += bad count %ld - expected %d", k.count(), 353); + + decltype(k) ka = k + ts::round_down(167229); + test.check(ka.count() == 516, "[15] Unit scale down += bad count %ld - expected %d", ka.count(), 516); } // test comparisons From 21cfc63f7f62c7b51a75d60e9038146c2cf3e8a1 Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Fri, 10 Feb 2017 14:28:32 -0600 Subject: [PATCH 64/81] Scalar: Conversion of scalar helpers to use templated methods in Scalar. --- lib/ts/Scalar.h | 72 ++++++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 5c6ea3fc3e1..702af469997 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -92,13 +92,14 @@ namespace detail classes to pass it on to a @c Scalar method. Scale conversions between @c Scalar instances are handled in these classes via the templated - methods @c scale_conversion_round_up and @c scale_conversion_round_down. Scale conversions from - units are sufficiently simple to be done directly in the methods. + methods @c scale_conversion_round_up and @c scale_conversion_round_down. - The self conversion operator that just changes the internal storage type is needed for direct - assignments so there is a conversion from the type of the expression to the type needed by the - @c Scalar instance. I think this could also be done by adding a template parameter to the @c - Scalar methods which I may look at in the future. + Conversions between scales and types for the scalar helpers is done inside the helper classes + and a user type conversion operator exists so the helper can be converted by the compiler to + the correct type. For the untis bases conversion this is done in @c Scalar because the + generality of the needed conversion is too broad to be easily used. It can be done but there is + some ugliness due to the fact that in some cases two user conversions which is difficult to + deal with. I have tried it both ways and overall this seems a cleaner implementation. Much of this is driven by the fact that the assignment operator can not be templated and therefore to have a nice interace for assignment this split is needed. @@ -107,7 +108,7 @@ namespace detail // Unit value, to be rounded up. 
template struct scalar_unit_round_up_t { C _n; - template constexpr operator scalar_unit_round_up_t() { return {static_cast(_n)}; } + // template constexpr operator scalar_unit_round_up_t() { return {static_cast(_n)}; } template constexpr I scale() @@ -118,7 +119,7 @@ namespace detail // Unit value, to be rounded down. template struct scalar_unit_round_down_t { C _n; - template operator scalar_unit_round_down_t() { return {static_cast(_n)}; } + // template operator scalar_unit_round_down_t() { return {static_cast(_n)}; } template constexpr I scale() @@ -205,11 +206,6 @@ template class Scalar constexpr Scalar(); ///< Default contructor. ///< Construct to have @a n scaled units. constexpr Scalar(Count n); - /// Scale units value @a x to this type, rounding down. - constexpr Scalar(detail::scalar_unit_round_up_t x); - /// Copy constructor for same scale. - template constexpr Scalar(Scalar const &that); - /// Copy constructor. constexpr Scalar(self const &that); /// Copy constructor. /// Conversion constructor. @@ -219,10 +215,12 @@ template class Scalar Scalar(detail::scalar_round_up_t const &that); /// Scaling constructor. Scalar(detail::scalar_round_down_t const &that); - /// Scaling constructor. - constexpr Scalar(detail::scalar_unit_round_up_t const &that); - /// Scaling constructor. - constexpr Scalar(detail::scalar_unit_round_down_t const &that); + /// Scale units value @a x to this type, rounding up. + template constexpr Scalar(detail::scalar_unit_round_up_t v); + /// Scale units value @a x to this type, rounding down. + template constexpr Scalar(detail::scalar_unit_round_down_t v); + /// Copy constructor for same scale. + template constexpr Scalar(Scalar const &that); /// Assignment operator. /// The value is scaled appropriately. @@ -235,10 +233,10 @@ template class Scalar /// The count is set to @a n. self &operator=(Count n); // Scaling assignments. 
- self &operator=(detail::scalar_unit_round_up_t n); - self &operator=(detail::scalar_unit_round_down_t n); - self &operator=(detail::scalar_round_up_t v); - self &operator=(detail::scalar_round_down_t v); + template self &operator=(detail::scalar_unit_round_up_t n); + template self &operator=(detail::scalar_unit_round_down_t n); + self &operator =(detail::scalar_round_up_t v); + self &operator =(detail::scalar_round_down_t v); /// The number of scale units. constexpr Count count() const; @@ -254,8 +252,8 @@ template class Scalar self &operator+=(C n); /// Addition - add @a n as a number of scaled units. self &operator+=(self const &that); - self &operator+=(detail::scalar_unit_round_up_t n); - self &operator+=(detail::scalar_unit_round_down_t n); + template self &operator+=(detail::scalar_unit_round_up_t n); + template self &operator+=(detail::scalar_unit_round_down_t n); self &operator+=(detail::scalar_round_up_t v); self &operator+=(detail::scalar_round_down_t v); @@ -277,8 +275,8 @@ template class Scalar self &operator-=(C n); /// Subtraction - subtract @a n as a number of scaled units. 
self &operator-=(self const &that); - self &operator-=(detail::scalar_unit_round_up_t n); - self &operator-=(detail::scalar_unit_round_down_t n); + template self &operator-=(detail::scalar_unit_round_up_t n); + template self &operator-=(detail::scalar_unit_round_down_t n); self &operator-=(detail::scalar_round_up_t v); self &operator-=(detail::scalar_round_down_t v); @@ -322,11 +320,13 @@ template Scalar::Scalar(detail::sc { } template -constexpr Scalar::Scalar(detail::scalar_unit_round_up_t const &v) : _n(v.template scale()) +template +constexpr Scalar::Scalar(detail::scalar_unit_round_up_t v) : _n(v.template scale()) { } template -constexpr Scalar::Scalar(detail::scalar_unit_round_down_t const &v) : _n(v.template scale()) +template +constexpr Scalar::Scalar(detail::scalar_unit_round_down_t v) : _n(v.template scale()) { } @@ -372,15 +372,17 @@ Scalar::operator=(detail::scalar_round_down_t v) -> self & return *this; } template +template inline auto -Scalar::operator=(detail::scalar_unit_round_up_t v) -> self & +Scalar::operator=(detail::scalar_unit_round_up_t v) -> self & { _n = v.template scale(); return *this; } template +template inline auto -Scalar::operator=(detail::scalar_unit_round_down_t v) -> self & +Scalar::operator=(detail::scalar_unit_round_down_t v) -> self & { _n = v.template scale(); return *this; @@ -707,15 +709,17 @@ Scalar::operator+=(C n) -> self & return *this; } template +template auto -Scalar::operator+=(detail::scalar_unit_round_up_t v) -> self & +Scalar::operator+=(detail::scalar_unit_round_up_t v) -> self & { _n += v.template scale(); return *this; } template +template auto -Scalar::operator+=(detail::scalar_unit_round_down_t v) -> self & +Scalar::operator+=(detail::scalar_unit_round_down_t v) -> self & { _n += v.template scale(); return *this; @@ -858,15 +862,17 @@ Scalar::operator-=(C n) -> self & return *this; } template +template auto -Scalar::operator-=(detail::scalar_unit_round_up_t v) -> self & 
+Scalar::operator-=(detail::scalar_unit_round_up_t v) -> self & { _n -= v.template scale(); return *this; } template +template auto -Scalar::operator-=(detail::scalar_unit_round_down_t v) -> self & +Scalar::operator-=(detail::scalar_unit_round_down_t v) -> self & { _n -= v.template scale(); return *this; From ad410385eedef2752a4290ab6ba52dbe1d18bf11 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 14:44:52 -0600 Subject: [PATCH 65/81] Scalar: Filling out the + and - operators for unit scaling. --- lib/ts/Scalar.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 702af469997..3ef351cb5f3 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -788,15 +788,15 @@ operator+(int n, Scalar const &rhs) { return Scalar(rhs) += n; } -template +template Scalar -operator+(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) +operator+(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) { return Scalar(rhs) += lhs.template scale(); } -template +template Scalar -operator+(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) +operator+(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) { return Scalar(lhs) += rhs.template scale(); } @@ -941,27 +941,27 @@ operator-(int n, Scalar const &rhs) { return Scalar(rhs) -= n; } -template +template Scalar -operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) +operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) { return Scalar(rhs) -= lhs.template scale(); } -template +template Scalar -operator-(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) +operator-(Scalar const &lhs, detail::scalar_unit_round_up_t rhs) { return Scalar(lhs) -= rhs.template scale(); } -template +template Scalar -operator-(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) +operator-(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) { return Scalar(rhs) -= lhs.template scale(); } -template +template Scalar 
-operator-(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) +operator-(Scalar const &lhs, detail::scalar_unit_round_down_t rhs) { return Scalar(lhs) -= rhs.template scale(); } From cc8704acfc7ca94cf71c85d8967c177e4e2397f6 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 14:54:32 -0600 Subject: [PATCH 66/81] Scalar: Fix FreeBSD compile errors. --- lib/ts/Scalar.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index 3ef351cb5f3..c01aa33d4d4 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -111,7 +111,7 @@ namespace detail // template constexpr operator scalar_unit_round_up_t() { return {static_cast(_n)}; } template constexpr I - scale() + scale() const { return static_cast(_n / N + (0 != (_n % N))); } @@ -122,7 +122,7 @@ namespace detail // template operator scalar_unit_round_down_t() { return {static_cast(_n)}; } template constexpr I - scale() + scale() const { return static_cast(_n / N); } @@ -130,12 +130,12 @@ namespace detail // Scalar value, to be rounded up. template struct scalar_round_up_t { C _n; - template constexpr operator Scalar() { return scale_conversion_round_up(_n); } + template constexpr operator Scalar() const { return scale_conversion_round_up(_n); } }; // Scalar value, to be rounded down. template struct scalar_round_down_t { C _n; - template constexpr operator Scalar() { return scale_conversion_round_down(_n); } + template constexpr operator Scalar() const { return scale_conversion_round_down(_n); } }; } @@ -1111,7 +1111,7 @@ template ostream & operator<<(ostream &s, ApacheTrafficServer::Scalar const &x) { - static ApacheTrafficServer::detail::tag_label_B const b; + static ApacheTrafficServer::detail::tag_label_B b; s << x.units(); return ApacheTrafficServer::detail::tag_label(s, b); } From 34602e0f58ea209b3ab3983bcacccaf57ea52252 Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Fri, 10 Feb 2017 16:59:01 -0600 Subject: [PATCH 67/81] Scalar: Fix subtraction operators. --- lib/ts/Scalar.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index c01aa33d4d4..d51f1b91c9b 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -945,7 +945,7 @@ template Scalar operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) { - return Scalar(rhs) -= lhs.template scale(); + return Scalar(lhs.template scale()) -= rhs; } template Scalar @@ -957,7 +957,7 @@ template Scalar operator-(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) { - return Scalar(rhs) -= lhs.template scale(); + return Scalar(lhs.template scale()) -= rhs; } template Scalar From 0ec25b5ba9843b43a378acb458bec871a0b2d313 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 11 Feb 2017 03:02:52 -0600 Subject: [PATCH 68/81] CacheTool: add "--write" flag which if not present prevents any writing to disk. --- cmd/traffic_cache_tool/CacheTool.cc | 51 ++++++++++++++++++----------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 15f1d7b36b7..de7fd887755 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -69,7 +69,7 @@ FilePath VolumeFile; ts::CommandTable Commands; // Default this to read only, only enable write if specifically required. -int OPEN_RW_FLAGS = O_RDONLY; +int OPEN_RW_FLAG = O_RDONLY; struct Stripe; @@ -668,7 +668,7 @@ Span::loadDevice() Errata zret; int flags; - flags = OPEN_RW_FLAGS + flags = OPEN_RW_FLAG #if defined(O_DIRECT) | O_DIRECT #endif @@ -805,28 +805,36 @@ Errata Span::updateHeader() volume_mask[0] = false; // don't include free stripes in distinct volume count. 
hdr->num_volumes = volume_mask.count(); _header.reset(hdr); - ssize_t r = pwrite(_fd, hdr, hdr_size.units(), ts::CacheSpan::OFFSET.units()); - if (r < ts::CacheSpan::OFFSET.units()) - zret.push(0,errno,"Failed to update span - ", strerror(errno)); + if (OPEN_RW_FLAG) { + ssize_t r = pwrite(_fd, hdr, hdr_size.units(), ts::CacheSpan::OFFSET.units()); + if (r < ts::CacheSpan::OFFSET.units()) + zret.push(0,errno,"Failed to update span - ", strerror(errno)); + } else { + std::cout << "Writing not enabled, no updates perfomed" << std::endl; + } return zret; } void Span::clearPermanently() { - alignas(512) static char zero[CacheStoreBlocks::SCALE]; // should be all zero, it's static. - std::cout << "Clearing " << _path << " permanently on disk "; - ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); - if (n == sizeof(zero)) - std::cout << "done"; - else { - const char *text = strerror(errno); - std::cout << "failed"; - if (n >= 0) - std::cout << " - " << n << " of " << sizeof(zero) << " bytes written"; - std::cout << " - " << text; + if (OPEN_RW_FLAG) { + alignas(512) static char zero[CacheStoreBlocks::SCALE]; // should be all zero, it's static. 
+ std::cout << "Clearing " << _path << " permanently on disk "; + ssize_t n = pwrite(_fd, zero, sizeof(zero), ts::CacheSpan::OFFSET.units()); + if (n == sizeof(zero)) + std::cout << "done"; + else { + const char *text = strerror(errno); + std::cout << "failed"; + if (n >= 0) + std::cout << " - " << n << " of " << sizeof(zero) << " bytes written"; + std::cout << " - " << text; + } + std::cout << std::endl; + } else { + std::cout << "Clearing " << _path << " not performed, write not enabled" << std::endl; } - std::cout << std::endl; } /* --------------------------------------------------------------------------------------- */ Errata @@ -909,6 +917,7 @@ struct option Options[] = { {"help", 0, nullptr, 'h'}, {"spans", 1, nullptr, 's'}, {"volumes", 1, nullptr, 'v'}, + {"write", 0, nullptr, 'w' }, {nullptr, 0, nullptr, 0 } }; } @@ -932,7 +941,7 @@ Cmd_Allocate_Empty_Spans(int argc, char *argv[]) Errata zret; VolumeAllocator va; - OPEN_RW_FLAGS = O_RDWR; +// OPEN_RW_FLAG = O_RDWR; zret = va.load(SpanFile, VolumeFile); if (zret) { va.fillEmptySpans(); @@ -1023,7 +1032,7 @@ Clear_Spans(int argc, char *argv[]) Errata zret; Cache cache; - OPEN_RW_FLAGS = O_RDWR; +// OPEN_RW_FLAG = O_RDWR; if ((zret = cache.loadSpan(SpanFile))) { for (auto *span : cache._spans) { span->clearPermanently(); @@ -1051,6 +1060,10 @@ main(int argc, char *argv[]) case 'v': VolumeFile = optarg; break; + case 'w': + OPEN_RW_FLAG = O_RDWR; + std::cout << "NOTE: Writing to physical devices enabled" << std::endl; + break; } } From 128e419cdb47ce18adb619fbe3030d99d99b2541 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 11 Feb 2017 03:59:07 -0600 Subject: [PATCH 69/81] CacheTool: clang-format. 
--- cmd/traffic_cache_tool/CacheDefs.h | 59 ++++-- cmd/traffic_cache_tool/CacheTool.cc | 318 ++++++++++++++++------------ cmd/traffic_cache_tool/Command.h | 10 +- cmd/traffic_cache_tool/File.h | 16 +- lib/ts/MemView.cc | 19 +- lib/ts/MemView.h | 2 +- lib/ts/test_Metric.cc | 6 +- 7 files changed, 261 insertions(+), 169 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index b91260bb30d..999b0500a07 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -26,7 +26,10 @@ #include #include -namespace tag { struct bytes; } +namespace tag +{ +struct bytes; +} namespace ApacheTrafficServer { @@ -40,23 +43,55 @@ typedef Scalar<1024 * Kilobytes::SCALE, int64_t, tag::bytes> Megabytes; typedef Scalar<1024 * Megabytes::SCALE, int64_t, tag::bytes> Gigabytes; typedef Scalar<1024 * Gigabytes::SCALE, int64_t, tag::bytes> Terabytes; -std::ostream& operator<<(std::ostream& s, Bytes const& n) { return s << n.count() << " bytes"; } -std::ostream& operator<<(std::ostream& s, Kilobytes const& n) { return s << n.count() << " KB"; } -std::ostream& operator<<(std::ostream& s, Megabytes const& n) { return s << n.count() << " MB"; } -std::ostream& operator<<(std::ostream& s, Gigabytes const& n) { return s << n.count() << " HB"; } -std::ostream& operator<<(std::ostream& s, Terabytes const& n) { return s << n.count() << " TB"; } +std::ostream & +operator<<(std::ostream &s, Bytes const &n) +{ + return s << n.count() << " bytes"; +} +std::ostream & +operator<<(std::ostream &s, Kilobytes const &n) +{ + return s << n.count() << " KB"; +} +std::ostream & +operator<<(std::ostream &s, Megabytes const &n) +{ + return s << n.count() << " MB"; +} +std::ostream & +operator<<(std::ostream &s, Gigabytes const &n) +{ + return s << n.count() << " HB"; +} +std::ostream & +operator<<(std::ostream &s, Terabytes const &n) +{ + return s << n.count() << " TB"; +} // Units of allocation for stripes. 
- typedef Scalar<128 * Megabytes::SCALE, int64_t, tag::bytes> CacheStripeBlocks; +typedef Scalar<128 * Megabytes::SCALE, int64_t, tag::bytes> CacheStripeBlocks; // Size measurement of cache storage. // Also size of meta data storage units. - typedef Scalar<8 * Kilobytes::SCALE, int64_t, tag::bytes> CacheStoreBlocks; +typedef Scalar<8 * Kilobytes::SCALE, int64_t, tag::bytes> CacheStoreBlocks; // Size unit for content stored in cache. - typedef Scalar<512, int64_t, tag::bytes> CacheDataBlocks; +typedef Scalar<512, int64_t, tag::bytes> CacheDataBlocks; -std::ostream& operator<<(std::ostream& s, CacheStripeBlocks const& n) { return s << n.count() << " stripe blocks"; } -std::ostream& operator<<(std::ostream& s, CacheStoreBlocks const& n) { return s << n.count() << " store blocks"; } -std::ostream& operator<<(std::ostream& s, CacheDataBlocks const& n) { return s << n.count() << " data blocks"; } +std::ostream & +operator<<(std::ostream &s, CacheStripeBlocks const &n) +{ + return s << n.count() << " stripe blocks"; +} +std::ostream & +operator<<(std::ostream &s, CacheStoreBlocks const &n) +{ + return s << n.count() << " store blocks"; +} +std::ostream & +operator<<(std::ostream &s, CacheDataBlocks const &n) +{ + return s << n.count() << " data blocks"; +} /** A cache span is a representation of raw storage. It corresponds to a raw disk, disk partition, file, or directory. diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index de7fd887755..e00b0b9e379 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -87,13 +87,13 @@ struct Span { /// This is broken and needs to be cleaned up. void clearPermanently(); - ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); + ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); Errata updateHeader(); FilePath _path; ats_scoped_fd _fd; int _vol_idx = 0; - CacheStoreBlocks _base; ///< Offset to first usable byte. 
+ CacheStoreBlocks _base; ///< Offset to first usable byte. CacheStoreBlocks _offset; ///< Offset to first content byte. // The space between _base and _offset is where the span information is stored. CacheStoreBlocks _len; ///< Total length of span. @@ -102,80 +102,107 @@ struct Span { /// A copy of the data on the disk. std::unique_ptr _header; /// Live information about stripes. - std::list _stripes; + std::list _stripes; }; /* --------------------------------------------------------------------------------------- */ -struct Stripe -{ - Stripe(Span* span, Bytes start, CacheStoreBlocks len); +struct Stripe { + Stripe(Span *span, Bytes start, CacheStoreBlocks len); - bool isFree() const { return 0 == _vol_idx; } + bool + isFree() const + { + return 0 == _vol_idx; + } - Span* _span; ///< Hosting span. - Bytes _start; ///< Offset of first byte of stripe. - Bytes _content; ///< Start of content. + Span *_span; ///< Hosting span. + Bytes _start; ///< Offset of first byte of stripe. + Bytes _content; ///< Start of content. CacheStoreBlocks _len; ///< Length of stripe. - uint8_t _vol_idx = 0; ///< Volume index. - uint8_t _type = 0; ///< Stripe type. + uint8_t _vol_idx = 0; ///< Volume index. + uint8_t _type = 0; ///< Stripe type. }; -Stripe::Stripe(Span* span, Bytes start, CacheStoreBlocks len) - : _span(span), _start(start), _len(len) +Stripe::Stripe(Span *span, Bytes start, CacheStoreBlocks len) : _span(span), _start(start), _len(len) { } /* --------------------------------------------------------------------------------------- */ /// A live volume. /// Volume data based on data from loaded spans. struct Volume { - int _idx; ///< Volume index. + int _idx; ///< Volume index. CacheStoreBlocks _size; ///< Amount of storage allocated. - std::vector _stripes; + std::vector _stripes; }; /* --------------------------------------------------------------------------------------- */ /// Data parsed from the volume config file. 
-struct VolumeConfig -{ - Errata load(FilePath const& path); +struct VolumeConfig { + Errata load(FilePath const &path); /// Data direct from the config file. - struct Data - { - int _idx = 0; ///< Volume index. - int _percent = 0; ///< Size if specified as a percent. - Megabytes _size = 0; ///< Size if specified as an absolute. + struct Data { + int _idx = 0; ///< Volume index. + int _percent = 0; ///< Size if specified as a percent. + Megabytes _size = 0; ///< Size if specified as an absolute. CacheStripeBlocks _alloc; ///< Allocation size. // Methods handy for parsing - bool hasSize() const { return _percent > 0 || _size > 0; } - bool hasIndex() const { return _idx > 0; } + bool + hasSize() const + { + return _percent > 0 || _size > 0; + } + bool + hasIndex() const + { + return _idx > 0; + } }; std::vector _volumes; typedef std::vector::iterator iterator; typedef std::vector::const_iterator const_iterator; - iterator begin() { return _volumes.begin(); } - iterator end() { return _volumes.end(); } - const_iterator begin() const { return _volumes.begin(); } - const_iterator end() const { return _volumes.end(); } + iterator + begin() + { + return _volumes.begin(); + } + iterator + end() + { + return _volumes.end(); + } + const_iterator + begin() const + { + return _volumes.begin(); + } + const_iterator + end() const + { + return _volumes.end(); + } Errata validatePercentAllocation(); void convertToAbsolute(ts::CacheStripeBlocks total_span_size); }; Errata -VolumeConfig::validatePercentAllocation() { +VolumeConfig::validatePercentAllocation() +{ Errata zret; int n = 0; - for ( auto& vol : _volumes ) n += vol._percent; - if (n > 100) zret.push(0, 10, "Volume percent allocation ", n, " is more than 100%"); + for (auto &vol : _volumes) + n += vol._percent; + if (n > 100) + zret.push(0, 10, "Volume percent allocation ", n, " is more than 100%"); return zret; } void VolumeConfig::convertToAbsolute(ts::CacheStripeBlocks n) { - for ( auto& vol : _volumes ) { + for (auto 
&vol : _volumes) { if (vol._percent) { vol._alloc = (n * vol._percent + 99) / 100; } else { @@ -192,7 +219,7 @@ struct Cache { Errata loadSpanDirect(FilePath const &path, int vol_idx = -1, Bytes size = -1); /// Change the @a span to have a single, unused stripe occupying the entire @a span. - Errata clearSpan(Span* span); + Errata clearSpan(Span *span); enum class SpanDumpDepth { SPAN, STRIPE, DIRECTORY }; void dumpSpans(SpanDumpDepth depth); @@ -203,26 +230,26 @@ struct Cache { std::list _spans; std::map _volumes; - }; /* --------------------------------------------------------------------------------------- */ /// Temporary structure used for doing allocation computations. class VolumeAllocator { /// Working struct that tracks allocation information. - struct V - { - VolumeConfig::Data const& _config; ///< Configuration instance. - CacheStripeBlocks _size; ///< Current actual size. + struct V { + VolumeConfig::Data const &_config; ///< Configuration instance. + CacheStripeBlocks _size; ///< Current actual size. int64_t _deficit; int64_t _shares; - V(VolumeConfig::Data const& config, CacheStripeBlocks size, int64_t deficit = 0, int64_t shares = 0) + V(VolumeConfig::Data const &config, CacheStripeBlocks size, int64_t deficit = 0, int64_t shares = 0) : _config(config), _size(size), _deficit(deficit), _shares(shares) - { - } - V& operator = (V const& that) { - new(this) V(that._config, that._size, that._deficit, that._shares); + { + } + V & + operator=(V const &that) + { + new (this) V(that._config, that._size, that._deficit, that._shares); return *this; } }; @@ -230,26 +257,29 @@ class VolumeAllocator typedef std::vector AV; AV _av; ///< Working vector of volume data. - Cache _cache; ///< Current state. + Cache _cache; ///< Current state. VolumeConfig _vols; ///< Configuration state. 
public: - VolumeAllocator(); - Errata load(FilePath const& spanFile, FilePath const& volumeFile); + Errata load(FilePath const &spanFile, FilePath const &volumeFile); Errata fillEmptySpans(); }; -VolumeAllocator::VolumeAllocator() { } +VolumeAllocator::VolumeAllocator() +{ +} Errata -VolumeAllocator::load(FilePath const& spanFile, FilePath const& volumeFile) +VolumeAllocator::load(FilePath const &spanFile, FilePath const &volumeFile) { Errata zret; - if (!volumeFile) zret.push(0, 9, "Volume config file not set"); - if (!spanFile) zret.push(0, 9, "Span file not set"); + if (!volumeFile) + zret.push(0, 9, "Volume config file not set"); + if (!spanFile) + zret.push(0, 9, "Span file not set"); if (zret) { zret = _vols.load(volumeFile); @@ -258,12 +288,12 @@ VolumeAllocator::load(FilePath const& spanFile, FilePath const& volumeFile) if (zret) { CacheStripeBlocks total = _cache.calcTotalSpanConfiguredSize(); _vols.convertToAbsolute(total); - for ( auto& vol : _vols ) { + for (auto &vol : _vols) { CacheStripeBlocks size(0); auto spot = _cache._volumes.find(vol._idx); if (spot != _cache._volumes.end()) size = scale_down(spot->second._size); - _av.push_back({ vol, size, 0, 0}); + _av.push_back({vol, size, 0, 0}); } } } @@ -280,19 +310,20 @@ VolumeAllocator::fillEmptySpans() static const int64_t SCALE = 1000; // Walk the spans, skipping ones that are not empty. - for ( auto span : _cache._spans ) { + for (auto span : _cache._spans) { int64_t total_shares = 0; - if (!span->isEmpty()) continue; + if (!span->isEmpty()) + continue; std::cout << "Allocating " << scale_down(span->_len) << " from span " << span->_path << std::endl; // Walk the volumes and get the relative allocations. 
- for ( auto& v : _av ) { + for (auto &v : _av) { auto delta = v._config._alloc - v._size; if (delta > 0) { v._deficit = (delta.count() * SCALE) / v._config._alloc.count(); - v._shares = delta.count() * v._deficit; + v._shares = delta.count() * v._deficit; total_shares += v._shares; } else { v._shares = 0; @@ -303,10 +334,10 @@ VolumeAllocator::fillEmptySpans() ts::CacheStripeBlocks span_used(0); // sort by deficit so least relatively full volumes go first. - std::sort(_av.begin(), _av.end(), [](V const& lhs, V const& rhs) { return lhs._deficit > rhs._deficit; }); - for ( auto& v : _av ) { + std::sort(_av.begin(), _av.end(), [](V const &lhs, V const &rhs) { return lhs._deficit > rhs._deficit; }); + for (auto &v : _av) { if (v._shares) { - auto n = (((span_blocks - span_used) * v._shares) + total_shares -1) / total_shares; + auto n = (((span_blocks - span_used) * v._shares) + total_shares - 1) / total_shares; auto delta = v._config._alloc - v._size; // Not sure why this is needed. But a large and empty volume can dominate the shares // enough to get more than it actually needs if the other volume are relative small or full. 
@@ -500,7 +531,7 @@ Cache::loadSpan(FilePath const &path) { Errata zret; if (!path.is_readable()) - zret = Errata::Message(0, EPERM, path," is not readable."); + zret = Errata::Message(0, EPERM, path, " is not readable."); else if (path.is_regular_file()) zret = this->loadSpanConfig(path); else @@ -519,10 +550,10 @@ Cache::loadSpanDirect(FilePath const &path, int vol_idx, Bytes size) int nspb = span->_header->num_diskvol_blks; for (auto i = 0; i < nspb; ++i) { ts::CacheStripeDescriptor &raw = span->_header->stripes[i]; - Stripe* stripe = new Stripe(span.get(), raw.offset, raw.len); + Stripe *stripe = new Stripe(span.get(), raw.offset, raw.len); if (raw.free == 0) { stripe->_vol_idx = raw.vol_idx; - stripe->_type = raw.type; + stripe->_type = raw.type; _volumes[stripe->_vol_idx]._stripes.push_back(stripe); _volumes[stripe->_vol_idx]._size += stripe->_len; } else { @@ -569,7 +600,7 @@ Cache::loadSpanConfig(FilePath const &path) auto n = ts::svtoi(value, &text); if (text == value && 0 < n && n < 256) { } else { - zret.push(0,0, "Invalid volume index '", value, "'"); + zret.push(0, 0, "Invalid volume index '", value, "'"); } } } @@ -597,7 +628,8 @@ Cache::dumpSpans(SpanDumpDepth depth) for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() - << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") << std::endl; + << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? 
"free" : "in-use") + << std::endl; if (depth >= SpanDumpDepth::STRIPE) { Open_Stripe(span->_fd, stripe); } @@ -620,21 +652,23 @@ Cache::dumpVolumes() } } -ts::CacheStripeBlocks Cache::calcTotalSpanConfiguredSize() +ts::CacheStripeBlocks +Cache::calcTotalSpanConfiguredSize() { ts::CacheStripeBlocks zret(0); - for ( auto span : _spans ) { + for (auto span : _spans) { zret += ts::scale_down(span->_len); } return zret; } -ts::CacheStripeBlocks Cache::calcTotalSpanPhysicalSize() +ts::CacheStripeBlocks +Cache::calcTotalSpanPhysicalSize() { ts::CacheStripeBlocks zret(0); - for ( auto span : _spans ) { + for (auto span : _spans) { // This is broken, physical_size doesn't work for devices, need to fix that. zret += ts::scale_down(span->_path.physical_size()); } @@ -652,7 +686,7 @@ Span::load() { Errata zret; if (!_path.is_readable()) - zret = Errata::Message(0, EPERM, _path," is not readable."); + zret = Errata::Message(0, EPERM, _path, " is not readable."); else if (_path.is_char_device() || _path.is_block_device()) zret = this->loadDevice(); else if (_path.is_dir()) @@ -682,16 +716,16 @@ Span::loadDevice() if (fd) { if (ink_file_get_geometry(fd, _geometry)) { off_t offset = ts::CacheSpan::OFFSET.units(); - CacheStoreBlocks span_hdr_size(1); // default. + CacheStoreBlocks span_hdr_size(1); // default. 
static const ssize_t BUFF_SIZE = CacheStoreBlocks::SCALE; // match default span_hdr_size alignas(512) char buff[BUFF_SIZE]; ssize_t n = pread(fd, buff, BUFF_SIZE, offset); if (n >= BUFF_SIZE) { ts::SpanHeader &span_hdr = reinterpret_cast(buff); - _base = _base.scale_up(Bytes(offset)); + _base = _base.scale_up(Bytes(offset)); // See if it looks valid if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { - int nspb = span_hdr.num_diskvol_blks; + int nspb = span_hdr.num_diskvol_blks; span_hdr_size = span_hdr_size.scale_up(Bytes(sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor))); _header.reset(new (malloc(span_hdr_size.units())) ts::SpanHeader); if (span_hdr_size.units() <= BUFF_SIZE) { @@ -706,7 +740,7 @@ Span::loadDevice() _len = _len.scale_down(Bytes(_geometry.totalsz)) - _base; } // valid FD means the device is accessible and has enough storage to be configured. - _fd = fd.release(); + _fd = fd.release(); _offset = _base + span_hdr_size; } else { zret = Errata::Message(0, errno, "Failed to read from ", _path, '[', errno, ':', strerror(errno), ']'); @@ -720,77 +754,83 @@ Span::loadDevice() return zret; } -ts::Rv Span::allocStripe(int vol_idx, CacheStripeBlocks len) +ts::Rv +Span::allocStripe(int vol_idx, CacheStripeBlocks len) { - for (auto spot = _stripes.begin(), limit = _stripes.end() ; spot != limit ; ++spot ) { - Stripe* stripe = *spot; + for (auto spot = _stripes.begin(), limit = _stripes.end(); spot != limit; ++spot) { + Stripe *stripe = *spot; if (stripe->isFree()) { if (len < stripe->_len) { // If the remains would be less than a stripe block, just take it all. 
if (stripe->_len <= (len + CacheStripeBlocks(1))) { stripe->_vol_idx = vol_idx; - stripe->_type = 1; + stripe->_type = 1; return stripe; } else { - Stripe* ns = new Stripe(this, stripe->_start, len); + Stripe *ns = new Stripe(this, stripe->_start, len); stripe->_start += len; stripe->_len -= len; ns->_vol_idx = vol_idx; - ns->_type = 1; + ns->_type = 1; _stripes.insert(spot, ns); return ns; } } } } - return ts::Rv(nullptr, Errata::Message(0,15,"Failed to allocate stripe of size ", len, " - no free block large enough")); + return ts::Rv(nullptr, + Errata::Message(0, 15, "Failed to allocate stripe of size ", len, " - no free block large enough")); } -bool Span::isEmpty() const { return std::all_of(_stripes.begin(), _stripes.end(), [] (Stripe* s) { return s->_vol_idx == 0; });} +bool +Span::isEmpty() const +{ + return std::all_of(_stripes.begin(), _stripes.end(), [](Stripe *s) { return s->_vol_idx == 0; }); +} Errata Span::clear() { - Stripe* stripe; - std::for_each(_stripes.begin(), _stripes.end(), [](Stripe* s) { delete s; }); + Stripe *stripe; + std::for_each(_stripes.begin(), _stripes.end(), [](Stripe *s) { delete s; }); _stripes.clear(); // Gah, due to lack of anything better, TS depends on the number of usable blocks to be consistent // with internal calculations so have to match that here. Yay. CacheStoreBlocks eff = _len - _base; // starting # of usable blocks. // The maximum number of volumes that can store stored, accounting for the space used to store the descriptors. 
- int n = (eff.units() - sizeof(ts::SpanHeader)) / (CacheStripeBlocks::SCALE + sizeof(CacheStripeDescriptor)); + int n = (eff.units() - sizeof(ts::SpanHeader)) / (CacheStripeBlocks::SCALE + sizeof(CacheStripeDescriptor)); _offset = _base + _offset.scale_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(CacheStripeDescriptor)); - stripe = new Stripe(this, _offset, _len - _offset); + stripe = new Stripe(this, _offset, _len - _offset); _stripes.push_back(stripe); _free_space = stripe->_len; return Errata(); } - -Errata Span::updateHeader() +Errata +Span::updateHeader() { Errata zret; int n = _stripes.size(); - CacheStripeDescriptor* sd; - CacheStoreBlocks hdr_size = scale_up(sizeof(ts::SpanHeader) + ( n - 1 ) * sizeof(ts::CacheStripeDescriptor)); - void* raw = ats_memalign(512, hdr_size.units()); - ts::SpanHeader* hdr = static_cast(raw); - std::bitset volume_mask; - - hdr->magic = ts::SpanHeader::MAGIC; - hdr->num_free = 0; - hdr->num_used = 0; + CacheStripeDescriptor *sd; + CacheStoreBlocks hdr_size = scale_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(ts::CacheStripeDescriptor)); + void *raw = ats_memalign(512, hdr_size.units()); + ts::SpanHeader *hdr = static_cast(raw); + std::bitset volume_mask; + + hdr->magic = ts::SpanHeader::MAGIC; + hdr->num_free = 0; + hdr->num_used = 0; hdr->num_diskvol_blks = n; - hdr->num_blocks = _len; + hdr->num_blocks = _len; sd = hdr->stripes; - for ( auto stripe : _stripes ) { - sd->offset = stripe->_start; - sd->len = stripe->_len; - sd->vol_idx = stripe->_vol_idx; - sd->type = stripe->_type; + for (auto stripe : _stripes) { + sd->offset = stripe->_start; + sd->len = stripe->_len; + sd->vol_idx = stripe->_vol_idx; + sd->type = stripe->_type; volume_mask[sd->vol_idx] = true; if (sd->vol_idx == 0) { sd->free = true; @@ -802,13 +842,13 @@ Errata Span::updateHeader() ++sd; } - volume_mask[0] = false; // don't include free stripes in distinct volume count. + volume_mask[0] = false; // don't include free stripes in distinct volume count. 
hdr->num_volumes = volume_mask.count(); _header.reset(hdr); if (OPEN_RW_FLAG) { ssize_t r = pwrite(_fd, hdr, hdr_size.units(), ts::CacheSpan::OFFSET.units()); if (r < ts::CacheSpan::OFFSET.units()) - zret.push(0,errno,"Failed to update span - ", strerror(errno)); + zret.push(0, errno, "Failed to update span - ", strerror(errno)); } else { std::cout << "Writing not enabled, no updates perfomed" << std::endl; } @@ -838,7 +878,7 @@ Span::clearPermanently() } /* --------------------------------------------------------------------------------------- */ Errata -VolumeConfig::load(FilePath const& path) +VolumeConfig::load(FilePath const &path) { static const ts::StringView TAG_SIZE("size"); static const ts::StringView TAG_VOL("volume"); @@ -903,8 +943,10 @@ VolumeConfig::load(FilePath const& path) if (v.hasSize() && v.hasIndex()) { _volumes.push_back(std::move(v)); } else { - if (!v.hasSize()) zret.push(0,7, "Line ", ln, " does not have the required field ", TAG_SIZE); - if (!v.hasIndex()) zret.push(0,8, "Line ", ln, " does not have the required field ", TAG_VOL); + if (!v.hasSize()) + zret.push(0, 7, "Line ", ln, " does not have the required field ", TAG_SIZE); + if (!v.hasIndex()) + zret.push(0, 8, "Line ", ln, " does not have the required field ", TAG_VOL); } } } else { @@ -913,13 +955,11 @@ VolumeConfig::load(FilePath const& path) return zret; } /* --------------------------------------------------------------------------------------- */ -struct option Options[] = { - {"help", 0, nullptr, 'h'}, - {"spans", 1, nullptr, 's'}, - {"volumes", 1, nullptr, 'v'}, - {"write", 0, nullptr, 'w' }, - {nullptr, 0, nullptr, 0 } -}; +struct option Options[] = {{"help", 0, nullptr, 'h'}, + {"spans", 1, nullptr, 's'}, + {"volumes", 1, nullptr, 'v'}, + {"write", 0, nullptr, 'w'}, + {nullptr, 0, nullptr, 0}}; } Errata @@ -929,8 +969,8 @@ List_Stripes(Cache::SpanDumpDepth depth, int argc, char *argv[]) Cache cache; if ((zret = cache.loadSpan(SpanFile))) { - cache.dumpSpans(depth); - 
cache.dumpVolumes(); + cache.dumpSpans(depth); + cache.dumpVolumes(); } return zret; } @@ -941,7 +981,7 @@ Cmd_Allocate_Empty_Spans(int argc, char *argv[]) Errata zret; VolumeAllocator va; -// OPEN_RW_FLAG = O_RDWR; + // OPEN_RW_FLAG = O_RDWR; zret = va.load(SpanFile, VolumeFile); if (zret) { va.fillEmptySpans(); @@ -957,8 +997,10 @@ Simulate_Span_Allocation(int argc, char *argv[]) VolumeConfig vols; Cache cache; - if (!VolumeFile) zret.push(0, 9, "Volume config file not set"); - if (!SpanFile) zret.push(0, 9, "Span file not set"); + if (!VolumeFile) + zret.push(0, 9, "Volume config file not set"); + if (!SpanFile) + zret.push(0, 9, "Span file not set"); if (zret) { zret = vols.load(VolumeFile); @@ -969,30 +1011,32 @@ Simulate_Span_Allocation(int argc, char *argv[]) struct V { int idx; ts::CacheStripeBlocks alloc; // target allocation - ts::CacheStripeBlocks size; // actually allocated space + ts::CacheStripeBlocks size; // actually allocated space int64_t deficit; int64_t shares; }; std::vector av; vols.convertToAbsolute(total); - for ( auto& vol : vols ) { + for (auto &vol : vols) { ts::CacheStripeBlocks size(0); auto spot = cache._volumes.find(vol._idx); if (spot != cache._volumes.end()) size = ts::scale_down(spot->second._size); - av.push_back({ vol._idx, vol._alloc, size, 0, 0}); + av.push_back({vol._idx, vol._alloc, size, 0, 0}); } - for ( auto span : cache._spans ) { - if (span->_free_space <= 0) continue; + for (auto span : cache._spans) { + if (span->_free_space <= 0) + continue; static const int64_t SCALE = 1000; - int64_t total_shares = 0; - for ( auto& v : av ) { + int64_t total_shares = 0; + for (auto &v : av) { auto delta = v.alloc - v.size; if (delta > 0) { v.deficit = (delta.count() * SCALE) / v.alloc.count(); - v.shares = delta.count() * v.deficit; + v.shares = delta.count() * v.deficit; total_shares += v.shares; - std::cout << "Volume " << v.idx << " allocated " << v.alloc << " has " << v.size << " needs " << (v.alloc - v.size) << " deficit " << 
v.deficit << std::endl; + std::cout << "Volume " << v.idx << " allocated " << v.alloc << " has " << v.size << " needs " << (v.alloc - v.size) + << " deficit " << v.deficit << std::endl; } else { v.shares = 0; } @@ -1002,10 +1046,10 @@ Simulate_Span_Allocation(int argc, char *argv[]) ts::CacheStripeBlocks span_used(0); std::cout << "Allocation from span of " << span_blocks << std::endl; // sort by deficit so least relatively full volumes go first. - std::sort(av.begin(), av.end(), [](V const& lhs, V const& rhs) { return lhs.deficit > rhs.deficit; }); - for ( auto& v : av ) { + std::sort(av.begin(), av.end(), [](V const &lhs, V const &rhs) { return lhs.deficit > rhs.deficit; }); + for (auto &v : av) { if (v.shares) { - auto n = (((span_blocks - span_used) * v.shares) + total_shares -1) / total_shares; + auto n = (((span_blocks - span_used) * v.shares) + total_shares - 1) / total_shares; auto delta = v.alloc - v.size; // Not sure why this is needed. But a large and empty volume can dominate the shares // enough to get more than it actually needs if the other volume are relative small or full. 
@@ -1013,8 +1057,10 @@ Simulate_Span_Allocation(int argc, char *argv[]) n = std::min(n, delta); v.size += n; span_used += n; - std::cout << "Volume " << v.idx << " allocated " << n << " of " << delta << " needed to total of " << v.size << " of " << v.alloc << std::endl; - std::cout << " with " << v.shares << " shares of " << total_shares << " total - " << static_cast((v.shares * SCALE) / total_shares)/10.0 << "%" << std::endl; + std::cout << "Volume " << v.idx << " allocated " << n << " of " << delta << " needed to total of " << v.size << " of " + << v.alloc << std::endl; + std::cout << " with " << v.shares << " shares of " << total_shares << " total - " + << static_cast((v.shares * SCALE) / total_shares) / 10.0 << "%" << std::endl; total_shares -= v.shares; } } @@ -1032,7 +1078,7 @@ Clear_Spans(int argc, char *argv[]) Errata zret; Cache cache; -// OPEN_RW_FLAG = O_RDWR; + // OPEN_RW_FLAG = O_RDWR; if ((zret = cache.loadSpan(SpanFile))) { for (auto *span : cache._spans) { span->clearPermanently(); diff --git a/cmd/traffic_cache_tool/Command.h b/cmd/traffic_cache_tool/Command.h index d1949f24aef..776bf859707 100644 --- a/cmd/traffic_cache_tool/Command.h +++ b/cmd/traffic_cache_tool/Command.h @@ -111,7 +111,7 @@ class CommandTable /** Set the index of the "first" argument. This causes the command processing to skip @a n arguments. */ - self& setArgIndex(int n); + self &setArgIndex(int n); /** Invoke a command. @return The return value of the executed command, or an error value if the command was not found. 
@@ -127,7 +127,11 @@ class CommandTable friend class Command; }; -inline CommandTable& CommandTable::setArgIndex(int n) { _opt_idx = n; return *this; } - +inline CommandTable & +CommandTable::setArgIndex(int n) +{ + _opt_idx = n; + return *this; +} } #endif diff --git a/cmd/traffic_cache_tool/File.h b/cmd/traffic_cache_tool/File.h index a03b7b8da9a..569db16af2e 100644 --- a/cmd/traffic_cache_tool/File.h +++ b/cmd/traffic_cache_tool/File.h @@ -80,13 +80,13 @@ class FilePath protected: /// Get the stat buffer. /// @return A valid stat buffer or @c nullptr if the system call failed. - template T stat(T (*f)(struct stat const*)) const; + template T stat(T (*f)(struct stat const *)) const; - ats_scoped_str _path; ///< File path. + ats_scoped_str _path; ///< File path. - enum class STAT_P : int8_t { INVALID = -1, UNDEF = 0, VALID = 1}; + enum class STAT_P : int8_t { INVALID = -1, UNDEF = 0, VALID = 1 }; mutable STAT_P _stat_p = STAT_P::UNDEF; ///< Whether _stat is valid. - mutable struct stat _stat; ///< File information. + mutable struct stat _stat; ///< File information. }; /** A file support class for handling files as bulk content. @@ -163,7 +163,9 @@ FilePath::is_relative() const return !this->is_absolute(); } - template T FilePath::stat(T (*f)(struct stat const*)) const +template +T +FilePath::stat(T (*f)(struct stat const *)) const { if (STAT_P::UNDEF == _stat_p) _stat_p = ::stat(_path, &_stat) >= 0 ? 
STAT_P::VALID : STAT_P::INVALID; @@ -176,7 +178,7 @@ FilePath operator/(char const *lhs, FilePath const &rhs); inline int FilePath::file_type() const { - return this->stat([](struct stat const* s) -> int { return s->st_mode & S_IFMT; }); + return this->stat([](struct stat const *s) -> int { return s->st_mode & S_IFMT; }); } inline bool @@ -203,7 +205,7 @@ FilePath::is_regular_file() const inline off_t FilePath::physical_size() const { - return this->stat([](struct stat const* s) { return s->st_size; }); + return this->stat([](struct stat const *s) { return s->st_size; }); } inline BulkFile::BulkFile(super &&that) : super(that) diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc index 59d1559aaab..f701f5cefb8 100644 --- a/lib/ts/MemView.cc +++ b/lib/ts/MemView.cc @@ -41,14 +41,14 @@ strcasecmp(StringView lhs, StringView rhs) } intmax_t -svtoi(StringView src, StringView* out, int base) +svtoi(StringView src, StringView *out, int base) { static const int8_t convert[256] = { -// 0 1 2 3 4 5 6 7 8 9 A B C D E F + // 0 1 2 3 4 5 6 7 8 9 A B C D E F -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 00 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 10 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 20 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // 30 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // 30 -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 40 25, 26, 27, 28, 20, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1, // 50 -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 60 @@ -65,10 +65,12 @@ svtoi(StringView src, StringView* out, int base) intmax_t zret = 0; - if (*out) out->clear(); - if (!(1 < base && base <= 36)) return 0; + if (*out) + out->clear(); + if (!(1 < base && base <= 36)) + return 0; if (src.ltrim(&isspace)) { - const char* start = src.ptr(); + const char *start = src.ptr(); int8_t v; bool neg = false; if ('-' == *src) { @@ -79,11 +81,12 @@ 
svtoi(StringView src, StringView* out, int base) zret = zret * base + v; ++src; } - if (out && (src.ptr() > (neg ? start+1 : start))) { + if (out && (src.ptr() > (neg ? start + 1 : start))) { out->setView(start, src.ptr()); } - if (neg) zret = -zret; + if (neg) + zret = -zret; } return zret; } diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h index f07b32f74ff..ef4770d9b41 100644 --- a/lib/ts/MemView.h +++ b/lib/ts/MemView.h @@ -50,7 +50,7 @@ int strcasecmp(StringView lhs, StringView rhs); - If the number starts with a literal '0' then it is treated as base 8. - If the number starts with the literal characters '0x' or '0X' then it is treated as base 16. */ -intmax_t svtoi(StringView src, StringView* parsed = nullptr, int base = 10); +intmax_t svtoi(StringView src, StringView *parsed = nullptr, int base = 10); /** A read only view of contiguous piece of memory. diff --git a/lib/ts/test_Metric.cc b/lib/ts/test_Metric.cc index 7bd78325b5b..cdde41d556b 100644 --- a/lib/ts/test_Metric.cc +++ b/lib/ts/test_Metric.cc @@ -173,8 +173,10 @@ test_Compile() KBytes x(12); KiBytes y(12); - if (x > 12) std::cout << "Operator > works" << std::endl; - if (y > 12) std::cout << "Operator > works" << std::endl; + if (x > 12) + std::cout << "Operator > works" << std::endl; + if (y > 12) + std::cout << "Operator > works" << std::endl; } int From ad5f364013858ed89f2ccad6648bb94d5735fc28 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 11 Feb 2017 04:09:13 -0600 Subject: [PATCH 70/81] CacheTool: Makefile fixes. 
--- cmd/traffic_cache_tool/CacheTool.cc | 32 ++++++++++++++--------------- cmd/traffic_cache_tool/Makefile.am | 1 - 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index e00b0b9e379..d7d14ec4542 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -58,8 +58,8 @@ using ts::CacheStripeDescriptor; using ts::Errata; using ts::FilePath; -using ts::scale_up; -using ts::scale_down; +using ts::round_up; +using ts::round_down; namespace { @@ -206,7 +206,7 @@ VolumeConfig::convertToAbsolute(ts::CacheStripeBlocks n) if (vol._percent) { vol._alloc = (n * vol._percent + 99) / 100; } else { - vol._alloc = ts::scale_up(vol._size); + vol._alloc = round_up(vol._size); } } } @@ -292,7 +292,7 @@ VolumeAllocator::load(FilePath const &spanFile, FilePath const &volumeFile) CacheStripeBlocks size(0); auto spot = _cache._volumes.find(vol._idx); if (spot != _cache._volumes.end()) - size = scale_down(spot->second._size); + size = round_down(spot->second._size); _av.push_back({vol, size, 0, 0}); } } @@ -316,7 +316,7 @@ VolumeAllocator::fillEmptySpans() if (!span->isEmpty()) continue; - std::cout << "Allocating " << scale_down(span->_len) << " from span " << span->_path << std::endl; + std::cout << "Allocating " << CacheStripeBlocks(round_down(span->_len)) << " from span " << span->_path << std::endl; // Walk the volumes and get the relative allocations. for (auto &v : _av) { @@ -330,7 +330,7 @@ VolumeAllocator::fillEmptySpans() } } // Now allocate blocks. - ts::CacheStripeBlocks span_blocks = ts::scale_up(span->_free_space); + ts::CacheStripeBlocks span_blocks = round_up(span->_free_space); ts::CacheStripeBlocks span_used(0); // sort by deficit so least relatively full volumes go first. 
@@ -658,7 +658,7 @@ Cache::calcTotalSpanConfiguredSize() ts::CacheStripeBlocks zret(0); for (auto span : _spans) { - zret += ts::scale_down(span->_len); + zret += round_down(span->_len); } return zret; } @@ -670,7 +670,7 @@ Cache::calcTotalSpanPhysicalSize() for (auto span : _spans) { // This is broken, physical_size doesn't work for devices, need to fix that. - zret += ts::scale_down(span->_path.physical_size()); + zret += round_down(span->_path.physical_size()); } return zret; } @@ -722,11 +722,11 @@ Span::loadDevice() ssize_t n = pread(fd, buff, BUFF_SIZE, offset); if (n >= BUFF_SIZE) { ts::SpanHeader &span_hdr = reinterpret_cast(buff); - _base = _base.scale_up(Bytes(offset)); + _base = round_up(offset); // See if it looks valid if (span_hdr.magic == ts::SpanHeader::MAGIC && span_hdr.num_diskvol_blks == span_hdr.num_used + span_hdr.num_free) { int nspb = span_hdr.num_diskvol_blks; - span_hdr_size = span_hdr_size.scale_up(Bytes(sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor))); + span_hdr_size = round_up(sizeof(ts::SpanHeader) + (nspb - 1) * sizeof(ts::CacheStripeDescriptor)); _header.reset(new (malloc(span_hdr_size.units())) ts::SpanHeader); if (span_hdr_size.units() <= BUFF_SIZE) { memcpy(_header.get(), buff, span_hdr_size.units()); @@ -737,7 +737,7 @@ Span::loadDevice() _len = _header->num_blocks; } else { zret = Errata::Message(0, 0, "Span header for ", _path, " is invalid"); - _len = _len.scale_down(Bytes(_geometry.totalsz)) - _base; + _len = round_down(_geometry.totalsz) - _base; } // valid FD means the device is accessible and has enough storage to be configured. _fd = fd.release(); @@ -800,7 +800,7 @@ Span::clear() CacheStoreBlocks eff = _len - _base; // starting # of usable blocks. // The maximum number of volumes that can store stored, accounting for the space used to store the descriptors. 
int n = (eff.units() - sizeof(ts::SpanHeader)) / (CacheStripeBlocks::SCALE + sizeof(CacheStripeDescriptor)); - _offset = _base + _offset.scale_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(CacheStripeDescriptor)); + _offset = _base + round_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(CacheStripeDescriptor)); stripe = new Stripe(this, _offset, _len - _offset); _stripes.push_back(stripe); _free_space = stripe->_len; @@ -814,7 +814,7 @@ Span::updateHeader() Errata zret; int n = _stripes.size(); CacheStripeDescriptor *sd; - CacheStoreBlocks hdr_size = scale_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(ts::CacheStripeDescriptor)); + CacheStoreBlocks hdr_size = round_up(sizeof(ts::SpanHeader) + (n - 1) * sizeof(ts::CacheStripeDescriptor)); void *raw = ats_memalign(512, hdr_size.units()); ts::SpanHeader *hdr = static_cast(raw); std::bitset volume_mask; @@ -913,7 +913,7 @@ VolumeConfig::load(FilePath const &path) if (text) { ts::StringView percent(text.end(), value.end()); // clip parsed number. if (!percent) { - v._size = ts::scale_up(v._size = n); + v._size = round_up(v._size = n); if (v._size.count() != n) { zret.push(0, 0, "Line ", ln, " size ", n, " was rounded up to ", v._size); } @@ -1021,7 +1021,7 @@ Simulate_Span_Allocation(int argc, char *argv[]) ts::CacheStripeBlocks size(0); auto spot = cache._volumes.find(vol._idx); if (spot != cache._volumes.end()) - size = ts::scale_down(spot->second._size); + size = round_down(spot->second._size); av.push_back({vol._idx, vol._alloc, size, 0, 0}); } for (auto span : cache._spans) { @@ -1042,7 +1042,7 @@ Simulate_Span_Allocation(int argc, char *argv[]) } } // Now allocate blocks. - ts::CacheStripeBlocks span_blocks = ts::scale_down(span->_free_space); + ts::CacheStripeBlocks span_blocks = round_down(span->_free_space); ts::CacheStripeBlocks span_used(0); std::cout << "Allocation from span of " << span_blocks << std::endl; // sort by deficit so least relatively full volumes go first. 
diff --git a/cmd/traffic_cache_tool/Makefile.am b/cmd/traffic_cache_tool/Makefile.am index b6c02bb5c0c..8a0e8c0a5f6 100644 --- a/cmd/traffic_cache_tool/Makefile.am +++ b/cmd/traffic_cache_tool/Makefile.am @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -AM_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ -Wl,--as-needed AM_CPPFLAGS = -I $(srcdir)/iocore -I $(srcdir)/lib/ts noinst_PROGRAMS = traffic_cache_tool From a5b98020a6c5d16be3ce72e56d4c7e4e8b188ac9 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 10 Feb 2017 20:47:58 -0600 Subject: [PATCH 71/81] CacheTool: Initial documentation. --- .../command-line/traffic_cache_tool.en.rst | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 doc/appendices/command-line/traffic_cache_tool.en.rst diff --git a/doc/appendices/command-line/traffic_cache_tool.en.rst b/doc/appendices/command-line/traffic_cache_tool.en.rst new file mode 100644 index 00000000000..eb4264d337e --- /dev/null +++ b/doc/appendices/command-line/traffic_cache_tool.en.rst @@ -0,0 +1,87 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. include:: ../../common.defs + +.. 
_traffic_cache_tool: + +traffic_cache_tool +****************** + +Synopsis +======== + +:program:`traffic_cache_tool` [OPTIONS] SUBCOMMAND [OPTIONS] + +.. _traffic-cache-tool-commands: + +Description +=========== + +:program:`traffic_cache_tool` is designed to interact with the |TS| cache both for inspection and modification. + +:program:`traffic_cache_tool alloc` + Perform cache storage allocation operations. +:program:`traffic_cache_tool list` + Display information about the cache. + +Options +======= + +.. program:: traffic_cache_tool + +.. option:: --span + + Specify the span (storage) to operate on. This can be a device, a cache directory, or a configuration file in the format of :file:`storage.config`. In the latter case all devices listed in the configuration file become active. + +.. option:: --volume + + Specify the volume configuration file in the format of :file:`volume.config`. This is important primarily for allocation operations where having the volume configuration is needed in order to properly allocate storage in spans to specific volumes. + +.. option:: --write + + Enable writing to storage devices. If this flag is not present then no operation will write to any storage device. This makes "dry run" the default and actual changes require specifying this flag. + +Subcommands +=========== + +traffic_cache_tool alloc +------------------------ +.. program:: traffic_cache_tool alloc +.. option:: free + + Allocate space on all spans that are empty. Requires a volume configuration file to be specified. + +traffic_cache_tool list +----------------------- +.. program:: traffic_cache_tool list +.. option:: stripes + + Search the spans for stripe data and display it. This is potentially slow as large sections of the disk may need to be read to find the stripe headers. + +Examples +======== + +List the basic span data. 
+ + $ traffic_cache_tool list + +See also +======== + +:manpage:`storage.config(5)` +:manpage:`volume.config(5)`, From 43ac2ddfa38c8dc8fbc39d2b91932ab5eba54237 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Sat, 11 Feb 2017 09:04:15 -0600 Subject: [PATCH 72/81] CacheTool: clang-format. --- lib/ts/Scalar.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ts/Scalar.h b/lib/ts/Scalar.h index d51f1b91c9b..884178114dd 100644 --- a/lib/ts/Scalar.h +++ b/lib/ts/Scalar.h @@ -945,7 +945,7 @@ template Scalar operator-(detail::scalar_unit_round_up_t lhs, Scalar const &rhs) { - return Scalar(lhs.template scale()) -= rhs; + return Scalar(lhs.template scale()) -= rhs; } template Scalar @@ -957,7 +957,7 @@ template Scalar operator-(detail::scalar_unit_round_down_t lhs, Scalar const &rhs) { - return Scalar(lhs.template scale()) -= rhs; + return Scalar(lhs.template scale()) -= rhs; } template Scalar From 3cba5c3f0c7f2f578d16f03cd8ac584e9d07cc57 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 17 Feb 2017 02:54:02 -0600 Subject: [PATCH 73/81] CacheTool: Tweak makefile. --- cmd/traffic_cache_tool/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/traffic_cache_tool/Makefile.am b/cmd/traffic_cache_tool/Makefile.am index 8a0e8c0a5f6..de07a5cd84d 100644 --- a/cmd/traffic_cache_tool/Makefile.am +++ b/cmd/traffic_cache_tool/Makefile.am @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -AM_CPPFLAGS = -I $(srcdir)/iocore -I $(srcdir)/lib/ts +AM_CPPFLAGS = -I $(srcdir)/lib/ts noinst_PROGRAMS = traffic_cache_tool From f2f67c754355cb31f818db735231bd7e71ee9f85 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 17 Feb 2017 03:01:59 -0600 Subject: [PATCH 74/81] CacheTool: namespace cleanup. 
--- cmd/traffic_cache_tool/CacheDefs.h | 4 +++- cmd/traffic_cache_tool/CacheTool.cc | 15 +-------------- cmd/traffic_cache_tool/Command.cc | 2 +- cmd/traffic_cache_tool/Command.h | 2 +- cmd/traffic_cache_tool/File.cc | 2 +- cmd/traffic_cache_tool/File.h | 2 +- 6 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index 999b0500a07..ef733e8e308 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -31,7 +31,9 @@ namespace tag struct bytes; } -namespace ApacheTrafficServer +using namespace ApacheTrafficServer; + +namespace ts { constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; /// Maximum allowed volume index. diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index d7d14ec4542..7e23c6af974 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -38,18 +38,6 @@ #include "CacheDefs.h" #include "Command.h" -// Sigh, a hack for now. 
We already have "ts" defined as a namespace in various places so for now -// just import the Full Name namespace in to 'ts' rather than direct 'namespace ts = ApachTrafficServer' -namespace ts -{ -using namespace ApacheTrafficServer; -} - -namespace ApacheTrafficServer -{ -const Bytes CacheSpan::OFFSET{CacheStoreBlocks{1}}; -} - using ts::Bytes; using ts::Megabytes; using ts::CacheStoreBlocks; @@ -58,8 +46,7 @@ using ts::CacheStripeDescriptor; using ts::Errata; using ts::FilePath; -using ts::round_up; -using ts::round_down; +const Bytes ts::CacheSpan::OFFSET{CacheStoreBlocks{1}}; namespace { diff --git a/cmd/traffic_cache_tool/Command.cc b/cmd/traffic_cache_tool/Command.cc index f2106d98ed8..0fa3f9fedd8 100644 --- a/cmd/traffic_cache_tool/Command.cc +++ b/cmd/traffic_cache_tool/Command.cc @@ -27,7 +27,7 @@ #include #include -namespace ApacheTrafficServer +namespace ts { int CommandTable::_opt_idx = 0; diff --git a/cmd/traffic_cache_tool/Command.h b/cmd/traffic_cache_tool/Command.h index 776bf859707..bd9d08b17c9 100644 --- a/cmd/traffic_cache_tool/Command.h +++ b/cmd/traffic_cache_tool/Command.h @@ -29,7 +29,7 @@ #if !defined(CACHE_TOOL_COMMAND_H) #define CACHE_TOOL_COMMAND_H -namespace ApacheTrafficServer +namespace ts { // Because in C+11 std::max is not constexpr template diff --git a/cmd/traffic_cache_tool/File.cc b/cmd/traffic_cache_tool/File.cc index c3c038e2ee4..25d17c9cbb3 100644 --- a/cmd/traffic_cache_tool/File.cc +++ b/cmd/traffic_cache_tool/File.cc @@ -25,7 +25,7 @@ #include #include -namespace ApacheTrafficServer +namespace ts { FilePath & FilePath::operator=(char const *path) diff --git a/cmd/traffic_cache_tool/File.h b/cmd/traffic_cache_tool/File.h index 569db16af2e..49882a94b3b 100644 --- a/cmd/traffic_cache_tool/File.h +++ b/cmd/traffic_cache_tool/File.h @@ -28,7 +28,7 @@ #include #include -namespace ApacheTrafficServer +namespace ts { /** A file class for supporting path operations. 
*/ From 97cb5f321a3d49ce464e19e0933d7b26fa45a17d Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 23 Feb 2017 03:32:08 -0600 Subject: [PATCH 75/81] Checkpoint - first part of converting stripe inspection to be methods. --- cmd/traffic_cache_tool/CacheDefs.h | 35 ++--- cmd/traffic_cache_tool/CacheTool.cc | 211 +++++++++++++++++++++++++--- lib/ts/MemView.h | 13 +- 3 files changed, 216 insertions(+), 43 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index ef733e8e308..d3e440d407c 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -28,7 +28,9 @@ namespace tag { -struct bytes; + struct bytes { + static constexpr char const* const label = "bytes"; + }; } using namespace ApacheTrafficServer; @@ -38,6 +40,8 @@ namespace ts constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; /// Maximum allowed volume index. constexpr static int MAX_VOLUME_IDX = 255; + constexpr static int ENTRIES_PER_BUCKET = 4; + constexpr static int MAX_BUCKETS_PER_SEGMENT = (1 << 16) / ENTRIES_PER_BUCKET; typedef Scalar<1, int64_t, tag::bytes> Bytes; typedef Scalar<1024, int64_t, tag::bytes> Kilobytes; @@ -63,7 +67,7 @@ operator<<(std::ostream &s, Megabytes const &n) std::ostream & operator<<(std::ostream &s, Gigabytes const &n) { - return s << n.count() << " HB"; + return s << n.count() << " GB"; } std::ostream & operator<<(std::ostream &s, Terabytes const &n) @@ -108,6 +112,8 @@ class CacheSpan /** A section of storage in a span, used to contain a stripe. + This is stored in the span header to describe the stripes in the span. + @note Serializable. @internal nee @c DiskVolBlock @@ -122,6 +128,8 @@ struct CacheStripeDescriptor { /** Header data for a span. + This is the serializable descriptor stored in a span. 
+ @internal nee DiskHeader */ struct SpanHeader { @@ -140,7 +148,7 @@ struct SpanHeader { @internal nee VolHeadFooter */ -class CacheStripeMeta +class StripeMeta { public: static constexpr uint32_t MAGIC = 0xF1D0F00D; @@ -159,29 +167,8 @@ class CacheStripeMeta uint32_t dirty; uint32_t sector_size; uint32_t unused; // pad out to 8 byte boundary - uint16_t freelist[1]; -}; - -class StripeData -{ -public: - size_t calc_hdr_len() const; - - int64_t segments; ///< Number of segments. - int64_t buckets; ///< Number of buckets. - off_t skip; ///< Start of stripe data. - off_t start; ///< Start of content data. - off_t len; ///< Total size of stripe (metric?) }; -inline size_t -StripeData::calc_hdr_len() const -{ - return sizeof(CacheStripeMeta) + sizeof(uint16_t) * (this->segments - 1); -} -// inline size_t StripeData::calc_dir_len() const { return this->calc_hdr_len() + this->buckets * DIR_DEPTH * this->segments * -// SIZEOF_DIR + sizeof(CacheStripeMeta); } - class CacheDirEntry { unsigned int offset : 24; diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 7e23c6af974..dcadb02520e 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -42,9 +42,12 @@ using ts::Bytes; using ts::Megabytes; using ts::CacheStoreBlocks; using ts::CacheStripeBlocks; +using ts::StripeMeta; using ts::CacheStripeDescriptor; using ts::Errata; using ts::FilePath; +using ts::MemView; +using ts::CacheDirEntry; const Bytes ts::CacheSpan::OFFSET{CacheStoreBlocks{1}}; @@ -75,31 +78,49 @@ struct Span { void clearPermanently(); ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); - Errata updateHeader(); + Errata updateHeader(); ///< Update serialized header and write to disk. - FilePath _path; - ats_scoped_fd _fd; - int _vol_idx = 0; + FilePath _path; ///< File system location of span. + ats_scoped_fd _fd; ///< Open file descriptor for span. + int _vol_idx = 0; ///< Forced volume. 
CacheStoreBlocks _base; ///< Offset to first usable byte. CacheStoreBlocks _offset; ///< Offset to first content byte. // The space between _base and _offset is where the span information is stored. CacheStoreBlocks _len; ///< Total length of span. - CacheStoreBlocks _free_space; + CacheStoreBlocks _free_space; ///< Total size of free stripes. ink_device_geometry _geometry; ///< Geometry of span. - /// A copy of the data on the disk. + /// Local copy of serialized header data stored on in the span. std::unique_ptr _header; /// Live information about stripes. + /// Seeded from @a _header and potentially agumented with direct probing. std::list _stripes; }; /* --------------------------------------------------------------------------------------- */ struct Stripe { + /// Meta data is stored in 4 copies A/B and Header/Footer. + enum Copy { A = 0, B = 1 }; + enum { HEAD = 0, FOOT = 1 }; + + /// Construct from span header data. Stripe(Span *span, Bytes start, CacheStoreBlocks len); - bool - isFree() const - { - return 0 == _vol_idx; - } + /// Is stripe unallocated? + bool isFree() const; + + /// Probe a chunk of memory @a mem for stripe metadata. + /// @a mem is updated to remove memory that has been probed. + /// @return @c true if @a mem has valid data, @c false otherwise. + bool probeMeta(MemView& mem); + + /// Check a buffer for being valid stripe metadata. + /// @return @c true if valid, @c false otherwise. + static bool validateMeta(StripeMeta const* meta); + + /// Load metadata for this stripe. + bool loadMeta(); + + /// Initialize the live data from the loaded serialized data. + void updateLiveData(enum Copy c); Span *_span; ///< Hosting span. Bytes _start; ///< Offset of first byte of stripe. @@ -107,11 +128,167 @@ struct Stripe { CacheStoreBlocks _len; ///< Length of stripe. uint8_t _vol_idx = 0; ///< Volume index. uint8_t _type = 0; ///< Stripe type. + + int64_t _buckets; ///< Number of buckets per segment. + int64_t _segments; ///< Number of segments. 
+ + /// Meta copies, indexed by A/B then HEAD/FOOT. + StripeMeta _meta[2][2]; + /// Locations for the meta data. + CacheStoreBlocks _meta_pos[2][2]; }; Stripe::Stripe(Span *span, Bytes start, CacheStoreBlocks len) : _span(span), _start(start), _len(len) { } + +bool Stripe::isFree() const { return 0 == _vol_idx; } + +// Need to be bit more robust at some point. +bool Stripe::validateMeta(StripeMeta const* meta) +{ + // Need to be bit more robust at some point. + return StripeMeta::MAGIC == meta->magic && meta->version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && + meta->version.ink_minor <= 2 // This may have always been zero, actually. + ; +} + +bool +Stripe::probeMeta(MemView& mem) +{ + while (mem.size() >= sizeof(StripeMeta)) { + if (this->validateMeta(mem.template at_ptr(0))) { + return true; + } + // The meta data is stored aligned on a stripe block boundary, so only need to check there. + mem += CacheStoreBlocks::SCALE; + } + return false; +} + +void +Stripe::updateLiveData(enum Copy c) +{ + CacheStoreBlocks delta{_meta_pos[c][FOOT] - _meta_pos[c][HEAD]}; + CacheStoreBlocks header_len(0); + int64_t n_buckets; + int64_t n_segments; + + do { + ++header_len; + n_buckets = (delta - header_len).units() / (sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET); + n_segments = n_buckets / ts::MAX_BUCKETS_PER_SEGMENT; + // This should never be more than one loop, usually none. + while ((n_buckets / n_segments) > ts::MAX_BUCKETS_PER_SEGMENT) + ++n_segments; + } while (Bytes(sizeof(StripeMeta) + sizeof(uint16_t) * n_segments) > header_len); + + _buckets = n_buckets / n_segments; + _segments = n_segments; + + std::cout << "Stripe found: " << _segments << " segments with " << _buckets << " buckets per segment for " + << _buckets * _segments * 4 << " total directory entries taking " << _buckets * _segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET + << " out of " << (delta-header_len).units() << " bytes." 
<< std::endl; +} + +bool +Stripe::loadMeta() +{ + // Read from disk in chunks of this size. + constexpr static int64_t N = 1 << 24; + + bool zret = false; // default not successful. + + int fd = _span->_fd; + Bytes n; + bool found; + MemView data; // The current view of the read buffer. + Bytes delta; + Bytes pos = _start; + // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 + // which means at most 10/160 (1/16) of the span can be directory/header. + Bytes limit = pos + _len / 16; + // Aligned buffer for raw device reads. + alignas(4096) static char buff[N]; + + // Check the earlier part of the block. Header A must be at the start of the stripe block. + // A full chunk is read in case Footer A is in that range. + n = pread(fd, buff, N, pos.units()); + data.setView(buff, n.units()); + if (this->probeMeta(data)) { + _meta[A][HEAD] = data.template at(0); + _meta_pos[A][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); + data += CacheStoreBlocks::SCALE; + + // Search for Footer A, skipping false positives. + do { + bool found; + while (!(found = this->probeMeta(data)) && pos < limit) { + pos += N; + n = pread(fd, buff, N, pos.units()); + data.setView(buff, n.units()); + } + + if (found) { + _meta[A][FOOT] = data.template at(0); + + // Need to be more thorough in cross checks but this is OK for now. + if (_meta[A][FOOT].version == _meta[A][HEAD].version) { + _meta_pos[A][FOOT] = pos + Bytes(data.template at_ptr(0) - buff); + } else { + // false positive, keep looking. + found = false; + } + } + } while (!found); + + } else { + printf("Header A not found, invalid stripe.\n"); + } + + // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve + // (as the exact offset is computable). + if (_meta_pos[A][FOOT] > 0) { + delta = _meta_pos[A][FOOT] - _meta_pos[A][HEAD]; + // Header B should be immediately after Footer A. 
If at the end of the last read, + // do another read. + if (data.size() < CacheStoreBlocks::SCALE) { + pos += N; + n = pread(fd, buff, CacheStoreBlocks::SCALE, pos.units()); + data.setView(buff, n.units()); + } + if (this->validateMeta(data.template at_ptr(0))) { + _meta[B][HEAD] = data.template at(0); + _meta_pos[B][HEAD] = pos + (data.template at_ptr(0) - buff); + + // Footer B must be at the same relative offset to Header B as Footer A -> Header A. + n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, (_meta_pos[B][HEAD] + delta).units()); + data.setView(buff, n.units()); + if (this->validateMeta(data.template at_ptr(0))) { + _meta[B][FOOT] = data.template at(0); + _meta_pos[B][FOOT] = _meta_pos[B][HEAD] + delta; + } + } + } + + if (_meta_pos[A][FOOT] > 0) { + if (_meta[A][HEAD].sync_serial == _meta[A][FOOT].sync_serial && + (0 == _meta_pos[B][FOOT] || _meta[B][HEAD].sync_serial != _meta[B][FOOT].sync_serial || + _meta[A][HEAD].sync_serial > _meta[B][HEAD].sync_serial)) { + this->updateLiveData(A); + zret = true; + } else if (_meta_pos[B][FOOT] > 0 && _meta[B][HEAD].sync_serial == _meta[B][FOOT].sync_serial) { + this->updateLiveData(B); + zret = true; + } else { + std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; + } + } else { + std::cout << "Invalid stripe data - no candidates found." << std::endl; + } + return zret; +} + /* --------------------------------------------------------------------------------------- */ /// A live volume. /// Volume data based on data from loaded spans. @@ -349,7 +526,7 @@ VolumeAllocator::fillEmptySpans() } /* --------------------------------------------------------------------------------------- */ // All of these free functions need to be moved to the Cache class. Or the Span class? 
- +# if 0 bool Validate_Stripe_Meta(ts::CacheStripeMeta const &stripe) { @@ -403,12 +580,12 @@ void Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) { int found; - ts::StringView data; - ts::StringView stripe_mem; + StringView data; + StringView stripe_mem; constexpr static int64_t N = 1 << 24; int64_t n; off_t pos = block.offset.units(); - ts::CacheStripeMeta stripe_meta[4]; + StripeMeta stripe_meta[4]; off_t stripe_pos[4] = {0, 0, 0, 0}; off_t delta; // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 @@ -459,7 +636,7 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) } // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve - // (as the exact offsets are computable). + // (as the exact offset is computable). if (stripe_pos[1]) { delta = stripe_pos[1] - stripe_pos[0]; // Header B should be immediately after Footer A. If at the end of the last read, @@ -511,7 +688,7 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) printf("Stripe Header A not found in first chunk\n"); } } - +# endif /* --------------------------------------------------------------------------------------- */ Errata Cache::loadSpan(FilePath const &path) diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h index ccd1b85ed5f..ed5567c7211 100644 --- a/lib/ts/MemView.h +++ b/lib/ts/MemView.h @@ -162,7 +162,9 @@ class MemView /// @note This is equivalent to @c begin currently but it's probably good to have separation. constexpr const void *ptr() const; /// @return the @a V value at index @a n. - template V array(size_t n) const; + template V at(ssize_t n) const; + /// @return a pointer to the @a V value at index @a n. + template V const* at_ptr(ssize_t n) const; //@} /// Set the view. 
@@ -785,11 +787,18 @@ MemView::splitSuffix(const void *p) template inline V -MemView::array(size_t n) const +MemView::at(ssize_t n) const { return static_cast(_ptr)[n]; } +template +inline V const* +MemView::at_ptr(ssize_t n) const +{ + return static_cast(_ptr) + n; +} + template inline const V * MemView::find(V v) const From ab2d20e562feeb896535b6b199dc23a60bcd6ec5 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 23 Feb 2017 04:58:03 -0600 Subject: [PATCH 76/81] CacheTool: First successful run after moving stripe locating code to a class. --- cmd/traffic_cache_tool/CacheDefs.h | 2 +- cmd/traffic_cache_tool/CacheTool.cc | 47 ++++++++++++++++------------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index d3e440d407c..a6055035e92 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -29,7 +29,7 @@ namespace tag { struct bytes { - static constexpr char const* const label = "bytes"; + static constexpr char const* const label = " bytes"; }; } diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index dcadb02520e..b5907c36ec0 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -117,7 +117,7 @@ struct Stripe { static bool validateMeta(StripeMeta const* meta); /// Load metadata for this stripe. - bool loadMeta(); + Errata loadMeta(); /// Initialize the live data from the loaded serialized data. void updateLiveData(enum Copy c); @@ -185,19 +185,15 @@ Stripe::updateLiveData(enum Copy c) _buckets = n_buckets / n_segments; _segments = n_segments; - - std::cout << "Stripe found: " << _segments << " segments with " << _buckets << " buckets per segment for " - << _buckets * _segments * 4 << " total directory entries taking " << _buckets * _segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET - << " out of " << (delta-header_len).units() << " bytes." 
<< std::endl; } -bool +Errata Stripe::loadMeta() { // Read from disk in chunks of this size. constexpr static int64_t N = 1 << 24; - bool zret = false; // default not successful. + Errata zret; int fd = _span->_fd; Bytes n; @@ -222,7 +218,6 @@ Stripe::loadMeta() // Search for Footer A, skipping false positives. do { - bool found; while (!(found = this->probeMeta(data)) && pos < limit) { pos += N; n = pread(fd, buff, N, pos.units()); @@ -234,7 +229,7 @@ Stripe::loadMeta() // Need to be more thorough in cross checks but this is OK for now. if (_meta[A][FOOT].version == _meta[A][HEAD].version) { - _meta_pos[A][FOOT] = pos + Bytes(data.template at_ptr(0) - buff); + _meta_pos[A][FOOT] =round_down(pos + Bytes(data.template at_ptr(0) - buff)); } else { // false positive, keep looking. found = false; @@ -243,7 +238,7 @@ Stripe::loadMeta() } while (!found); } else { - printf("Header A not found, invalid stripe.\n"); + zret.push(0, 1, "Header A not found"); } // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve @@ -259,14 +254,14 @@ Stripe::loadMeta() } if (this->validateMeta(data.template at_ptr(0))) { _meta[B][HEAD] = data.template at(0); - _meta_pos[B][HEAD] = pos + (data.template at_ptr(0) - buff); + _meta_pos[B][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); // Footer B must be at the same relative offset to Header B as Footer A -> Header A. 
n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, (_meta_pos[B][HEAD] + delta).units()); data.setView(buff, n.units()); if (this->validateMeta(data.template at_ptr(0))) { _meta[B][FOOT] = data.template at(0); - _meta_pos[B][FOOT] = _meta_pos[B][HEAD] + delta; + _meta_pos[B][FOOT] = round_down(_meta_pos[B][HEAD] + delta); } } } @@ -276,15 +271,11 @@ Stripe::loadMeta() (0 == _meta_pos[B][FOOT] || _meta[B][HEAD].sync_serial != _meta[B][FOOT].sync_serial || _meta[A][HEAD].sync_serial > _meta[B][HEAD].sync_serial)) { this->updateLiveData(A); - zret = true; } else if (_meta_pos[B][FOOT] > 0 && _meta[B][HEAD].sync_serial == _meta[B][FOOT].sync_serial) { this->updateLiveData(B); - zret = true; } else { - std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; + zret.push(0, 1, "Invalid stripe data - candidates found but sync serial data not valid."); } - } else { - std::cout << "Invalid stripe data - no candidates found." << std::endl; } return zret; } @@ -789,15 +780,29 @@ Cache::dumpSpans(SpanDumpDepth depth) std::cout << "Span: " << span->_path << " " << span->_header->num_volumes << " Volumes " << span->_header->num_used << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " << span->_header->num_blocks.units() << " blocks" << std::endl; - for (unsigned int i = 0; i < span->_header->num_diskvol_blks; ++i) { - ts::CacheStripeDescriptor &stripe = span->_header->stripes[i]; - std::cout << " : SpanBlock " << i << " @ " << stripe.offset.units() << " blocks=" << stripe.len.units() - << " vol=" << stripe.vol_idx << " type=" << stripe.type << " " << (stripe.free ? "free" : "in-use") + + for ( auto stripe : span->_stripes ) { + std::cout << " : " << " @ " << stripe->_start << " len=" << stripe->_len.count() << " blocks " + << " vol=" << static_cast(stripe->_vol_idx) << " type=" << static_cast(stripe->_type) << " " << (stripe->isFree() ? 
"free" : "in-use") << std::endl; + if (depth >= SpanDumpDepth::STRIPE) { + Errata r = stripe->loadMeta(); + if (r) { + std::cout << "Stripe found: " << stripe->_segments << " segments with " << stripe->_buckets << " buckets per segment for " + << stripe->_buckets * stripe->_segments * 4 << " total directory entries taking " << stripe->_buckets * stripe->_segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET +// << " out of " << (delta-header_len).units() << " bytes." + << std::endl; + } else { + std::cout << r; + } + } + } +# if 0 if (depth >= SpanDumpDepth::STRIPE) { Open_Stripe(span->_fd, stripe); } } +# endif } } } From b7fec2cd2fae856d48ba6635bd375cae6d801bf8 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Thu, 23 Feb 2017 04:59:12 -0600 Subject: [PATCH 77/81] clang-format. --- cmd/traffic_cache_tool/CacheDefs.h | 12 +++--- cmd/traffic_cache_tool/CacheTool.cc | 66 ++++++++++++++++------------- lib/ts/MemView.h | 4 +- 3 files changed, 45 insertions(+), 37 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheDefs.h b/cmd/traffic_cache_tool/CacheDefs.h index a6055035e92..8ea9d68dabd 100644 --- a/cmd/traffic_cache_tool/CacheDefs.h +++ b/cmd/traffic_cache_tool/CacheDefs.h @@ -28,9 +28,9 @@ namespace tag { - struct bytes { - static constexpr char const* const label = " bytes"; - }; +struct bytes { + static constexpr char const *const label = " bytes"; +}; } using namespace ApacheTrafficServer; @@ -39,9 +39,9 @@ namespace ts { constexpr static uint8_t CACHE_DB_MAJOR_VERSION = 24; /// Maximum allowed volume index. 
-constexpr static int MAX_VOLUME_IDX = 255; - constexpr static int ENTRIES_PER_BUCKET = 4; - constexpr static int MAX_BUCKETS_PER_SEGMENT = (1 << 16) / ENTRIES_PER_BUCKET; +constexpr static int MAX_VOLUME_IDX = 255; +constexpr static int ENTRIES_PER_BUCKET = 4; +constexpr static int MAX_BUCKETS_PER_SEGMENT = (1 << 16) / ENTRIES_PER_BUCKET; typedef Scalar<1, int64_t, tag::bytes> Bytes; typedef Scalar<1024, int64_t, tag::bytes> Kilobytes; diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index b5907c36ec0..3df581d62e3 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -80,14 +80,14 @@ struct Span { ts::Rv allocStripe(int vol_idx, CacheStripeBlocks len); Errata updateHeader(); ///< Update serialized header and write to disk. - FilePath _path; ///< File system location of span. - ats_scoped_fd _fd; ///< Open file descriptor for span. - int _vol_idx = 0; ///< Forced volume. + FilePath _path; ///< File system location of span. + ats_scoped_fd _fd; ///< Open file descriptor for span. + int _vol_idx = 0; ///< Forced volume. CacheStoreBlocks _base; ///< Offset to first usable byte. CacheStoreBlocks _offset; ///< Offset to first content byte. // The space between _base and _offset is where the span information is stored. - CacheStoreBlocks _len; ///< Total length of span. - CacheStoreBlocks _free_space; ///< Total size of free stripes. + CacheStoreBlocks _len; ///< Total length of span. + CacheStoreBlocks _free_space; ///< Total size of free stripes. ink_device_geometry _geometry; ///< Geometry of span. /// Local copy of serialized header data stored on in the span. std::unique_ptr _header; @@ -110,11 +110,11 @@ struct Stripe { /// Probe a chunk of memory @a mem for stripe metadata. /// @a mem is updated to remove memory that has been probed. /// @return @c true if @a mem has valid data, @c false otherwise. 
- bool probeMeta(MemView& mem); + bool probeMeta(MemView &mem); /// Check a buffer for being valid stripe metadata. /// @return @c true if valid, @c false otherwise. - static bool validateMeta(StripeMeta const* meta); + static bool validateMeta(StripeMeta const *meta); /// Load metadata for this stripe. Errata loadMeta(); @@ -129,7 +129,7 @@ struct Stripe { uint8_t _vol_idx = 0; ///< Volume index. uint8_t _type = 0; ///< Stripe type. - int64_t _buckets; ///< Number of buckets per segment. + int64_t _buckets; ///< Number of buckets per segment. int64_t _segments; ///< Number of segments. /// Meta copies, indexed by A/B then HEAD/FOOT. @@ -142,19 +142,24 @@ Stripe::Stripe(Span *span, Bytes start, CacheStoreBlocks len) : _span(span), _st { } -bool Stripe::isFree() const { return 0 == _vol_idx; } +bool +Stripe::isFree() const +{ + return 0 == _vol_idx; +} // Need to be bit more robust at some point. -bool Stripe::validateMeta(StripeMeta const* meta) +bool +Stripe::validateMeta(StripeMeta const *meta) { // Need to be bit more robust at some point. return StripeMeta::MAGIC == meta->magic && meta->version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && - meta->version.ink_minor <= 2 // This may have always been zero, actually. + meta->version.ink_minor <= 2 // This may have always been zero, actually. ; } bool -Stripe::probeMeta(MemView& mem) +Stripe::probeMeta(MemView &mem) { while (mem.size() >= sizeof(StripeMeta)) { if (this->validateMeta(mem.template at_ptr(0))) { @@ -176,14 +181,14 @@ Stripe::updateLiveData(enum Copy c) do { ++header_len; - n_buckets = (delta - header_len).units() / (sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET); + n_buckets = (delta - header_len).units() / (sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET); n_segments = n_buckets / ts::MAX_BUCKETS_PER_SEGMENT; // This should never be more than one loop, usually none. 
while ((n_buckets / n_segments) > ts::MAX_BUCKETS_PER_SEGMENT) ++n_segments; } while (Bytes(sizeof(StripeMeta) + sizeof(uint16_t) * n_segments) > header_len); - _buckets = n_buckets / n_segments; + _buckets = n_buckets / n_segments; _segments = n_segments; } @@ -212,7 +217,7 @@ Stripe::loadMeta() n = pread(fd, buff, N, pos.units()); data.setView(buff, n.units()); if (this->probeMeta(data)) { - _meta[A][HEAD] = data.template at(0); + _meta[A][HEAD] = data.template at(0); _meta_pos[A][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); data += CacheStoreBlocks::SCALE; @@ -229,7 +234,7 @@ Stripe::loadMeta() // Need to be more thorough in cross checks but this is OK for now. if (_meta[A][FOOT].version == _meta[A][HEAD].version) { - _meta_pos[A][FOOT] =round_down(pos + Bytes(data.template at_ptr(0) - buff)); + _meta_pos[A][FOOT] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); } else { // false positive, keep looking. found = false; @@ -253,14 +258,14 @@ Stripe::loadMeta() data.setView(buff, n.units()); } if (this->validateMeta(data.template at_ptr(0))) { - _meta[B][HEAD] = data.template at(0); + _meta[B][HEAD] = data.template at(0); _meta_pos[B][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); // Footer B must be at the same relative offset to Header B as Footer A -> Header A. n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, (_meta_pos[B][HEAD] + delta).units()); data.setView(buff, n.units()); if (this->validateMeta(data.template at_ptr(0))) { - _meta[B][FOOT] = data.template at(0); + _meta[B][FOOT] = data.template at(0); _meta_pos[B][FOOT] = round_down(_meta_pos[B][HEAD] + delta); } } @@ -517,7 +522,7 @@ VolumeAllocator::fillEmptySpans() } /* --------------------------------------------------------------------------------------- */ // All of these free functions need to be moved to the Cache class. Or the Span class? 
-# if 0 +#if 0 bool Validate_Stripe_Meta(ts::CacheStripeMeta const &stripe) { @@ -679,7 +684,7 @@ Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) printf("Stripe Header A not found in first chunk\n"); } } -# endif +#endif /* --------------------------------------------------------------------------------------- */ Errata Cache::loadSpan(FilePath const &path) @@ -781,28 +786,31 @@ Cache::dumpSpans(SpanDumpDepth depth) << " in use " << span->_header->num_free << " free " << span->_header->num_diskvol_blks << " stripes " << span->_header->num_blocks.units() << " blocks" << std::endl; - for ( auto stripe : span->_stripes ) { - std::cout << " : " << " @ " << stripe->_start << " len=" << stripe->_len.count() << " blocks " - << " vol=" << static_cast(stripe->_vol_idx) << " type=" << static_cast(stripe->_type) << " " << (stripe->isFree() ? "free" : "in-use") - << std::endl; + for (auto stripe : span->_stripes) { + std::cout << " : " + << " @ " << stripe->_start << " len=" << stripe->_len.count() << " blocks " + << " vol=" << static_cast(stripe->_vol_idx) << " type=" << static_cast(stripe->_type) << " " + << (stripe->isFree() ? "free" : "in-use") << std::endl; if (depth >= SpanDumpDepth::STRIPE) { Errata r = stripe->loadMeta(); if (r) { - std::cout << "Stripe found: " << stripe->_segments << " segments with " << stripe->_buckets << " buckets per segment for " - << stripe->_buckets * stripe->_segments * 4 << " total directory entries taking " << stripe->_buckets * stripe->_segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET -// << " out of " << (delta-header_len).units() << " bytes." + std::cout << "Stripe found: " << stripe->_segments << " segments with " << stripe->_buckets + << " buckets per segment for " << stripe->_buckets * stripe->_segments * 4 + << " total directory entries taking " + << stripe->_buckets * stripe->_segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET + // << " out of " << (delta-header_len).units() << " bytes." 
<< std::endl; } else { std::cout << r; } } } -# if 0 +#if 0 if (depth >= SpanDumpDepth::STRIPE) { Open_Stripe(span->_fd, stripe); } } -# endif +#endif } } } diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h index ed5567c7211..39136f3f871 100644 --- a/lib/ts/MemView.h +++ b/lib/ts/MemView.h @@ -164,7 +164,7 @@ class MemView /// @return the @a V value at index @a n. template V at(ssize_t n) const; /// @return a pointer to the @a V value at index @a n. - template V const* at_ptr(ssize_t n) const; + template V const *at_ptr(ssize_t n) const; //@} /// Set the view. @@ -793,7 +793,7 @@ MemView::at(ssize_t n) const } template -inline V const* +inline V const * MemView::at_ptr(ssize_t n) const { return static_cast(_ptr) + n; From 4423a7762f3dc28a4114c3ae0d67f05ce1e85120 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Fri, 24 Feb 2017 15:25:56 -0600 Subject: [PATCH 78/81] Checkpoint - working on saving searched disk for directory. --- cmd/traffic_cache_tool/CacheTool.cc | 289 +++++++++------------------- 1 file changed, 94 insertions(+), 195 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 3df581d62e3..2214149d377 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -101,6 +101,24 @@ struct Stripe { enum Copy { A = 0, B = 1 }; enum { HEAD = 0, FOOT = 1 }; + /// Piece wise memory storage for the directory. + struct Chunk { + Bytes _start = 0; ///< Starting offset relative to physical device of span. + Bytes _skip = 0; ///< # of bytes not valid at the start of the first block. + Bytes _clip = 0; ///< # of bytes not valid at the end of the last block. + + typedef std::vector Chain; + Chain _chain; ///< Chain of blocks. + + ~Chunk(); + + void append(MemView m); + void clear(); + }; + + /// Hold a list of chunks representing an extended piece of memory. + typedef std::vector Memory; + /// Construct from span header data. 
Stripe(Span *span, Bytes start, CacheStoreBlocks len); @@ -128,6 +146,7 @@ struct Stripe { CacheStoreBlocks _len; ///< Length of stripe. uint8_t _vol_idx = 0; ///< Volume index. uint8_t _type = 0; ///< Stripe type. + uint8_t _idx = -1; ///< Stripe index in span. int64_t _buckets; ///< Number of buckets per segment. int64_t _segments; ///< Number of segments. @@ -136,8 +155,27 @@ struct Stripe { StripeMeta _meta[2][2]; /// Locations for the meta data. CacheStoreBlocks _meta_pos[2][2]; + /// Directory. + Chunk _directory; }; +Stripe::Chunk::~Chunk() +{ + this->clear(); +} +void +Stripe::Chunk::append(MemView m) +{ + _chain.push_back(m); +} +void +Stripe::Chunk::clear() +{ + for (auto &m : _chain) + free(const_cast(m.ptr())); + _chain.clear(); +} + Stripe::Stripe(Span *span, Bytes start, CacheStoreBlocks len) : _span(span), _start(start), _len(len) { } @@ -179,6 +217,10 @@ Stripe::updateLiveData(enum Copy c) int64_t n_buckets; int64_t n_segments; + // Past the header is the segment free list heads which if sufficiently long (> ~4K) can take + // more than 1 store block. Start with a guess of 1 and adjust upwards as needed. A 2TB stripe + // with an AOS of 8000 has roughly 3700 segments meaning that for even 10TB drives this loop + // should only be a few iterations. do { ++header_len; n_buckets = (delta - header_len).units() / (sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET); @@ -209,24 +251,41 @@ Stripe::loadMeta() // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 // which means at most 10/160 (1/16) of the span can be directory/header. Bytes limit = pos + _len / 16; - // Aligned buffer for raw device reads. - alignas(4096) static char buff[N]; + size_t io_align = _span->_geometry.blocksz; + StripeMeta const *meta; - // Check the earlier part of the block. Header A must be at the start of the stripe block. - // A full chunk is read in case Footer A is in that range. 
- n = pread(fd, buff, N, pos.units()); - data.setView(buff, n.units()); - if (this->probeMeta(data)) { - _meta[A][HEAD] = data.template at(0); - _meta_pos[A][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); - data += CacheStoreBlocks::SCALE; + char* buff; // Current active buffer. + std::unique_ptr bulk_buff; // Buffer for bulk reads. + static const size_t SBSIZE = CacheStripeBlocks::SCALE; // save some typing. + alignas(SBSIZE) char stripe_buff[SBSIZE]; // Use when reading a single stripe block. + if (io_align > SBSIZE) return Errata::Message(0,1,"Cannot load stripe ", _idx, " on span ", _span->_path, " because the I/O block alignment ", io_align, " is larger than the buffer alignment ", SBSIZE); + + _directory._start = pos; + + // Check the earlier part of the block. Header A must be at the start of the stripe block. + n = pread(fd, stripe_buff, SBSIZE, pos.units()); + data.setView(stripe_buff, n.units()); + meta = data.template at_ptr(0); + if (this->validateMeta(meta)) { + delta = data.template at_ptr(0) - buff.get(); + _meta[A][HEAD] = *meta; + _meta_pos[A][HEAD] = round_down(pos + delta); + pos += SBSIZE; // Search for Footer A, skipping false positives. do { - while (!(found = this->probeMeta(data)) && pos < limit) { - pos += N; + do { + bulk_buff.reset(ats_memalign(io_align, N)); + buff = bulk_buff.get(); n = pread(fd, buff, N, pos.units()); data.setView(buff, n.units()); + found = this->probeMeta(data); + } while (!found && pos < limit); + + + _directory.append({buff.release(), N}); + pos += N; + buff.reset(static_cast(malloc(N))); } if (found) { @@ -234,14 +293,15 @@ Stripe::loadMeta() // Need to be more thorough in cross checks but this is OK for now. 
if (_meta[A][FOOT].version == _meta[A][HEAD].version) { - _meta_pos[A][FOOT] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); + _meta_pos[A][FOOT] = round_down(pos + Bytes(data.template at_ptr(0) - buff.get())); + _directory._clip = N - (data.template at_ptr(0) - buff.get()); } else { // false positive, keep looking. found = false; } } } while (!found); - + _directory.append({buff.release(), N}); } else { zret.push(0, 1, "Header A not found"); } @@ -249,24 +309,29 @@ Stripe::loadMeta() // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve // (as the exact offset is computable). if (_meta_pos[A][FOOT] > 0) { + alignas(512) char b[CacheStoreBlocks::SCALE]; + delta = _meta_pos[A][FOOT] - _meta_pos[A][HEAD]; // Header B should be immediately after Footer A. If at the end of the last read, // do another read. if (data.size() < CacheStoreBlocks::SCALE) { pos += N; - n = pread(fd, buff, CacheStoreBlocks::SCALE, pos.units()); - data.setView(buff, n.units()); + n = pread(fd, b, CacheStoreBlocks::SCALE, pos.units()); + data.setView(b, n.units()); } - if (this->validateMeta(data.template at_ptr(0))) { - _meta[B][HEAD] = data.template at(0); - _meta_pos[B][HEAD] = round_down(pos + Bytes(data.template at_ptr(0) - buff)); + meta = data.template at_ptr(0); + if (this->validateMeta(meta)) { + _meta[B][HEAD] = *meta; + _meta_pos[B][HEAD] = round_down(pos); // Footer B must be at the same relative offset to Header B as Footer A -> Header A. 
- n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, (_meta_pos[B][HEAD] + delta).units()); - data.setView(buff, n.units()); - if (this->validateMeta(data.template at_ptr(0))) { - _meta[B][FOOT] = data.template at(0); - _meta_pos[B][FOOT] = round_down(_meta_pos[B][HEAD] + delta); + pos += delta; + n = pread(fd, b, ts::CacheStoreBlocks::SCALE, pos.units()); + data.setView(b, n.units()); + meta = data.template at_ptr(0); + if (this->validateMeta(meta)) { + _meta[B][FOOT] = *meta; + _meta_pos[B][FOOT] = round_down(pos); } } } @@ -282,6 +347,9 @@ Stripe::loadMeta() zret.push(0, 1, "Invalid stripe data - candidates found but sync serial data not valid."); } } + + if (!zret) + _directory.clear(); return zret; } @@ -521,171 +589,6 @@ VolumeAllocator::fillEmptySpans() return zret; } /* --------------------------------------------------------------------------------------- */ -// All of these free functions need to be moved to the Cache class. Or the Span class? -#if 0 -bool -Validate_Stripe_Meta(ts::CacheStripeMeta const &stripe) -{ - return ts::CacheStripeMeta::MAGIC == stripe.magic && stripe.version.ink_major <= ts::CACHE_DB_MAJOR_VERSION && - stripe.version.ink_minor <= 2 // This may have always been zero, actually. - ; -} - -typedef std::tuple ProbeResult; - -ProbeResult -Probe_For_Stripe(ts::StringView &mem) -{ - ProbeResult zret{mem.size() >= sizeof(ts::CacheStripeMeta) ? 0 : -1, ts::StringView(nullptr)}; - ts::StringView &test_site = std::get<1>(zret); - - while (mem.size() >= sizeof(ts::CacheStripeMeta)) { - // The meta data is stored aligned on a stripe block boundary, so only need to check there. - test_site = mem; - mem += ts::CacheStoreBlocks::SCALE; // always move this forward to make restarting search easy. 
- - if (Validate_Stripe_Meta(*reinterpret_cast(test_site.ptr()))) { - std::get<0>(zret) = 1; - break; - } - } - return zret; -} -/* --------------------------------------------------------------------------------------- */ -void -Calc_Stripe_Data(ts::CacheStripeMeta const &header, ts::CacheStripeMeta const &footer, off_t delta, ts::StripeData &data) -{ - // Assuming header + free list fits in one cache stripe block, which isn't true for large stripes (>2G or so). - // Need to detect that, presumably by checking that the segment count fits in the stripe block. - ts::CacheStoreBlocks hdr_size{1}; - off_t space = delta - hdr_size.units(); - int64_t n_buckets = space / 40; - data.segments = n_buckets / (1 << 14); - // This should never be more than one loop, usually none. - while ((n_buckets / data.segments) > 1 << 14) - ++(data.segments); - data.buckets = n_buckets / data.segments; - data.start = delta * 2; // this is wrong, need to add in the base block position. - - std::cout << "Stripe is " << data.segments << " segments with " << data.buckets << " buckets per segment for " - << data.buckets * data.segments * 4 << " total directory entries taking " << data.buckets * data.segments * 40 - << " out of " << space << " bytes." << std::endl; -} - -void -Open_Stripe(ats_scoped_fd const &fd, ts::CacheStripeDescriptor const &block) -{ - int found; - StringView data; - StringView stripe_mem; - constexpr static int64_t N = 1 << 24; - int64_t n; - off_t pos = block.offset.units(); - StripeMeta stripe_meta[4]; - off_t stripe_pos[4] = {0, 0, 0, 0}; - off_t delta; - // Avoid searching the entire span, because some of it must be content. Assume that AOS is more than 160 - // which means at most 10/160 (1/16) of the span can be directory/header. - off_t limit = pos + block.len.units() / 16; - alignas(4096) static char buff[N]; - - // Check the earlier part of the block. Header A must be at the start of the stripe block. - // A full chunk is read in case Footer A is in that range. 
- n = pread(fd, buff, N, pos); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - - if (found > 0) { - if (stripe_mem.ptr() != buff) { - std::cout << "Header A found at" << pos + stripe_mem.ptr() - buff << " which is not at start of stripe block" << std::endl; - } else { - stripe_pos[0] = pos; - stripe_meta[0] = reinterpret_cast(buff); // copy it out of buffer. - std::cout << "Header A found at " << stripe_pos[0] << std::endl; - // Search for Footer A, skipping false positives. - while (stripe_pos[1] == 0) { - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - while (found == 0 && pos < limit) { - pos += N; - n = pread(fd, buff, N, pos); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - } - if (found > 0) { - // Need to be more thorough in cross checks but this is OK for now. - ts::CacheStripeMeta const &s = *reinterpret_cast(stripe_mem.ptr()); - if (s.version == stripe_meta[0].version) { - stripe_meta[1] = s; - stripe_pos[1] = pos + (stripe_mem.ptr() - buff); - printf("Footer A found at %" PRIu64 "\n", stripe_pos[1]); - if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial) { - printf("Copy A is valid - sync=%d\n", stripe_meta[0].sync_serial); - } - } else { - // false positive, keep looking. - found = 0; - } - } else { - printf("Header A not found, invalid stripe.\n"); - break; - } - } - - // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve - // (as the exact offset is computable). - if (stripe_pos[1]) { - delta = stripe_pos[1] - stripe_pos[0]; - // Header B should be immediately after Footer A. If at the end of the last read, - // do another read. 
- if (!data) { - pos += N; - n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, pos); - data.setView(buff, n); - } - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - if (found <= 0) { - printf("Header B not found at expected location.\n"); - } else { - stripe_meta[2] = *reinterpret_cast(stripe_mem.ptr()); - stripe_pos[2] = pos + (stripe_mem.ptr() - buff); - printf("Found Header B at expected location %" PRIu64 ".\n", stripe_pos[2]); - - // Footer B must be at the same relative offset to Header B as Footer A -> Header A. - n = pread(fd, buff, ts::CacheStoreBlocks::SCALE, stripe_pos[2] + delta); - data.setView(buff, n); - std::tie(found, stripe_mem) = Probe_For_Stripe(data); - if (found == 1) { - stripe_pos[3] = stripe_pos[2] + delta; - stripe_meta[3] = *reinterpret_cast(stripe_mem.ptr()); - printf("Footer B found at expected location %" PRIu64 ".\n", stripe_pos[3]); - } else { - printf("Footer B not found at expected location %" PRIu64 ".\n", stripe_pos[2] + delta); - } - } - } - - if (stripe_pos[1]) { - if (stripe_meta[0].sync_serial == stripe_meta[1].sync_serial && - (0 == stripe_pos[3] || stripe_meta[2].sync_serial != stripe_meta[3].sync_serial || - stripe_meta[0].sync_serial > stripe_meta[2].sync_serial)) { - ts::StripeData sdata; - Calc_Stripe_Data(stripe_meta[0], stripe_meta[1], delta, sdata); - } else if (stripe_pos[3] && stripe_meta[2].sync_serial == stripe_meta[3].sync_serial) { - ts::StripeData sdata; - Calc_Stripe_Data(stripe_meta[2], stripe_meta[3], delta, sdata); - } else { - std::cout << "Invalid stripe data - candidates found but sync serial data not valid." << std::endl; - } - } else { - std::cout << "Invalid stripe data - no candidates found." 
<< std::endl; - } - } - } else { - printf("Stripe Header A not found in first chunk\n"); - } -} -#endif -/* --------------------------------------------------------------------------------------- */ Errata Cache::loadSpan(FilePath const &path) { @@ -711,6 +614,7 @@ Cache::loadSpanDirect(FilePath const &path, int vol_idx, Bytes size) for (auto i = 0; i < nspb; ++i) { ts::CacheStripeDescriptor &raw = span->_header->stripes[i]; Stripe *stripe = new Stripe(span.get(), raw.offset, raw.len); + stripe->_idx = i; if (raw.free == 0) { stripe->_vol_idx = raw.vol_idx; stripe->_type = raw.type; @@ -800,17 +704,12 @@ Cache::dumpSpans(SpanDumpDepth depth) << stripe->_buckets * stripe->_segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET // << " out of " << (delta-header_len).units() << " bytes." << std::endl; + stripe->_directory.clear(); } else { std::cout << r; } } } -#if 0 - if (depth >= SpanDumpDepth::STRIPE) { - Open_Stripe(span->_fd, stripe); - } - } -#endif } } } From a21056f49de391998c09c37145de37b5dc2d7ab0 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 28 Feb 2017 15:09:05 -0600 Subject: [PATCH 79/81] Cache Tool: checkpoint. --- cmd/traffic_cache_tool/CacheTool.cc | 81 ++++++++++++++--------------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 2214149d377..caf8fddce93 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -125,10 +125,15 @@ struct Stripe { /// Is stripe unallocated? bool isFree() const; - /// Probe a chunk of memory @a mem for stripe metadata. - /// @a mem is updated to remove memory that has been probed. - /// @return @c true if @a mem has valid data, @c false otherwise. - bool probeMeta(MemView &mem); + /** Probe a chunk of memory @a mem for stripe metadata. + + @a mem is updated to remove memory that has been probed. If @a + meta is not @c nullptr then it is used for additional cross + checking. 
+ + @return @c true if @a mem has valid data, @c false otherwise. + */ + bool probeMeta(MemView &mem, StripeMeta const* meta = nullptr); /// Check a buffer for being valid stripe metadata. /// @return @c true if valid, @c false otherwise. @@ -197,10 +202,15 @@ Stripe::validateMeta(StripeMeta const *meta) } bool -Stripe::probeMeta(MemView &mem) +Stripe::probeMeta(MemView &mem, StripeMeta const* base_meta) { while (mem.size() >= sizeof(StripeMeta)) { - if (this->validateMeta(mem.template at_ptr(0))) { + StripeMeta const* meta = mem.template at_ptr(0); + if (this->validateMeta(meta) && + (base_meta == nullptr || // no base version to check against. + ( meta->version == base_meta->version ) // need more checks here I think. + )) + { return true; } // The meta data is stored aligned on a stripe block boundary, so only need to check there. @@ -254,8 +264,7 @@ Stripe::loadMeta() size_t io_align = _span->_geometry.blocksz; StripeMeta const *meta; - char* buff; // Current active buffer. - std::unique_ptr bulk_buff; // Buffer for bulk reads. + std::unique_ptr bulk_buff; // Buffer for bulk reads. static const size_t SBSIZE = CacheStripeBlocks::SCALE; // save some typing. alignas(SBSIZE) char stripe_buff[SBSIZE]; // Use when reading a single stripe block. @@ -263,45 +272,35 @@ Stripe::loadMeta() _directory._start = pos; - // Check the earlier part of the block. Header A must be at the start of the stripe block. + // Header A must be at the start of the stripe block. n = pread(fd, stripe_buff, SBSIZE, pos.units()); data.setView(stripe_buff, n.units()); meta = data.template at_ptr(0); if (this->validateMeta(meta)) { - delta = data.template at_ptr(0) - buff.get(); + delta = data.template at_ptr(0) - bulk_buff.get(); _meta[A][HEAD] = *meta; _meta_pos[A][HEAD] = round_down(pos + delta); pos += SBSIZE; // Search for Footer A, skipping false positives. 
- do { - do { - bulk_buff.reset(ats_memalign(io_align, N)); - buff = bulk_buff.get(); - n = pread(fd, buff, N, pos.units()); - data.setView(buff, n.units()); - found = this->probeMeta(data); - } while (!found && pos < limit); - - - _directory.append({buff.release(), N}); - pos += N; - buff.reset(static_cast(malloc(N))); - } - + while (pos < limit) { + char *buff = static_cast(ats_memalign(io_align, N)); + bulk_buff.reset(buff); + n = pread(fd, buff, N, pos.units()); + data.setView(buff, n.units()); + found = this->probeMeta(data, &_meta[A][HEAD]); if (found) { - _meta[A][FOOT] = data.template at(0); - - // Need to be more thorough in cross checks but this is OK for now. - if (_meta[A][FOOT].version == _meta[A][HEAD].version) { - _meta_pos[A][FOOT] = round_down(pos + Bytes(data.template at_ptr(0) - buff.get())); - _directory._clip = N - (data.template at_ptr(0) - buff.get()); - } else { - // false positive, keep looking. - found = false; + ptrdiff_t diff = data.template at_ptr(0) - buff; + _meta_pos[A][FOOT] = round_down(pos + Bytes(diff)); + if (diff > 0) { + _directory._clip = N - diff; + _directory.append({bulk_buff.release(), N}); } + break; + } else { + _directory.append({bulk_buff.release(), N}); + pos += N; } - } while (!found); - _directory.append({buff.release(), N}); + } } else { zret.push(0, 1, "Header A not found"); } @@ -309,15 +308,13 @@ Stripe::loadMeta() // Technically if Copy A is valid, Copy B is not needed. But at this point it's cheap to retrieve // (as the exact offset is computable). if (_meta_pos[A][FOOT] > 0) { - alignas(512) char b[CacheStoreBlocks::SCALE]; - delta = _meta_pos[A][FOOT] - _meta_pos[A][HEAD]; // Header B should be immediately after Footer A. If at the end of the last read, // do another read. 
if (data.size() < CacheStoreBlocks::SCALE) { pos += N; - n = pread(fd, b, CacheStoreBlocks::SCALE, pos.units()); - data.setView(b, n.units()); + n = pread(fd, stripe_buff, CacheStoreBlocks::SCALE, pos.units()); + data.setView(stripe_buff, n.units()); } meta = data.template at_ptr(0); if (this->validateMeta(meta)) { @@ -326,8 +323,8 @@ Stripe::loadMeta() // Footer B must be at the same relative offset to Header B as Footer A -> Header A. pos += delta; - n = pread(fd, b, ts::CacheStoreBlocks::SCALE, pos.units()); - data.setView(b, n.units()); + n = pread(fd, stripe_buff, ts::CacheStoreBlocks::SCALE, pos.units()); + data.setView(stripe_buff, n.units()); meta = data.template at_ptr(0); if (this->validateMeta(meta)) { _meta[B][FOOT] = *meta; From 288e46f4823db2d31125b4cf296604502a8b8f4f Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 28 Feb 2017 15:10:20 -0600 Subject: [PATCH 80/81] Cache Tool: Fix. --- cmd/traffic_cache_tool/CacheTool.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index caf8fddce93..0cb5bb2e881 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -265,7 +265,7 @@ Stripe::loadMeta() StripeMeta const *meta; std::unique_ptr bulk_buff; // Buffer for bulk reads. - static const size_t SBSIZE = CacheStripeBlocks::SCALE; // save some typing. + static const size_t SBSIZE = CacheStoreBlocks::SCALE; // save some typing. alignas(SBSIZE) char stripe_buff[SBSIZE]; // Use when reading a single stripe block. if (io_align > SBSIZE) return Errata::Message(0,1,"Cannot load stripe ", _idx, " on span ", _span->_path, " because the I/O block alignment ", io_align, " is larger than the buffer alignment ", SBSIZE); From 983065771c5c62f44f16e048580aacecb7da5e27 Mon Sep 17 00:00:00 2001 From: "Alan M. 
Carroll" Date: Wed, 1 Mar 2017 10:47:29 -0600 Subject: [PATCH 81/81] CacheTool: Finally working after redoing the stripe meta location logic. --- cmd/traffic_cache_tool/CacheTool.cc | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/cmd/traffic_cache_tool/CacheTool.cc b/cmd/traffic_cache_tool/CacheTool.cc index 0cb5bb2e881..9f20018d7a8 100644 --- a/cmd/traffic_cache_tool/CacheTool.cc +++ b/cmd/traffic_cache_tool/CacheTool.cc @@ -151,7 +151,7 @@ struct Stripe { CacheStoreBlocks _len; ///< Length of stripe. uint8_t _vol_idx = 0; ///< Volume index. uint8_t _type = 0; ///< Stripe type. - uint8_t _idx = -1; ///< Stripe index in span. + int8_t _idx = -1; ///< Stripe index in span. int64_t _buckets; ///< Number of buckets per segment. int64_t _segments; ///< Number of segments. @@ -242,13 +242,17 @@ Stripe::updateLiveData(enum Copy c) _buckets = n_buckets / n_segments; _segments = n_segments; + _directory._skip = header_len; } Errata Stripe::loadMeta() { - // Read from disk in chunks of this size. - constexpr static int64_t N = 1 << 24; + // Read from disk in chunks of this size. This needs to be a multiple of both the + // store block size and the directory entry size so neither goes acrss read boundaries. + // Beyond that the value should be in the ~10MB range for what I guess is best performance + // vs. blocking production disk I/O on a live system. + constexpr static int64_t N = (1 << 8) * CacheStoreBlocks::SCALE * sizeof(CacheDirEntry); Errata zret; @@ -273,15 +277,18 @@ Stripe::loadMeta() _directory._start = pos; // Header A must be at the start of the stripe block. + // Todo: really need to check pread() for failure. 
n = pread(fd, stripe_buff, SBSIZE, pos.units()); data.setView(stripe_buff, n.units()); meta = data.template at_ptr(0); if (this->validateMeta(meta)) { - delta = data.template at_ptr(0) - bulk_buff.get(); + delta = data.template at_ptr(0) - stripe_buff; _meta[A][HEAD] = *meta; - _meta_pos[A][HEAD] = round_down(pos + delta); + _meta_pos[A][HEAD] = round_down(pos + Bytes(delta)); pos += SBSIZE; - // Search for Footer A, skipping false positives. + _directory._skip = SBSIZE; // first guess, updated in @c updateLiveData when the header length is computed. + // Search for Footer A. Nothing for it except to grub through the disk. + // The searched data is cached so it's available for directory parsing later if needed. while (pos < limit) { char *buff = static_cast(ats_memalign(io_align, N)); bulk_buff.reset(buff); @@ -291,10 +298,12 @@ Stripe::loadMeta() if (found) { ptrdiff_t diff = data.template at_ptr(0) - buff; _meta_pos[A][FOOT] = round_down(pos + Bytes(diff)); + // don't bother attaching block if the footer is at the start if (diff > 0) { _directory._clip = N - diff; _directory.append({bulk_buff.release(), N}); } + data += SBSIZE; // skip footer for checking on B copy. break; } else { _directory.append({bulk_buff.release(), N}); @@ -689,13 +698,13 @@ Cache::dumpSpans(SpanDumpDepth depth) for (auto stripe : span->_stripes) { std::cout << " : " - << " @ " << stripe->_start << " len=" << stripe->_len.count() << " blocks " + << "Stripe " << static_cast(stripe->_idx) << " @ " << stripe->_start << " len=" << stripe->_len.count() << " blocks " << " vol=" << static_cast(stripe->_vol_idx) << " type=" << static_cast(stripe->_type) << " " << (stripe->isFree() ? 
"free" : "in-use") << std::endl; if (depth >= SpanDumpDepth::STRIPE) { Errata r = stripe->loadMeta(); if (r) { - std::cout << "Stripe found: " << stripe->_segments << " segments with " << stripe->_buckets + std::cout << " " << stripe->_segments << " segments with " << stripe->_buckets << " buckets per segment for " << stripe->_buckets * stripe->_segments * 4 << " total directory entries taking " << stripe->_buckets * stripe->_segments * sizeof(CacheDirEntry) * ts::ENTRIES_PER_BUCKET