167 changes: 114 additions & 53 deletions libcxx/utils/google-benchmark/include/benchmark/benchmark_api.h
@@ -38,12 +38,12 @@ int main(int argc, char** argv) {
// of memcpy() calls of different lengths:
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range_x()]; char* dst = new char[state.range_x()];
memset(src, 'x', state.range_x());
char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
while (state.KeepRunning())
memcpy(dst, src, state.range_x());
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range_x()));
int64_t(state.range(0)));
delete[] src; delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
@@ -60,27 +60,27 @@ BENCHMARK(BM_memcpy)->Range(8, 8<<10);
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
set<int> data = ConstructRandomSet(state.range_x());
set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range_y(); ++j)
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
->ArgPair(1<<10, 1)
->ArgPair(1<<10, 8)
->ArgPair(1<<10, 64)
->ArgPair(1<<10, 512)
->ArgPair(8<<10, 1)
->ArgPair(8<<10, 8)
->ArgPair(8<<10, 64)
->ArgPair(8<<10, 512);
->Args({1<<10, 1})
->Args({1<<10, 8})
->Args({1<<10, 64})
->Args({1<<10, 512})
->Args({8<<10, 1})
->Args({8<<10, 8})
->Args({8<<10, 64})
->Args({8<<10, 512});
// The preceding code is quite repetitive, and can be replaced with
// the following short-hand. The following macro will pick a few
// appropriate arguments in the product of the two specified ranges
// and will generate a microbenchmark for each such pair.
BENCHMARK(BM_SetInsert)->RangePair(1<<10, 8<<10, 1, 512);
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
// For more complex patterns of inputs, passing a custom function
// to Apply allows programmatic specification of an
@@ -90,7 +90,7 @@ BENCHMARK(BM_SetInsert)->RangePair(1<<10, 8<<10, 1, 512);
static void CustomArguments(benchmark::internal::Benchmark* b) {
for (int i = 0; i <= 10; ++i)
for (int j = 32; j <= 1024*1024; j *= 8)
b->ArgPair(i, j);
b->Args({i, j});
}
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
@@ -101,14 +101,14 @@ template <class Q> int BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
while (state.KeepRunning()) {
for (int i = state.range_x(); i--; )
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range_x(); e--; )
for (int e = state.range(0); e--; )
q.Wait(&v);
}
// actually messages, not bytes:
state.SetBytesProcessed(
static_cast<int64_t>(state.iterations())*state.range_x());
static_cast<int64_t>(state.iterations())*state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
@@ -153,8 +153,15 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <stddef.h>
#include <stdint.h>

#include <vector>

#include "macros.h"

#if defined(BENCHMARK_HAS_CXX11)
#include <type_traits>
#include <utility>
#endif

namespace benchmark {
class BenchmarkReporter;

@@ -165,11 +172,16 @@ void Initialize(int* argc, char** argv);
// of each matching benchmark. Otherwise run each matching benchmark and
// report the results.
//
// The second overload reports the results using the specified 'reporter'.
// The second and third overloads use the specified 'console_reporter' and
// 'file_reporter' respectively. 'file_reporter' will write to the file
// specified by '--benchmark_output'. If '--benchmark_output' is not given,
// the 'file_reporter' is ignored.
//
// RETURNS: The number of matching benchmarks.
size_t RunSpecifiedBenchmarks();
size_t RunSpecifiedBenchmarks(BenchmarkReporter* reporter);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
BenchmarkReporter* file_reporter);


// If this routine is called, peak memory allocation past this point in the
Expand Down Expand Up @@ -258,7 +270,7 @@ typedef double(BigOFunc)(int);
// benchmark to use.
class State {
public:
State(size_t max_iters, bool has_x, int x, bool has_y, int y,
State(size_t max_iters, const std::vector<int>& ranges,
int thread_i, int n_threads);

// Returns true if the benchmark should continue through another iteration.
Expand Down Expand Up @@ -367,7 +379,7 @@ class State {
}

BENCHMARK_ALWAYS_INLINE
size_t complexity_length_n() {
int complexity_length_n() {
return complexity_n_;
}

Expand Down Expand Up @@ -413,17 +425,9 @@ class State {

// Range arguments for this run. CHECKs if the argument has been set.
BENCHMARK_ALWAYS_INLINE
int range_x() const {
assert(has_range_x_);
((void)has_range_x_); // Prevent unused warning.
return range_x_;
}

BENCHMARK_ALWAYS_INLINE
int range_y() const {
assert(has_range_y_);
((void)has_range_y_); // Prevent unused warning.
return range_y_;
int range(std::size_t pos) const {
assert(range_.size() > pos);
return range_[pos];
}

BENCHMARK_ALWAYS_INLINE
@@ -434,11 +438,7 @@ class State {
bool finished_;
size_t total_iterations_;

bool has_range_x_;
int range_x_;

bool has_range_y_;
int range_y_;
std::vector<int> range_;

size_t bytes_processed_;
size_t items_processed_;
Expand Down Expand Up @@ -489,24 +489,22 @@ class Benchmark {
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Range(int start, int limit);

// Run this benchmark once for every value in the range [start..limit]
// Run this benchmark once for all values in the range [start..limit] with a specific step
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* DenseRange(int start, int limit);
Benchmark* DenseRange(int start, int limit, int step = 1);

// Run this benchmark once with "x,y" as the extra arguments passed
// Run this benchmark once with "args" as the extra arguments passed
// to the function.
// REQUIRES: The function passed to the constructor must accept arg1,arg2.
Benchmark* ArgPair(int x, int y);
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Args(const std::vector<int>& args);

// Pick a set of values A from the range [lo1..hi1] and a set
// of values B from the range [lo2..hi2]. Run the benchmark for
// every pair of values in the cartesian product of A and B
// (i.e., for all combinations of the values in A and B).
// REQUIRES: The function passed to the constructor must accept arg1,arg2.
Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2);
// Run this benchmark once for a number of values picked from the
// ranges [start..limit]. (starts and limits are always picked.)
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);

// Pass this benchmark object to *func, which can customize
// the benchmark by calling various methods like Arg, ArgPair,
// the benchmark by calling various methods like Arg, Args,
// Threads, etc.
Benchmark* Apply(void (*func)(Benchmark* benchmark));

@@ -587,6 +585,20 @@ class Benchmark {
Benchmark& operator=(Benchmark const&);
};

} // namespace internal

// Create and register a benchmark with the specified 'name' that invokes
// the specified functor 'fn'.
//
// RETURNS: A pointer to the registered benchmark.
internal::Benchmark* RegisterBenchmark(const char* name, internal::Function* fn);

#if defined(BENCHMARK_HAS_CXX11)
template <class Lambda>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn);
#endif

namespace internal {
// The class used to hold all Benchmarks created from static functions
// (i.e. those created using the BENCHMARK(...) macros).
class FunctionBenchmark : public Benchmark {
@@ -600,8 +612,57 @@ class FunctionBenchmark : public Benchmark {
Function* func_;
};

#ifdef BENCHMARK_HAS_CXX11
template <class Lambda>
class LambdaBenchmark : public Benchmark {
public:
virtual void Run(State& st) { lambda_(st); }

private:
template <class OLambda>
LambdaBenchmark(const char* name, OLambda&& lam)
: Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}

LambdaBenchmark(LambdaBenchmark const&) = delete;

private:
template <class Lam>
friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&);

Lambda lambda_;
};
#endif

} // end namespace internal

inline internal::Benchmark*
RegisterBenchmark(const char* name, internal::Function* fn) {
return internal::RegisterBenchmarkInternal(
::new internal::FunctionBenchmark(name, fn));
}

#ifdef BENCHMARK_HAS_CXX11
template <class Lambda>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) {
using BenchType = internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
return internal::RegisterBenchmarkInternal(
::new BenchType(name, std::forward<Lambda>(fn)));
}
#endif

#if defined(BENCHMARK_HAS_CXX11) && \
(!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
template <class Lambda, class ...Args>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn,
Args&&... args) {
return benchmark::RegisterBenchmark(name,
[=](benchmark::State& st) { fn(st, args...); });
}
#else
#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
#endif


// The base class for all fixture tests.
class Fixture: public internal::Benchmark {
public:
@@ -652,11 +713,11 @@ class Fixture: public internal::Benchmark {

// Old-style macros
#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->ArgPair((a1), (a2))
#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
BENCHMARK(n)->RangePair((l1), (h1), (l2), (h2))
BENCHMARK(n)->Ranges({{(l1), (h1)}, {(l2), (h2)}})

#if __cplusplus >= 201103L

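Taken together, the hunks above replace the fixed two-argument API (range_x()/range_y(), ArgPair, RangePair) with an arbitrary-arity one (range(i), Args, Ranges) and add runtime registration. A minimal usage sketch under those assumptions follows; BM_demo, its argument values, and the lambda are illustrative and not part of the diff.

#include "benchmark/benchmark.h"
#include <vector>

// Each run now carries a vector of arguments; state.range(i) replaces the
// old state.range_x()/state.range_y() pair and scales to any arity.
static void BM_demo(benchmark::State& state) {
  std::vector<int> v(state.range(0), state.range(1));
  while (state.KeepRunning()) {
    v.assign(state.range(0), state.range(2));
    benchmark::DoNotOptimize(v.data());
  }
}
// Explicit argument tuples...
BENCHMARK(BM_demo)->Args({1 << 10, 1, 2})->Args({8 << 10, 3, 4});
// ...or one {lo, hi} range per argument position; representative values are
// picked from the cartesian product of the three ranges.
BENCHMARK(BM_demo)->Ranges({{1 << 10, 8 << 10}, {1, 8}, {2, 4}});

// With C++11, benchmarks can also be registered at runtime instead of via
// the BENCHMARK macro.
int main(int argc, char** argv) {
  int answer = 42;
  benchmark::RegisterBenchmark("BM_demo_lambda", [=](benchmark::State& st) {
    while (st.KeepRunning()) benchmark::DoNotOptimize(answer);
  });
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  return 0;
}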
10 changes: 9 additions & 1 deletion libcxx/utils/google-benchmark/include/benchmark/macros.h
@@ -14,7 +14,11 @@
#ifndef BENCHMARK_MACROS_H_
#define BENCHMARK_MACROS_H_

#if __cplusplus < 201103L
#if __cplusplus >= 201103L
#define BENCHMARK_HAS_CXX11
#endif

#ifndef BENCHMARK_HAS_CXX11
# define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
TypeName& operator=(const TypeName&)
@@ -53,4 +57,8 @@
# define BENCHMARK_BUILTIN_EXPECT(x, y) x
#endif

#if defined(__GNUC__) && !defined(__clang__)
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif

#endif // BENCHMARK_MACROS_H_
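BENCHMARK_GCC_VERSION encodes the compiler as major*100 + minor, so the ">= 409" guard used for the variadic RegisterBenchmark overload in benchmark_api.h reads as "GCC 4.9 or newer". A small sketch of feature-testing against these macros from client code; the MY_* macro name and the include path are assumptions, not part of the library.

#include "benchmark/macros.h"

// BENCHMARK_HAS_CXX11 is derived from __cplusplus; BENCHMARK_GCC_VERSION is
// only defined for GCC proper (not Clang), as __GNUC__ * 100 + __GNUC_MINOR__,
// e.g. 409 for GCC 4.9.
#if defined(BENCHMARK_HAS_CXX11) && \
    (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
#define MY_HAVE_VARIADIC_REGISTER_BENCHMARK 1
#else
#define MY_HAVE_VARIADIC_REGISTER_BENCHMARK 0
#endif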
15 changes: 12 additions & 3 deletions libcxx/utils/google-benchmark/include/benchmark/reporter.h
@@ -156,14 +156,23 @@ class BenchmarkReporter {
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
public:
public:
enum OutputOptions {
OO_None,
OO_Color
};
explicit ConsoleReporter(OutputOptions color_output = OO_Color)
: color_output_(color_output == OO_Color) {}

virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);

protected:
protected:
virtual void PrintRunData(const Run& report);

size_t name_field_width_;

private:
bool color_output_;
};

class JSONReporter : public BenchmarkReporter {
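A sketch tying the two reporter changes together: colour handling becomes a constructor option instead of a global flag, and a second reporter can be handed to RunSpecifiedBenchmarks for '--benchmark_output'. The variable names below are illustrative only.

#include "benchmark/benchmark.h"

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  // Colour-free console output; the updated tests construct the reporter the
  // same way so their regexes never see escape codes.
  benchmark::ConsoleReporter console(benchmark::ConsoleReporter::OO_None);
  // Written to the file named by --benchmark_output; ignored if that flag is
  // absent.
  benchmark::JSONReporter file_reporter;
  benchmark::RunSpecifiedBenchmarks(&console, &file_reporter);
  return 0;
}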
263 changes: 162 additions & 101 deletions libcxx/utils/google-benchmark/src/benchmark.cc

Large diffs are not rendered by default.

16 changes: 6 additions & 10 deletions libcxx/utils/google-benchmark/src/colorprint.cc
@@ -20,16 +20,13 @@
#include <string>
#include <memory>

#include "commandlineflags.h"
#include "check.h"
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#endif

DECLARE_bool(color_print);

namespace benchmark {
namespace {
#ifdef BENCHMARK_OS_WINDOWS
@@ -120,14 +117,14 @@ std::string FormatString(const char *msg, ...) {
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}

if (!FLAGS_color_print) {
out << FormatString(fmt, args);
va_end(args);
return;
}

void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, va_list args) {
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning

const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);

// Gets the current text color.
@@ -152,7 +149,6 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
out << FormatString(fmt, args) << "\033[m";
#endif

va_end(args);
}

} // end namespace benchmark
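The point of the new va_list overload above is that the formatting and colour logic can be reached without re-collecting variadic arguments, which is what console_reporter.cc needs further down. A generic sketch of the same split, using hypothetical Log/LogV names rather than the library's:

#include <cstdarg>
#include <cstdio>

// The va_list overload does the real work...
static void LogV(const char* fmt, va_list args) {
  std::vfprintf(stderr, fmt, args);
}

// ...and the variadic overload is a thin shim, mirroring how
// ColorPrintf(out, color, fmt, ...) now forwards to the va_list version.
static void Log(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  LogV(fmt, args);
  va_end(args);
}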
1 change: 1 addition & 0 deletions libcxx/utils/google-benchmark/src/colorprint.h
@@ -20,6 +20,7 @@ enum LogColor {
std::string FormatString(const char* msg, va_list args);
std::string FormatString(const char* msg, ...);

void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, va_list args);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);

} // end namespace benchmark
4 changes: 2 additions & 2 deletions libcxx/utils/google-benchmark/src/complexity.cc
@@ -31,9 +31,9 @@ BigOFunc* FittingCurve(BigO complexity) {
case oN:
return [](int n) -> double { return n; };
case oNSquared:
return [](int n) -> double { return n * n; };
return [](int n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](int n) -> double { return n * n * n; };
return [](int n) -> double { return std::pow(n, 3); };
case oLogN:
return [](int n) { return std::log2(n); };
case oNLogN:
41 changes: 25 additions & 16 deletions libcxx/utils/google-benchmark/src/console_reporter.cc
@@ -30,8 +30,6 @@
#include "string_util.h"
#include "walltime.h"

DECLARE_bool(color_print);

namespace benchmark {

bool ConsoleReporter::ReportContext(const Context& context) {
@@ -40,10 +38,10 @@ bool ConsoleReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);

#ifdef BENCHMARK_OS_WINDOWS
if (FLAGS_color_print && &std::cout != &GetOutputStream()) {
if (color_output_ && &std::cout != &GetOutputStream()) {
GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
FLAGS_color_print = false;
color_output_ = false;
}
#endif
std::string str = FormatString("%-*s %13s %13s %10s\n",
@@ -59,18 +57,29 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
PrintRunData(run);
}

static void IgnoreColorPrint(std::ostream& out, LogColor,
const char* fmt, ...)
{
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}

void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();

PrinterFn* printer = color_output_ ? (PrinterFn*)ColorPrintf
: IgnoreColorPrint;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
ColorPrintf(Out, name_color, "%-*s ", name_field_width_,
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name.c_str());

if (result.error_occurred) {
ColorPrintf(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
ColorPrintf(Out, COLOR_DEFAULT, "\n");
printer(Out, COLOR_DEFAULT, "\n");
return;
}
// Format bytes per second
@@ -91,34 +100,34 @@

if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time,
printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time,
big_o.c_str(), cpu_time, big_o.c_str());
} else if (result.report_rms) {
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
cpu_time * 100);
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
cpu_time, timeLabel);
}

if (!result.report_big_o && !result.report_rms) {
ColorPrintf(Out, COLOR_CYAN, "%10lld", result.iterations);
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}

if (!rate.empty()) {
ColorPrintf(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
}

if (!items.empty()) {
ColorPrintf(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
}

if (!result.report_label.empty()) {
ColorPrintf(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}

ColorPrintf(Out, COLOR_DEFAULT, "\n");
printer(Out, COLOR_DEFAULT, "\n");
}

} // end namespace benchmark
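PrintRunData above replaces a per-call colour check with a printf-shaped function pointer chosen once. A self-contained sketch of that dispatch pattern, using hypothetical PlainPrint/FancyPrint functions rather than the reporter's real ones:

#include <cstdarg>
#include <cstdio>

typedef void(PrinterFn)(const char* fmt, ...);

static void PlainPrint(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::vprintf(fmt, args);
  va_end(args);
}

static void FancyPrint(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::printf("\033[33m");  // colour on
  std::vprintf(fmt, args);
  std::printf("\033[0m");   // colour off
  va_end(args);
}

static void Report(bool use_color, int iterations) {
  // Decide once; every subsequent call site stays identical.
  PrinterFn* printer = use_color ? FancyPrint : PlainPrint;
  printer("iterations: %d\n", iterations);
}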
6 changes: 3 additions & 3 deletions libcxx/utils/google-benchmark/src/cycleclock.h
@@ -113,11 +113,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
asm("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
asm("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
}
8 changes: 7 additions & 1 deletion libcxx/utils/google-benchmark/src/sysinfo.cc
@@ -239,6 +239,7 @@ void InitializeSystemInfo() {
}
// TODO: also figure out cpuinfo_num_cpus


#elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
@@ -251,7 +252,12 @@ void InitializeSystemInfo() {
cpuinfo_cycles_per_second = static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
else
cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
// TODO: also figure out cpuinfo_num_cpus

SYSTEM_INFO sysinfo;
// Use memset as opposed to = {} to avoid GCC missing initializer false positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo);
cpuinfo_num_cpus = sysinfo.dwNumberOfProcessors; // number of logical processors in the current group

#elif defined BENCHMARK_OS_MACOSX
// returning "mach time units" per second. the current number of elapsed
6 changes: 6 additions & 0 deletions libcxx/utils/google-benchmark/test/CMakeLists.txt
@@ -45,9 +45,15 @@ add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
add_test(fixture_test fixture_test --benchmark_min_time=0.01)

compile_benchmark_test(register_benchmark_test)
add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01)

compile_benchmark_test(map_test)
add_test(map_test map_test --benchmark_min_time=0.01)

compile_benchmark_test(multiple_ranges_test)
add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)

compile_benchmark_test(reporter_output_test)
add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)

20 changes: 10 additions & 10 deletions libcxx/utils/google-benchmark/test/basic_test.cc
@@ -14,7 +14,7 @@ BENCHMARK(BM_empty)->ThreadPerCpu();

void BM_spin_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (int x = 0; x < state.range_x(); ++x) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
}
@@ -23,11 +23,11 @@ BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while(state.KeepRunning()) {
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
@@ -39,11 +39,11 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
while(state.KeepRunning()) {
state.PauseTiming();
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
@@ -64,11 +64,11 @@ BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

void BM_spin_pause_after(benchmark::State& state) {
while(state.KeepRunning()) {
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
@@ -77,15 +77,15 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();


void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while(state.KeepRunning()) {
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range_x(); ++i) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
26 changes: 13 additions & 13 deletions libcxx/utils/google-benchmark/test/benchmark_test.cc
@@ -67,7 +67,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
while (state.KeepRunning())
pi = CalculatePi(state.range_x());
pi = CalculatePi(state.range(0));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
@@ -87,25 +87,25 @@ BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range_x());
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range_y(); ++j)
for (int j = 0; j < state.range(1); ++j)
data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range_y());
state.SetBytesProcessed(state.iterations() * state.range_y() * sizeof(int));
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
BENCHMARK(BM_SetInsert)->RangePair(1<<10,8<<10, 1,10);
BENCHMARK(BM_SetInsert)->Ranges({{1<<10,8<<10}, {1,10}});

template<typename Container, typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
while (state.KeepRunning()) {
Container c;
for (int i = state.range_x(); --i; )
for (int i = state.range(0); --i; )
c.push_back(v);
}
const size_t items_processed = state.iterations() * state.range_x();
const size_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
@@ -117,8 +117,8 @@ BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif

static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range_x(), '-');
std::string s2(state.range_x(), '-');
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning())
benchmark::DoNotOptimize(s1.compare(s2));
}
@@ -147,14 +147,14 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
while (state.KeepRunning()) {
for (int i = 0; i < state.range_x(); ++i)
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1<<16,1<<28);

static void BM_ParallelMemset(benchmark::State& state) {
int size = state.range_x() / sizeof(int);
int size = state.range(0) / sizeof(int);
int thread_size = size / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
@@ -179,7 +179,7 @@ BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);

static void BM_ManualTiming(benchmark::State& state) {
size_t slept_for = 0;
int microseconds = state.range_x();
int microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration {
static_cast<double>(microseconds)
};
48 changes: 29 additions & 19 deletions libcxx/utils/google-benchmark/test/complexity_test.cc
@@ -36,18 +36,27 @@ struct TestCase {
CHECK(err_str.empty()) << "Could not construct regex \"" << regex << "\""
<< " got Error: " << err_str;

std::string near = "<EOF>";
std::string line;
bool first = true;
while (remaining_output.eof() == false) {
CHECK(remaining_output.good());
std::getline(remaining_output, line);
// Keep the first line as context.
if (first) {
near = line;
first = false;
}
if (r.Match(line)) return;
CHECK(match_rule != MR_Next) << "Expected line \"" << line
<< "\" to match regex \"" << regex << "\"";
<< "\" to match regex \"" << regex << "\""
<< "\nstarted matching at line: \"" << near << "\"";
}

CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << regex
<< "\" was found";
<< "\" was found"
<< "\nstarted matching at line: \"" << near << "\"";
}
};

@@ -112,7 +121,7 @@ std::string join(First f, Args&&... args) {
return std::string(std::move(f)) + "[ ]+" + join(std::forward<Args>(args)...);
}

std::string dec_re = "[0-9]+\\.[0-9]+";
std::string dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";

#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
@@ -138,7 +147,7 @@ int AddComplexityTest(std::vector<TestCase>* console_out, std::vector<TestCase>*
});
AddCases(csv_out, {
{"^\"" + big_o_test_name + "\",," + dec_re + "," + dec_re + "," + big_o + ",,,,,$"},
{"^\"" + rms_test_name + "\",," + dec_re + "," + dec_re + ",,,,,,$"}
{"^\"" + rms_test_name + "\",," + dec_re + "," + dec_re + ",,,,,,$", MR_Next}
});
return 0;
}
@@ -151,12 +160,15 @@ int AddComplexityTest(std::vector<TestCase>* console_out, std::vector<TestCase>*

void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
for (int i=0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
}
state.SetComplexityN(state.range_x());
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](int){return 1.0; });
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity();
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](int){return 1.0; });

const char* big_o_1_test_name = "BM_Complexity_O1_BigO";
const char* rms_o_1_test_name = "BM_Complexity_O1_RMS";
@@ -167,6 +179,10 @@ const char* lambda_big_o_1 = "f\\(N\\)";
ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1);

// Add auto enum tests
ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1);

// Add lambda tests
ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
@@ -185,12 +201,12 @@ std::vector<int> ConstructRandomVector(int size) {
}

void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector)
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector = state.range(0)*2; // Test worst case scenario (item not in vector)
while (state.KeepRunning()) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range_x());
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) -> double{return n; });
@@ -214,11 +230,11 @@ ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
// ========================================================================= //

static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
auto v = ConstructRandomVector(state.range(0));
while (state.KeepRunning()) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range_x());
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) {return n * std::log2(n); });
@@ -244,14 +260,8 @@ ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,


int main(int argc, char* argv[]) {
// Add --color_print=false to argv since we don't want to match color codes.
char new_arg[64];
char* new_argv[64];
std::copy(argv, argv + argc, new_argv);
new_argv[argc++] = std::strcpy(new_arg, "--color_print=false");
benchmark::Initialize(&argc, new_argv);

benchmark::ConsoleReporter CR;
benchmark::Initialize(&argc, argv);
benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
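The widened dec_re (here and in reporter_output_test.cc) matters because measured values can be printed in scientific notation, e.g. a Big-O coefficient such as 8.9e+09, which the old [0-9]+\.[0-9]+ pattern never matched. A small standalone comparison of the two patterns; the anchors and driver below are purely illustrative, since the tests embed dec_re inside larger per-line regexes:

#include <iostream>
#include <regex>

int main() {
  const std::regex old_re("^[0-9]+\\.[0-9]+$");
  const std::regex new_re("^[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?$");
  for (const char* s : {"1234.56", "8.9e+09", "42"}) {
    std::cout << s << "  old: " << std::regex_match(s, old_re)
              << "  new: " << std::regex_match(s, new_re) << "\n";
  }
  // 1234.56 matches both; 8.9e+09 and 42 match only the new pattern.
  return 0;
}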
2 changes: 1 addition & 1 deletion libcxx/utils/google-benchmark/test/fixture_test.cc
@@ -44,7 +44,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range_x());
st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
6 changes: 3 additions & 3 deletions libcxx/utils/google-benchmark/test/map_test.cc
@@ -17,7 +17,7 @@ std::map<int, int> ConstructRandomMap(int size) {

// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range_x();
const int size = state.range(0);
while (state.KeepRunning()) {
state.PauseTiming();
std::map<int, int> m = ConstructRandomMap(size);
@@ -34,7 +34,7 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(st.range_x());
m = ConstructRandomMap(st.range(0));
}

void TearDown(const ::benchmark::State&) {
@@ -45,7 +45,7 @@ class MapFixture : public ::benchmark::Fixture {
};

BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range_x();
const int size = state.range(0);
while (state.KeepRunning()) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
46 changes: 46 additions & 0 deletions libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
@@ -0,0 +1,46 @@
#include "benchmark/benchmark.h"

#include <set>
#include <cassert>

class MultipleRangesFixture : public ::benchmark::Fixture {
public:
MultipleRangesFixture()
: expectedValues({
{1, 3, 5}, {1, 3, 8}, {1, 3, 15}, {2, 3, 5}, {2, 3, 8}, {2, 3, 15},
{1, 4, 5}, {1, 4, 8}, {1, 4, 15}, {2, 4, 5}, {2, 4, 8}, {2, 4, 15},
{1, 7, 5}, {1, 7, 8}, {1, 7, 15}, {2, 7, 5}, {2, 7, 8}, {2, 7, 15},
{7, 6, 3}
})
{
}

void SetUp(const ::benchmark::State& state) {
std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};

assert(expectedValues.find(ranges) != expectedValues.end());

actualValues.insert(ranges);
}

virtual ~MultipleRangesFixture() {
assert(actualValues.size() == expectedValues.size());
}

std::set<std::vector<int>> expectedValues;
std::set<std::vector<int>> actualValues;
};


BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
while (state.KeepRunning()) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
}

BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)->RangeMultiplier(2)->Ranges({{1, 2}, {3, 7}, {5, 15}})->Args({7, 6, 3});

BENCHMARK_MAIN()
6 changes: 3 additions & 3 deletions libcxx/utils/google-benchmark/test/options_test.cc
@@ -9,7 +9,7 @@ void BM_basic(benchmark::State& state) {
}

void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range_x());
std::chrono::milliseconds sleep_duration(state.range(0));
while (state.KeepRunning()) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration)
@@ -25,8 +25,8 @@ BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
BENCHMARK(BM_basic)->ArgPair(42, 42);
BENCHMARK(BM_basic)->RangePair(64, 512, 64, 512);
BENCHMARK(BM_basic)->Args({42, 42});
BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
BENCHMARK(BM_basic)->MinTime(0.7);
BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
149 changes: 149 additions & 0 deletions libcxx/utils/google-benchmark/test/register_benchmark_test.cc
@@ -0,0 +1,149 @@

#undef NDEBUG
#include "benchmark/benchmark.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include <cassert>
#include <vector>

namespace {

class TestReporter : public benchmark::ConsoleReporter {
public:
virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}

std::vector<Run> all_runs_;
};

struct TestCase {
std::string name;
const char* label;
TestCase(const char* xname) : name(xname), label(nullptr) {}
TestCase(const char* xname, const char* xlabel)
: name(xname), label(xlabel) {}

typedef benchmark::BenchmarkReporter::Run Run;

void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name) << "expected " << name
<< " got " << run.benchmark_name;
if (label) {
CHECK(run.report_label == label) << "expected " << label
<< " got " << run.report_label;
} else {
CHECK(run.report_label == "");
}
}
};

std::vector<TestCase> ExpectedResults;

int AddCases(std::initializer_list<TestCase> const& v) {
for (auto N : v) {
ExpectedResults.push_back(N);
}
return 0;
}

#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) \
int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})

} // end namespace

typedef benchmark::internal::Benchmark* ReturnVal;

//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) { while (state.KeepRunning()) {} }
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark(
"BM_function_manual_registration",
BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});

//----------------------------------------------------------------------------//
// Test RegisterBenchmark with additional arguments
// Note: GCC <= 4.8 does not support this form of RegisterBenchmark because it
// rejects the variadic pack expansion of lambda captures.
//----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK

void BM_extra_args(benchmark::State& st, const char* label) {
while (st.KeepRunning()) {}
st.SetLabel(label);
}
int RegisterFromFunction() {
std::pair<const char*, const char*> cases[] = {
{"test1", "One"},
{"test2", "Two"},
{"test3", "Three"}
};
for (auto& c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
}
int dummy2 = RegisterFromFunction();
ADD_CASES(
{"test1", "One"},
{"test2", "Two"},
{"test3", "Three"}
);

#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK

//----------------------------------------------------------------------------//
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//

struct CustomFixture {
void operator()(benchmark::State& st) {
while (st.KeepRunning()) {}
}
};

void TestRegistrationAtRuntime() {
#ifdef BENCHMARK_HAS_CXX11
{
CustomFixture fx;
benchmark::RegisterBenchmark("custom_fixture", fx);
AddCases({"custom_fixture"});
}
#endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
{
int x = 42;
auto capturing_lam = [=](benchmark::State& st) {
while (st.KeepRunning()) {}
st.SetLabel(std::to_string(x));
};
benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
AddCases({{"lambda_benchmark", "42"}});
}
#endif
}

int main(int argc, char* argv[]) {
TestRegistrationAtRuntime();

benchmark::Initialize(&argc, argv);

TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);

typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();

for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());

return 0;
}
16 changes: 6 additions & 10 deletions libcxx/utils/google-benchmark/test/reporter_output_test.cc
@@ -114,7 +114,9 @@ std::string join(First f, Args&&... args) {
return std::string(std::move(f)) + "[ ]+" + join(std::forward<Args>(args)...);
}

std::string dec_re = "[0-9]+\\.[0-9]+";


std::string dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";

} // end namespace

@@ -185,7 +187,7 @@ ADD_CASES(&CSVOutputTests, {
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
}
state.SetComplexityN(state.range_x());
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1);

@@ -203,14 +205,8 @@ ADD_CASES(&ConsoleOutputTests, {


int main(int argc, char* argv[]) {
// Add --color_print=false to argv since we don't want to match color codes.
char new_arg[64];
char* new_argv[64];
std::copy(argv, argv + argc, new_argv);
new_argv[argc++] = std::strcpy(new_arg, "--color_print=false");
benchmark::Initialize(&argc, new_argv);

benchmark::ConsoleReporter CR;
benchmark::Initialize(&argc, argv);
benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
4 changes: 2 additions & 2 deletions libcxx/utils/google-benchmark/test/skip_with_error_test.cc
@@ -74,7 +74,7 @@ ADD_CASES("BM_error_before_running",
void BM_error_during_running(benchmark::State& state) {
int first_iter = true;
while (state.KeepRunning()) {
if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
@@ -116,7 +116,7 @@ ADD_CASES(
void BM_error_while_paused(benchmark::State& state) {
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range_x() == 1 && state.thread_index <= (state.threads / 2)) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.PauseTiming();
30 changes: 30 additions & 0 deletions libcxx/utils/google-benchmark/tools/compare_bench.py
@@ -0,0 +1,30 @@
#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
difference.
"""
import sys
import gbench
from gbench import util, report

def main():
# Parse the command line flags
def usage():
print('compare_bench.py <test1> <test2> [benchmark options]...')
exit(1)
if '--help' in sys.argv or len(sys.argv) < 3:
usage()
tests = sys.argv[1:3]
bench_opts = sys.argv[3:]
bench_opts = list(bench_opts)
# Run the benchmarks and report the results
json1 = gbench.util.run_or_load_benchmark(tests[0], bench_opts)
json2 = gbench.util.run_or_load_benchmark(tests[1], bench_opts)
output_lines = gbench.report.generate_difference_report(json1, json2)
print('Comparing %s to %s' % (tests[0], tests[1]))
for ln in output_lines:
print(ln)


if __name__ == '__main__':
main()
46 changes: 46 additions & 0 deletions libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
@@ -0,0 +1,46 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
}
]
}
46 changes: 46 additions & 0 deletions libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
@@ -0,0 +1,46 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 25,
"cpu_time": 25,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 20833333,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 90,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 110,
"cpu_time": 110,
"time_unit": "ns"
}
]
}
8 changes: 8 additions & 0 deletions libcxx/utils/google-benchmark/tools/gbench/__init__.py
@@ -0,0 +1,8 @@
"""Google Benchmark tooling"""

__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'

__all__ = []
136 changes: 136 additions & 0 deletions libcxx/utils/google-benchmark/tools/gbench/report.py
@@ -0,0 +1,136 @@
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os

class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code

def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))

def __format__(self, format):
return self.code

# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')

def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)


def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name


def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
return float(new_val - old_val) / abs(old_val)


def generate_difference_report(json1, json2, use_color=True):
"""
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
first_col_width = find_longest_name(json1['benchmarks']) + 5
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
first_line = "{:<{}s} Time CPU".format(
'Benchmark', first_col_width)
output_strs = [first_line, '-' * len(first_line)]
for bn in json1['benchmarks']:
other_bench = find_test(bn['name'])
if not other_bench:
continue

def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
fmt_str = "{}{:<{}s}{endc} {}{:+.2f}{endc} {}{:+.2f}{endc}"
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color, fmt_str,
BC_HEADER, bn['name'], first_col_width,
get_color(tres), tres, get_color(cpures), cpures,
endc=BC_ENDC)]
return output_strs

###############################################################################
# Unit tests

import unittest

class TestReportDifference(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2

def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.00', '+0.00'],
['BM_2xFaster', '-0.50', '-0.50'],
['BM_2xSlower', '+1.00', '+1.00'],
['BM_10PercentFaster', '-0.10', '-0.10'],
['BM_10PercentSlower', '+0.10', '+0.10']
]
json1, json2 = self.load_results()
output_lines = generate_difference_report(json1, json2, use_color=False)
print(output_lines)
self.assertEqual(len(output_lines), len(expect_lines))
for i in xrange(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 3)
self.assertEqual(parts, expect_lines[i])


if __name__ == '__main__':
unittest.main()
130 changes: 130 additions & 0 deletions libcxx/utils/google-benchmark/tools/gbench/util.py
@@ -0,0 +1,130 @@
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys

# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2

_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
magic bytes for an EXE, Mach O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, 'r') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
'\xfe\xed\xfa\xce', # MH_MAGIC
'\xce\xfa\xed\xfe', # MH_CIGAM
'\xfe\xed\xfa\xcf', # MH_MAGIC_64
'\xcf\xfa\xed\xfe', # MH_CIGAM_64
'\xca\xfe\xba\xbe', # FAT_MAGIC
'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == 'MZ'
else:
return magic_bytes == '\x7FELF'


def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
try:
with open(filename, 'r') as f:
json.load(f)
return True
except:
pass
return False


def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
string representing the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg


def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype


def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)


def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
thandle, tname = tempfile.mkstemp()
os.close(thandle)
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd + ['--benchmark_out=%s' % tname])
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(tname)
os.unlink(tname)
return json_res


def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
else:
assert False # This branch is unreachable