implement easier API to add axis and zip/user iteration at the same time
robertmaynard committed Feb 28, 2022
1 parent a25f578 commit 0a2130f
Showing 9 changed files with 118 additions and 64 deletions.
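For orientation before the per-file diffs, here is a minimal sketch of the new combined API. The benchmark name and kernel body are placeholders, not part of this commit; the zip_axes call mirrors the updated example file.

// Assumes <nvbench/nvbench.cuh>; "my_copy_benchmark" is a hypothetical callable.
#include <nvbench/nvbench.cuh>

void my_copy_benchmark(nvbench::state &state)
{
  state.exec([](nvbench::launch &) { /* kernel launch elided */ });
}

// Previously, each axis was added with add_int64_axis() and then tied
// together by name via zip_axes({names...}). After this commit the axis
// objects can be passed straight to zip_axes, which registers and zips
// them in a single call.
NVBENCH_BENCH(my_copy_benchmark)
  .zip_axes(nvbench::int64_axis{"BlockSize", {32, 64, 128, 256}},
            nvbench::int64_axis{"NumBlocks", {1024, 512, 256, 128}});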
42 changes: 18 additions & 24 deletions examples/custom_iteration_spaces.cu
@@ -72,9 +72,8 @@ void tied_copy_sweep_grid_shape(nvbench::state &state)
}
NVBENCH_BENCH(tied_copy_sweep_grid_shape)
// Every power of two from 64->1024:
.add_int64_axis("BlockSize", {32,64,128,256})
.add_int64_axis("NumBlocks", {1024,512,256,128})
.zip_axes({"BlockSize", "NumBlocks"});
.zip_axes( nvbench::int64_axis{"BlockSize", {32,64,128,256}},
nvbench::int64_axis{"NumBlocks", {1024,512,256,128}});

//==============================================================================
// under_diag:
@@ -154,15 +153,12 @@ void user_copy_sweep_grid_shape(nvbench::state &state)
copy_sweep_grid_shape(state);
}
NVBENCH_BENCH(user_copy_sweep_grid_shape)
// Every power of two from 64->1024:
.add_int64_power_of_two_axis("BlockSize", nvbench::range(6, 10))
.add_int64_power_of_two_axis("NumBlocks", nvbench::range(6, 10))
.user_iteration_axes({"NumBlocks", "BlockSize"},
[](auto... args)
-> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<under_diag>(args...);
});

.user_iteration_axes(
[](auto... args) -> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<under_diag>(args...);
},
nvbench::int64_axis("BlockSize", {64, 128, 256, 512, 1024}),
nvbench::int64_axis("NumBlocks", {1024, 521, 256, 128, 64}));

//==============================================================================
// gauss:
@@ -233,15 +229,13 @@ void dual_float64_axis(nvbench::state &state)
});
}
NVBENCH_BENCH(dual_float64_axis)
.add_float64_axis("Duration_A", nvbench::range(0., 1e-4, 1e-5))
.add_float64_axis("Duration_B", nvbench::range(0., 1e-4, 1e-5))
.user_iteration_axes({"Duration_A"},
[](auto... args)
-> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<gauss>(args...);
})
.user_iteration_axes({"Duration_B"},
[](auto... args)
-> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<gauss>(args...);
});
.user_iteration_axes(
[](auto... args) -> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<gauss>(args...);
},
nvbench::float64_axis("Duration_A", nvbench::range(0., 1e-4, 1e-5)))
.user_iteration_axes(
[](auto... args) -> std::unique_ptr<nvbench::axis_space_base> {
return std::make_unique<gauss>(args...);
},
nvbench::float64_axis("Duration_B", nvbench::range(0., 1e-4, 1e-5)));
18 changes: 18 additions & 0 deletions nvbench/axes_metadata.cuh
@@ -62,8 +62,26 @@ struct axes_metadata

void add_string_axis(std::string name, std::vector<std::string> data);

void add_axis(const axis_base& axis);

template<typename... Args>
void zip_axes(Args&&... args)
{
(this->add_axis(std::forward<Args>(args)),...);
this->zip_axes({args.get_name()...});
}

void zip_axes(std::vector<std::string> names);

template<typename... Args>
void
user_iteration_axes(std::function<nvbench::make_user_space_signature> make,
Args&&... args)
{
(this->add_axis(std::forward<Args>(args)),...);
this->user_iteration_axes({args.get_name()...}, std::move(make));
}

void
user_iteration_axes(std::vector<std::string> names,
std::function<nvbench::make_user_space_signature> make);
26 changes: 8 additions & 18 deletions nvbench/axes_metadata.cxx
@@ -117,38 +117,28 @@ catch (std::exception &e)
void axes_metadata::add_float64_axis(std::string name,
std::vector<nvbench::float64_t> data)
{
m_value_space.push_back(
std::make_unique<linear_axis_space>(m_axes.size(),
m_axes.size() - m_type_axe_count));

auto axis = std::make_unique<nvbench::float64_axis>(std::move(name));
axis->set_inputs(std::move(data));
m_axes.push_back(std::move(axis));
this->add_axis(nvbench::float64_axis{name,data});
}

void axes_metadata::add_int64_axis(std::string name,
std::vector<nvbench::int64_t> data,
nvbench::int64_axis_flags flags)
{
m_value_space.push_back(
std::make_unique<linear_axis_space>(m_axes.size(),
m_axes.size() - m_type_axe_count));

auto axis = std::make_unique<nvbench::int64_axis>(std::move(name));
axis->set_inputs(std::move(data), flags);
m_axes.push_back(std::move(axis));
this->add_axis(nvbench::int64_axis{name,data,flags});
}

void axes_metadata::add_string_axis(std::string name,
std::vector<std::string> data)
{
this->add_axis(nvbench::string_axis{name,data});
}

void axes_metadata::add_axis(const axis_base& axis)
{
m_value_space.push_back(
std::make_unique<linear_axis_space>(m_axes.size(),
m_axes.size() - m_type_axe_count));

auto axis = std::make_unique<nvbench::string_axis>(std::move(name));
axis->set_inputs(std::move(data));
m_axes.push_back(std::move(axis));
m_axes.push_back(axis.clone());
}

namespace
14 changes: 14 additions & 0 deletions nvbench/benchmark_base.cuh
@@ -111,12 +111,26 @@ struct benchmark_base
return *this;
}

template<typename... Args>
benchmark_base &zip_axes(Args&&... args)
{
m_axes.zip_axes(std::forward<Args>(args)...);
return *this;
}

benchmark_base &zip_axes(std::vector<std::string> names)
{
m_axes.zip_axes(std::move(names));
return *this;
}

template<typename... Args>
benchmark_base &user_iteration_axes(Args&&... args)
{
m_axes.user_iteration_axes(std::forward<Args>(args)...);
return *this;
}

benchmark_base &
user_iteration_axes(std::vector<std::string> names,
std::function<nvbench::make_user_space_signature> make)
5 changes: 5 additions & 0 deletions nvbench/float64_axis.cuh
@@ -34,6 +34,11 @@ struct float64_axis final : public axis_base
, m_values{}
{}

explicit float64_axis(std::string name, std::vector<nvbench::float64_t> inputs)
: axis_base{std::move(name), axis_type::float64}
, m_values{std::move(inputs)}
{}

~float64_axis() final;

void set_inputs(std::vector<nvbench::float64_t> inputs)
4 changes: 4 additions & 0 deletions nvbench/int64_axis.cuh
@@ -51,6 +51,10 @@ struct int64_axis final : public axis_base
, m_flags{int64_axis_flags::none}
{}

explicit int64_axis(std::string name,
std::vector<int64_t> inputs,
int64_axis_flags flags = int64_axis_flags::none);

~int64_axis() final;

[[nodiscard]] bool is_power_of_two() const
47 changes: 36 additions & 11 deletions nvbench/int64_axis.cxx
@@ -26,23 +26,24 @@
#include <stdexcept>
#include <vector>

namespace nvbench
namespace
{

int64_axis::~int64_axis() = default;

void int64_axis::set_inputs(std::vector<int64_t> inputs, int64_axis_flags flags)
std::vector<nvbench::int64_t>
construct_values(nvbench::int64_axis_flags flags,
const std::vector<nvbench::int64_t> &inputs)
{
m_inputs = std::move(inputs);
m_flags = flags;

if (!this->is_power_of_two())
std::vector<int64_t> values;
const bool is_power_of_two =
static_cast<bool>(flags & nvbench::int64_axis_flags::power_of_two);
if (!is_power_of_two)
{
m_values = m_inputs;
values = inputs;
}
else
{
m_values.resize(m_inputs.size());
values.resize(inputs.size());

auto conv = [](int64_t in) -> int64_t {
if (in < 0 || in >= 64)
@@ -52,11 +53,35 @@ void int64_axis::set_inputs(std::vector<int64_t> inputs, int64_axis_flags flags)
"Input={} ValidRange=[0, 63]",
in);
}
return int64_axis::compute_pow2(in);
return nvbench::int64_axis::compute_pow2(in);
};

std::transform(m_inputs.cbegin(), m_inputs.cend(), m_values.begin(), conv);
std::transform(inputs.cbegin(), inputs.cend(), values.begin(), conv);
}

return values;
}
} // namespace

namespace nvbench
{

int64_axis::int64_axis(std::string name,
std::vector<int64_t> inputs,
int64_axis_flags flags)
: axis_base{std::move(name), axis_type::int64}
, m_inputs{std::move(inputs)}
, m_values{construct_values(flags, m_inputs)}
, m_flags{flags}
{}

int64_axis::~int64_axis() = default;

void int64_axis::set_inputs(std::vector<int64_t> inputs, int64_axis_flags flags)
{
m_inputs = std::move(inputs);
m_flags = flags;
m_values = construct_values(flags, m_inputs);
}

std::string int64_axis::do_get_input_string(std::size_t i) const
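As a usage note for the new constructor, a hedged sketch of how the flags interact with construct_values; the axis names here are illustrative only.

// Hypothetical usage of the new int64_axis constructor; assumes
// <nvbench/int64_axis.cuh> is included.
// With the power_of_two flag, the inputs are exponents and the axis values
// come from compute_pow2: {5, 6, 7, 8} -> {32, 64, 128, 256}.
nvbench::int64_axis pot_axis{"BlockSize",
                             {5, 6, 7, 8},
                             nvbench::int64_axis_flags::power_of_two};

// Without the flag, the values are simply a copy of the inputs.
nvbench::int64_axis plain_axis{"NumBlocks", {1024, 512, 256, 128}};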
5 changes: 5 additions & 0 deletions nvbench/string_axis.cuh
@@ -34,6 +34,11 @@ struct string_axis final : public axis_base
, m_values{}
{}

explicit string_axis(std::string name, std::vector<std::string> inputs)
: axis_base{std::move(name), axis_type::string}
, m_values{std::move(inputs)}
{}

~string_axis() final;

void set_inputs(std::vector<std::string> inputs)
21 changes: 10 additions & 11 deletions testing/axes_iteration_space.cu
@@ -81,9 +81,8 @@ void test_zip_axes()
{
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2, 4, 5});
bench.zip_axes({"F64 Axis", "I64 Axis"});
bench.zip_axes(nvbench::float64_axis("F64 Axis", {0., .1, .25, .5, 1.}),
nvbench::int64_axis("I64 Axis", {1, 3, 2, 4, 5}));

ASSERT_MSG(bench.get_config_count() == 5 * bench.get_devices().size(),
"Got {}",
@@ -107,11 +106,10 @@ void test_tie_unequal_length()
{
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2});

bench.zip_axes({"I64 Axis", "F64 Axis"});
ASSERT_THROWS_ANY(bench.zip_axes({"F64 Axis", "I64 Axis"}));
ASSERT_THROWS_ANY(
bench.zip_axes(nvbench::float64_axis("F64 Axis", {0., .1, .25, .5, 1.}),
nvbench::int64_axis("I64 Axis", {1, 3, 2})));
}

void test_tie_type_axi()
@@ -191,11 +189,11 @@ void test_tie_clone()
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
bench.set_devices(std::vector<int>{});
bench.add_string_axis("Strings", {"string a", "string b", "string c"});
bench.add_int64_power_of_two_axis("I64 POT Axis", {10, 20});
bench.add_int64_axis("I64 Axis", {10, 20});
bench.add_float64_axis("F64 Axis", {0., .1, .25});
bench.zip_axes({"F64 Axis", "Strings"});
bench.zip_axes(nvbench::string_axis("Strings",
{"string a", "string b", "string c"}),
nvbench::float64_axis("F64 Axis", {0., .1, .25}));

const auto expected_count = bench.get_config_count();

@@ -237,7 +235,8 @@ struct under_diag final : nvbench::user_axis_space
{
under_diag(std::vector<std::size_t> input_indices,
std::vector<std::size_t> output_indices)
: nvbench::user_axis_space(std::move(input_indices), std::move(output_indices))
: nvbench::user_axis_space(std::move(input_indices),
std::move(output_indices))
{}

mutable std::size_t x_pos = 0;
