
Rename tie_axes to zip_axes
robertmaynard committed Feb 28, 2022
1 parent 344878e commit 182f1da
Showing 7 changed files with 38 additions and 37 deletions.
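The change is a straight rename: every `tie_axes` call site becomes `zip_axes` with identical arguments. For orientation before the per-file diff below, here is a minimal usage sketch of the renamed API; the benchmark name and no-op body are hypothetical placeholders, while the axis calls mirror examples/custom_iteration_spaces.cu in this commit.

```cpp
// Hypothetical minimal benchmark; only the final .zip_axes(...) call is what
// this commit renames (it was previously .tie_axes(...)).
#include <nvbench/nvbench.cuh>

void sketch_copy_bench(nvbench::state &state)
{
  state.exec([](nvbench::launch &) { /* no-op body for illustration */ });
}

NVBENCH_BENCH(sketch_copy_bench)
  .add_int64_axis("BlockSize", {32, 64, 128, 256})
  .add_int64_axis("NumBlocks", {1024, 512, 256, 128})
  .zip_axes({"BlockSize", "NumBlocks"});
```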
docs/benchmarks.md (2 additions, 1 deletion)
@@ -260,7 +260,8 @@ using output_types = nvbench::type_list<float, double>;
NVBENCH_BENCH_TYPES(benchmark, NVBENCH_TYPE_AXES(input_types, output_types))
.set_type_axes_names({"InputType", "OutputType"})
.add_int64_axis("NumInputs", {1000, 10000, 100000, 200000, 200000, 200000})
.add_float64_axis("Quality", {0.05, 0.1, 0.25, 0.5, 0.75, 1.});
.add_float64_axis("Quality", {0.05, 0.1, 0.25, 0.5, 0.75, 1.})
.zip_axes({"NumInputs", "Quality"});
```
This tieing reduces the total combinations from 24 to 6, reducing the
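For context on the combination-count reduction described above: zipped axes advance in lockstep, so together they contribute a single shared length to the config count instead of the product of their lengths. A standalone sketch of that arithmetic, borrowing the two five-value axes from test_zip_axes later in this diff (illustrative only, not nvbench code):

```cpp
// Cartesian product vs. zipped configuration count for two five-value axes.
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<double> f64_axis{0., .1, .25, .5, 1.};
  const std::vector<int>    i64_axis{1, 3, 2, 4, 5};

  const std::size_t cartesian = f64_axis.size() * i64_axis.size(); // 25 combos
  const std::size_t zipped    = f64_axis.size();                   // 5 combos

  std::printf("cartesian: %zu, zipped: %zu\n", cartesian, zipped);
  return 0;
}
```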
examples/custom_iteration_spaces.cu (1 addition, 1 deletion)
@@ -74,7 +74,7 @@ NVBENCH_BENCH(tied_copy_sweep_grid_shape)
// Every power of two from 64->1024:
.add_int64_axis("BlockSize", {32,64,128,256})
.add_int64_axis("NumBlocks", {1024,512,256,128})
.tie_axes({"BlockSize", "NumBlocks"});
.zip_axes({"BlockSize", "NumBlocks"});

//==============================================================================
// under_diag:
nvbench/axes_metadata.cuh (1 addition, 1 deletion)
@@ -62,7 +62,7 @@ struct axes_metadata

void add_string_axis(std::string name, std::vector<std::string> data);

- void tie_axes(std::vector<std::string> names);
+ void zip_axes(std::vector<std::string> names);

void
user_iteration_axes(std::vector<std::string> names,
nvbench/axes_metadata.cxx (2 additions, 2 deletions)
@@ -238,12 +238,12 @@ void reset_iteration_space(
}
} // namespace

- void axes_metadata::tie_axes(std::vector<std::string> names)
+ void axes_metadata::zip_axes(std::vector<std::string> names)
{
NVBENCH_THROW_IF((names.size() < 2),
std::runtime_error,
"At least two axi names ( {} provided ) need to be provided "
"when using tie_axes.",
"when using zip_axes.",
names.size());

// compute the numeric indice for each name we have
nvbench/axis_iteration_space.cxx (7 additions, 7 deletions)
@@ -127,14 +127,14 @@ std::unique_ptr<axis_space_base> linear_axis_space::do_clone() const
return std::make_unique<linear_axis_space>(*this);
}

- tie_axis_space::tie_axis_space(std::vector<std::size_t> input_indices,
+ zip_axis_space::zip_axis_space(std::vector<std::size_t> input_indices,
std::vector<std::size_t> output_indices)
: axis_space_base(std::move(input_indices), std::move(output_indices))
{}

- tie_axis_space::~tie_axis_space() = default;
+ zip_axis_space::~zip_axis_space() = default;

- detail::axis_space_iterator tie_axis_space::do_iter(axes_info info) const
+ detail::axis_space_iterator zip_axis_space::do_iter(axes_info info) const
{
std::vector<std::size_t> locs = m_output_indices;
auto update_func = [=](std::size_t inc_index,
@@ -150,19 +150,19 @@ detail::axis_space_iterator tie_axis_space::do_iter(axes_info info) const
return detail::make_space_iterator(locs.size(), info[0].size, update_func);
}

- std::size_t tie_axis_space::do_size(const axes_info &info) const
+ std::size_t zip_axis_space::do_size(const axes_info &info) const
{
return info[0].size;
}

- std::size_t tie_axis_space::do_valid_count(const axes_info &info) const
+ std::size_t zip_axis_space::do_valid_count(const axes_info &info) const
{
return info[0].active_size;
}

- std::unique_ptr<axis_space_base> tie_axis_space::do_clone() const
+ std::unique_ptr<axis_space_base> zip_axis_space::do_clone() const
{
- return std::make_unique<tie_axis_space>(*this);
+ return std::make_unique<zip_axis_space>(*this);
}

user_axis_space::user_axis_space(std::vector<std::size_t> input_indices,
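The renamed zip_axis_space above is where the lockstep behavior lives: do_size() and do_valid_count() report the first member axis's size and active size, and the iterator advances every member axis to the same position. A toy model of that idea follows; it is an illustrative sketch under those assumptions, not the actual nvbench class.

```cpp
// Toy model of a zipped axis space: all member axes are read at one shared
// position, and the first member's length drives how many positions exist.
#include <cstddef>
#include <cstdio>
#include <vector>

struct toy_zip_space
{
  std::vector<std::size_t> member_lengths; // lengths of the zipped axes

  // Mirrors the idea behind do_size(): the first member determines the count.
  std::size_t size() const { return member_lengths.front(); }

  void iterate() const
  {
    for (std::size_t pos = 0; pos < size(); ++pos)
    {
      // Every member axis is advanced together to `pos`.
      std::printf("shared position %zu across %zu axes\n",
                  pos,
                  member_lengths.size());
    }
  }
};

int main()
{
  const toy_zip_space space{{5, 5}}; // two zipped axes of five values each
  space.iterate();                   // five configurations, not twenty-five
  return 0;
}
```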
nvbench/benchmark_base.cuh (2 additions, 2 deletions)
@@ -111,9 +111,9 @@ struct benchmark_base
return *this;
}

- benchmark_base &tie_axes(std::vector<std::string> names)
+ benchmark_base &zip_axes(std::vector<std::string> names)
{
- m_axes.tie_axes(std::move(names));
+ m_axes.zip_axes(std::move(names));
return *this;
}

testing/axes_iteration_space.cu (23 additions, 23 deletions)
@@ -77,13 +77,13 @@ void template_no_op_generator(nvbench::state &state,
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator,
template_no_op_callable);

- void test_tie_axes()
+ void test_zip_axes()
{
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2, 4, 5});
bench.tie_axes({"F64 Axis", "I64 Axis"});
bench.zip_axes({"F64 Axis", "I64 Axis"});

ASSERT_MSG(bench.get_config_count() == 5 * bench.get_devices().size(),
"Got {}",
@@ -97,10 +97,10 @@ void test_tie_invalid_names()
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2});

ASSERT_THROWS_ANY(bench.tie_axes({"F32 Axis", "I64 Axis"}));
ASSERT_THROWS_ANY(bench.tie_axes({"F32 Axis"}));
ASSERT_THROWS_ANY(bench.tie_axes({""}));
ASSERT_THROWS_ANY(bench.tie_axes(std::vector<std::string>()));
ASSERT_THROWS_ANY(bench.zip_axes({"F32 Axis", "I64 Axis"}));
ASSERT_THROWS_ANY(bench.zip_axes({"F32 Axis"}));
ASSERT_THROWS_ANY(bench.zip_axes({""}));
ASSERT_THROWS_ANY(bench.zip_axes(std::vector<std::string>()));
}

void test_tie_unequal_length()
@@ -110,8 +110,8 @@ void test_tie_unequal_length()
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2});

bench.tie_axes({"I64 Axis", "F64 Axis"});
ASSERT_THROWS_ANY(bench.tie_axes({"F64 Axis", "I64 Axis"}));
bench.zip_axes({"I64 Axis", "F64 Axis"});
ASSERT_THROWS_ANY(bench.zip_axes({"F64 Axis", "I64 Axis"}));
}

void test_tie_type_axi()
@@ -126,10 +126,10 @@ void test_tie_type_axi()
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
bench.add_int64_axis("I64 Axis", {1, 3, 2});

ASSERT_THROWS_ANY(bench.tie_axes({"F64 Axis", "Float"}));
ASSERT_THROWS_ANY(bench.zip_axes({"F64 Axis", "Float"}));
}

- void test_retie_axes()
+ void test_rezip_axes()
{
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
@@ -142,20 +142,20 @@ void test_retie_axes()
.1,
});

bench.tie_axes({"FAxis_5", "IAxis_A"});
bench.tie_axes({"IAxis_B", "FAxis_5", "IAxis_A"}); // re-tie
bench.zip_axes({"FAxis_5", "IAxis_A"});
bench.zip_axes({"IAxis_B", "FAxis_5", "IAxis_A"}); // re-tie

ASSERT_MSG(bench.get_config_count() == 10 * bench.get_devices().size(),
"Got {}",
bench.get_config_count());

bench.tie_axes({"FAxis_5", "IAxis_A"});
bench.zip_axes({"FAxis_5", "IAxis_A"});
ASSERT_MSG(bench.get_config_count() == 50 * bench.get_devices().size(),
"Got {}",
bench.get_config_count());
}

- void test_retie_axes2()
+ void test_rezip_axes2()
{
using benchmark_type = nvbench::benchmark<no_op_callable>;
benchmark_type bench;
@@ -170,17 +170,17 @@ void test_retie_axes2()
.1,
});

bench.tie_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
bench.tie_axes({"FAxis_1", "FAxis_2"});
bench.tie_axes(
bench.zip_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
bench.zip_axes({"FAxis_1", "FAxis_2"});
bench.zip_axes(
{"IAxis_A", "IAxis_B", "IAxis_C", "FAxis_1", "FAxis_2"}); // re-tie

ASSERT_MSG(bench.get_config_count() == 10 * bench.get_devices().size(),
"Got {}",
bench.get_config_count());

bench.tie_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
bench.tie_axes({"FAxis_1", "FAxis_2"});
bench.zip_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
bench.zip_axes({"FAxis_1", "FAxis_2"});
ASSERT_MSG(bench.get_config_count() == 50 * bench.get_devices().size(),
"Got {}",
bench.get_config_count());
@@ -195,7 +195,7 @@ void test_tie_clone()
bench.add_int64_power_of_two_axis("I64 POT Axis", {10, 20});
bench.add_int64_axis("I64 Axis", {10, 20});
bench.add_float64_axis("F64 Axis", {0., .1, .25});
bench.tie_axes({"F64 Axis", "Strings"});
bench.zip_axes({"F64 Axis", "Strings"});

const auto expected_count = bench.get_config_count();

@@ -316,11 +316,11 @@ void test_user_axes()

int main()
{
- test_tie_axes();
+ test_zip_axes();
test_tie_invalid_names();
test_tie_unequal_length();
test_tie_type_axi();
- test_retie_axes();
- test_retie_axes2();
+ test_rezip_axes();
+ test_rezip_axes2();
test_tie_clone();
}
