Skip to content

Commit

Permalink
Fixed #1339: Update argument order for transform_reduce
Browse files Browse the repository at this point in the history
  • Loading branch information
hkaiser committed Dec 26, 2014
1 parent df4369a commit aac20fe
Show file tree
Hide file tree
Showing 7 changed files with 62 additions and 53 deletions.
9 changes: 5 additions & 4 deletions examples/quickstart/vector_counting_dotproduct.cpp
Expand Up @@ -25,11 +25,12 @@ int hpx_main()
hpx::parallel::par,
boost::counting_iterator<size_t>(0),
boost::counting_iterator<size_t>(10007),
[&xvalues, &yvalues](size_t i)
{
return xvalues[i] * yvalues[i];
},
0.0,
std::plus<double>(),
[&xvalues, &yvalues](size_t i){
return xvalues[i] * yvalues[i];
}
std::plus<double>()
);
// print the result
hpx::cout << result << hpx::endl;
Expand Down
6 changes: 3 additions & 3 deletions examples/quickstart/vector_zip_dotproduct.cpp
Expand Up @@ -27,12 +27,12 @@ int hpx_main()
hpx::parallel::par,
make_zip_iterator(boost::begin(xvalues), boost::begin(yvalues)),
make_zip_iterator(boost::end(xvalues), boost::end(yvalues)),
0.0,
std::plus<double>(),
[](tuple<double, double> r)
{
return get<0>(r) * get<1>(r);
}
},
0.0,
std::plus<double>()
);
// print the result
hpx::cout << result << hpx::endl;
Expand Down
37 changes: 19 additions & 18 deletions hpx/parallel/algorithms/transform_reduce.hpp
Expand Up @@ -67,8 +67,9 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
{
if (first == last)
{
T init_ = init;
return detail::algorithm_result<ExPolicy, T>::get(
std::forward<T_>(init));
std::move(init_));
}

typedef typename std::iterator_traits<FwdIter>::reference
Expand Down Expand Up @@ -160,21 +161,6 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
/// the algorithm will be applied to.
/// \param last Refers to the end of the sequence of elements the
/// algorithm will be applied to.
/// \param red_op Specifies the function (or function object) which
/// will be invoked for each of the values returned
/// from the invocation of \a conv_op. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type2 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1, \a Type2, and \a Ret must be
/// such that an object of a type as returned from
/// \a conv_op can be implicitly converted to any
/// of those types.
/// \param conv_op Specifies the function (or function object) which
/// will be invoked for each of the elements in the
/// sequence specified by [first, last). This is a
Expand All @@ -191,6 +177,21 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
/// The type \a R must be such that an object of this
/// type can be implicitly converted to \a T.
/// \param init The initial value for the generalized sum.
/// \param red_op Specifies the function (or function object) which
/// will be invoked for each of the values returned
/// from the invocation of \a conv_op. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type2 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1, \a Type2, and \a Ret must be
/// such that an object of a type as returned from
/// \a conv_op can be implicitly converted to any
/// of those types.
///
/// The reduce operations in the parallel \a transform_reduce algorithm invoked
/// with an execution policy object of type \a sequential_execution_policy
Expand Down Expand Up @@ -227,8 +228,8 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
is_execution_policy<ExPolicy>,
typename detail::algorithm_result<ExPolicy, T>::type
>::type
transform_reduce(ExPolicy&& policy, InIter first, InIter last, T init,
Reduce && red_op, Convert && conv_op)
transform_reduce(ExPolicy&& policy, InIter first, InIter last,
Convert && conv_op, T init, Reduce && red_op)
{
typedef typename std::iterator_traits<InIter>::iterator_category
iterator_category;
Expand Down
6 changes: 3 additions & 3 deletions hpx/parallel/segmented_algorithms/transform_reduce.hpp
Expand Up @@ -132,7 +132,7 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
typedef hpx::traits::segmented_iterator_traits<SegIter> traits;
typedef typename traits::segment_iterator segment_iterator;
typedef typename traits::local_iterator local_iterator_type;
typedef detail::algorithm_result<ExPolicy> result;
typedef detail::algorithm_result<ExPolicy, T> result;

typedef typename std::iterator_traits<SegIter>::iterator_category
iterator_category;
Expand All @@ -143,7 +143,7 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
segment_iterator sit = traits::segment(first);
segment_iterator send = traits::segment(last);

std::vector<future<T> > segments;
std::vector<shared_future<T> > segments;
segments.reserve(std::distance(sit, send));

if (sit == send)
Expand Down Expand Up @@ -210,7 +210,7 @@ namespace hpx { namespace parallel { HPX_INLINE_NAMESPACE(v1)
}

return result::get(
dataflow(
lcos::local::dataflow(
hpx::util::unwrapped([=](std::vector<T> && r)
{
return std::accumulate(r.begin(), r.end(), init, red_op);
Expand Down
20 changes: 10 additions & 10 deletions tests/performance/local/transform_reduce_scaling.cpp
Expand Up @@ -26,20 +26,20 @@ struct Point
///////////////////////////////////////////////////////////////////////////////
void measure_transform_reduce(std::size_t size)
{
std::vector<Point> data_representation(size,
std::vector<Point> data_representation(size,
Point{double(std::rand()), double(std::rand())});

//invoke transform_reduce
double result =
// invoke transform_reduce
double result =
hpx::parallel::transform_reduce(hpx::parallel::par,
boost::begin(data_representation),
boost::end(data_representation),
0.0,
std::plus<double>(),
[](Point r)
{
return r.x * r.y;
}
},
0.0,
std::plus<double>()
);
}

Expand All @@ -54,15 +54,15 @@ void measure_transform_reduce_old(std::size_t size)
boost::begin(data_representation),
boost::end(data_representation),
Point{0.0, 0.0},
[](Point res, Point curr)
[](Point res, Point curr)
{
return Point{
res.x * res.y + curr.x * curr.y, 1.0};
}
);
}

boost::uint64_t average_out_transform_reduce(std::size_t vector_size)
boost::uint64_t average_out_transform_reduce(std::size_t vector_size)
{
measure_transform_reduce(vector_size);
return boost::uint64_t(1);
Expand Down Expand Up @@ -111,11 +111,11 @@ int main(int argc, char* argv[])
("vector_size"
, boost::program_options::value<std::size_t>()->default_value(1000)
, "size of vector")

("csv_output"
, boost::program_options::value<int>()->default_value(0)
, "print results in csv format")

("test_count"
, boost::program_options::value<int>()->default_value(100)
, "number of tests to take average from")
Expand Down
5 changes: 4 additions & 1 deletion tests/unit/components/vector_transform_reduce.cpp
Expand Up @@ -37,7 +37,7 @@ T test_transform_reduce(ExPolicy && policy, hpx::vector<T> const& xvalues,
hpx::parallel::transform_reduce(policy,
make_zip_iterator(boost::begin(xvalues), boost::begin(yvalues)),
make_zip_iterator(boost::end(xvalues), boost::end(yvalues)),
T(0), std::plus<T>(), multiply()
multiply(), T(0), std::plus<T>()
);
}

Expand All @@ -53,6 +53,9 @@ void transform_reduce_tests()
HPX_TEST_EQ(
test_transform_reduce(hpx::parallel::seq, xvalues, yvalues),
T(num));
HPX_TEST_EQ(
test_transform_reduce(hpx::parallel::par, xvalues, yvalues),
T(num));
}
}

Expand Down
32 changes: 18 additions & 14 deletions tests/unit/parallel/transform_reduce.cpp
Expand Up @@ -44,7 +44,7 @@ void test_transform_reduce(ExPolicy const& policy, IteratorTag)
result_type r1 =
hpx::parallel::transform_reduce(policy,
iterator(boost::begin(c)), iterator(boost::end(c)),
init, reduce_op, convert_op);
convert_op, init, reduce_op);

// verify values
result_type r2 =
Expand Down Expand Up @@ -77,7 +77,7 @@ void test_transform_reduce_async(ExPolicy const& p, IteratorTag)
hpx::future<std::size_t> f =
hpx::parallel::transform_reduce(p,
iterator(boost::begin(c)), iterator(boost::end(c)),
val, op, [](std::size_t v){return v;});
[](std::size_t v){ return v; }, val, op);
f.wait();

// verify values
Expand Down Expand Up @@ -128,11 +128,12 @@ void test_transform_reduce_exception(ExPolicy const& policy, IteratorTag)
try {
hpx::parallel::transform_reduce(policy,
iterator(boost::begin(c)), iterator(boost::end(c)),
[](std::size_t v){ return v; },
std::size_t(42),
[](std::size_t v1, std::size_t v2) {
[](std::size_t v1, std::size_t v2)
{
return throw std::runtime_error("test"), v1 + v2;
},
[](std::size_t v){return v;}
}
);

HPX_TEST(false);
Expand Down Expand Up @@ -163,11 +164,12 @@ void test_transform_reduce_exception_async(ExPolicy const& p, IteratorTag)
hpx::future<void> f =
hpx::parallel::transform_reduce(p,
iterator(boost::begin(c)), iterator(boost::end(c)),
[](std::size_t v){ return v; },
std::size_t(42),
[](std::size_t v1, std::size_t v2) {
[](std::size_t v1, std::size_t v2)
{
return throw std::runtime_error("test"), v1 + v2;
},
[](std::size_t v){ return v; }
}
);
returned_from_algorithm = true;
f.get();
Expand Down Expand Up @@ -230,11 +232,12 @@ void test_transform_reduce_bad_alloc(ExPolicy const& policy, IteratorTag)
try {
hpx::parallel::transform_reduce(policy,
iterator(boost::begin(c)), iterator(boost::end(c)),
[](std::size_t v){ return v; },
std::size_t(42),
[](std::size_t v1, std::size_t v2) {
[](std::size_t v1, std::size_t v2)
{
return throw std::bad_alloc(), v1 + v2;
},
[](std::size_t v){return v;}
}
);

HPX_TEST(false);
Expand Down Expand Up @@ -264,11 +267,12 @@ void test_transform_reduce_bad_alloc_async(ExPolicy const& p, IteratorTag)
hpx::future<void> f =
hpx::parallel::transform_reduce(p,
iterator(boost::begin(c)), iterator(boost::end(c)),
[](std::size_t v){ return v; },
std::size_t(42),
[](std::size_t v1, std::size_t v2) {
[](std::size_t v1, std::size_t v2)
{
return throw std::bad_alloc(), v1 + v2;
},
[](std::size_t v){return v;}
}
);
returned_from_algorithm = true;
f.get();
Expand Down

0 comments on commit aac20fe

Please sign in to comment.