Convert tests to use device_uvector (#8205)
Converts all remaining tests to use device_uvector instead of device_vector. 

Contributes to #7287

Also converts a lot of `std::vector` in tests to `thrust::host_vector` to avoid problems with the bit-packed `std::vector<bool>` specialization, which does not provide contiguous `bool` storage.

Adds new utilities `cudf::detail::make_host_vector_async` and `cudf::detail::make_host_vector_sync`, which create a `thrust::host_vector<T>` from a `device_span`. Also makes it possible to create a `host_span` from a `std::string`.
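
For illustration, here is a minimal sketch of the intended round-trip in a test, assuming a caller-provided stream (the function and variable names are hypothetical; the factory names are the ones added or updated in this change):

```cpp
#include <cudf/detail/utilities/vector_factories.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>

#include <thrust/host_vector.h>

#include <vector>

void roundtrip_example(rmm::cuda_stream_view stream)
{
  // Host -> device: rmm::device_uvector instead of thrust::device_vector.
  std::vector<int> const h_input{1, 2, 3, 4};
  rmm::device_uvector<int> d_data =
    cudf::detail::make_device_uvector_async(h_input, stream);

  // Device -> host: thrust::host_vector instead of std::vector, sidestepping
  // the bit-packed std::vector<bool> specialization for bool test data.
  thrust::host_vector<int> h_output =
    cudf::detail::make_host_vector_sync(d_data, stream);
}
```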

Authors:
  - Mark Harris (https://github.com/harrism)

Approvers:
  - Robert Maynard (https://github.com/robertmaynard)
  - Nghia Truong (https://github.com/ttnghia)

URL: #8205
harrism committed May 12, 2021
1 parent bda8457 commit cdf09ad
Showing 32 changed files with 613 additions and 467 deletions.
116 changes: 102 additions & 14 deletions cpp/include/cudf/detail/utilities/vector_factories.hpp
@@ -47,7 +47,7 @@ namespace detail {
template <typename T>
rmm::device_uvector<T> make_zeroed_device_uvector_async(
std::size_t size,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
rmm::device_uvector<T> ret(size, stream, mr);
@@ -93,7 +93,7 @@ rmm::device_uvector<T> make_zeroed_device_uvector_sync(
template <typename T>
rmm::device_uvector<T> make_device_uvector_async(
host_span<T const> source_data,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
rmm::device_uvector<T> ret(source_data.size(), stream, mr);
@@ -124,7 +124,7 @@ template <typename Container,
host_span<typename Container::value_type const>>::value>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return make_device_uvector_async(host_span<typename Container::value_type const>{c}, stream, mr);
@@ -177,7 +177,7 @@ template <
nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return make_device_uvector_async(
@@ -281,6 +281,16 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
return make_device_uvector_sync(device_span<typename Container::value_type const>{c}, stream, mr);
}

// Utility function template to allow copying to either a thrust::host_vector or std::vector
template <typename T, typename OutContainer>
OutContainer make_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
OutContainer result(v.size());
CUDA_TRY(cudaMemcpyAsync(
result.data(), v.data(), v.size() * sizeof(T), cudaMemcpyDeviceToHost, stream.value()));
return result;
}

/**
* @brief Asynchronously construct a `std::vector` containing a copy of data from a
* `device_span`
@@ -293,13 +303,9 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_async(device_span<T const> v,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<T> make_std_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
std::vector<T> result(v.size());
CUDA_TRY(cudaMemcpyAsync(
result.data(), v.data(), v.size() * sizeof(T), cudaMemcpyDeviceToHost, stream.value()));
return result;
return make_vector_async<T, std::vector<T>>(v, stream);
}

/**
@@ -319,8 +325,8 @@ template <
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
std::vector<typename Container::value_type> make_std_vector_async(
Container const& c, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<typename Container::value_type> make_std_vector_async(Container const& c,
rmm::cuda_stream_view stream)
{
return make_std_vector_async(device_span<typename Container::value_type const>{c}, stream);
}
@@ -337,8 +343,7 @@ std::vector<typename Container::value_type> make_std_vector_async(
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_sync(device_span<T const> v,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<T> make_std_vector_sync(device_span<T const> v, rmm::cuda_stream_view stream)
{
auto result = make_std_vector_async(v, stream);
stream.synchronize();
@@ -368,6 +373,89 @@ std::vector<typename Container::value_type> make_std_vector_sync(
return make_std_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}

/**
* @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param v The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
return make_vector_async<T, thrust::host_vector<T>>(v, stream);
}

/**
* @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a
* device container
*
* @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_async(
Container const& c, rmm::cuda_stream_view stream)
{
return make_host_vector_async(device_span<typename Container::value_type const>{c}, stream);
}

/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
* @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param v The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_sync(
device_span<T const> v, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
auto result = make_host_vector_async(v, stream);
stream.synchronize();
return result;
}

/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a
* device container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_sync(
Container const& c, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
return make_host_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}

} // namespace detail

} // namespace cudf
5 changes: 5 additions & 0 deletions cpp/include/cudf/utilities/span.hpp
@@ -120,6 +120,11 @@ struct is_host_span_supported_container< //
thrust::host_vector<T, Alloc>> : std::true_type {
};

template <typename T, typename Alloc>
struct is_host_span_supported_container< //
std::basic_string<T, std::char_traits<T>, Alloc>> : std::true_type {
};

template <typename T, std::size_t Extent = cudf::dynamic_extent>
struct host_span : public cudf::detail::span_base<T, Extent, host_span<T, Extent>> {
using base = cudf::detail::span_base<T, Extent, host_span<T, Extent>>;
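
A brief sketch of what this new trait specialization enables: constructing a `host_span<char const>` directly over a `std::string` (the surrounding test code is illustrative only):

```cpp
#include <cudf/utilities/span.hpp>

#include <cassert>
#include <string>

int main()
{
  std::string const greeting{"hello"};

  // With std::basic_string registered as a supported host container,
  // host_span can view the string's contiguous characters directly.
  cudf::host_span<char const> chars{greeting};

  assert(chars.size() == greeting.size());
  assert(chars.data() == greeting.data());
  return 0;
}
```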
11 changes: 7 additions & 4 deletions cpp/include/cudf_test/type_lists.hpp
@@ -25,6 +25,8 @@
#include <cudf/wrappers/timestamps.hpp>
#include <cudf_test/type_list_utilities.hpp>

#include <thrust/host_vector.h>

#include <array>
#include <tuple>

@@ -79,10 +81,10 @@ constexpr auto types_to_ids()
template <typename TypeParam, typename T>
typename std::enable_if<cudf::is_fixed_width<TypeParam>() &&
!cudf::is_timestamp_t<TypeParam>::value,
std::vector<TypeParam>>::type
thrust::host_vector<TypeParam>>::type
make_type_param_vector(std::initializer_list<T> const& init_list)
{
std::vector<TypeParam> vec(init_list.size());
thrust::host_vector<TypeParam> vec(init_list.size());
std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) {
if (std::is_unsigned<TypeParam>::value)
return static_cast<TypeParam>(std::abs(e));
@@ -93,10 +95,11 @@ make_type_param_vector(std::initializer_list<T> const& init_list)
}

template <typename TypeParam, typename T>
typename std::enable_if<cudf::is_timestamp_t<TypeParam>::value, std::vector<TypeParam>>::type
typename std::enable_if<cudf::is_timestamp_t<TypeParam>::value,
thrust::host_vector<TypeParam>>::type
make_type_param_vector(std::initializer_list<T> const& init_list)
{
std::vector<TypeParam> vec(init_list.size());
thrust::host_vector<TypeParam> vec(init_list.size());
std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) {
return TypeParam{typename TypeParam::duration{e}};
});
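
For context, a hedged sketch of how a test might consume this helper now that it returns a `thrust::host_vector` (the element type and values are illustrative):

```cpp
#include <cudf_test/type_lists.hpp>

#include <thrust/host_vector.h>

#include <cstdint>

void example()
{
  // Returns thrust::host_vector<int32_t>; for unsigned TypeParams the helper
  // takes std::abs of each value before casting.
  thrust::host_vector<int32_t> vec =
    cudf::test::make_type_param_vector<int32_t>({-1, 0, 1, 2});

  // Because thrust::host_vector is a supported host_span container, the
  // result can be passed straight to the detail device factories, e.g.
  //   auto d_vec = cudf::detail::make_device_uvector_async(vec, stream);
}
```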
11 changes: 3 additions & 8 deletions cpp/src/copying/concatenate.cu
@@ -57,9 +57,6 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
column_device_view::create(std::declval<column_view>(), std::declval<rmm::cuda_stream_view>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
// TODO creating this device view can invoke null count computation
// even though it isn't used. See this issue:
// https://github.com/rapidsai/cudf/issues/4368
return column_device_view::create(col, stream);
});

@@ -70,10 +67,8 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
// TODO each of these device vector copies invoke stream synchronization
// which appears to add unnecessary overhead. See this issue:
// https://github.com/rapidsai/rmm/issues/120
auto d_views = make_device_uvector_async(device_views);

auto d_views = make_device_uvector_async(device_views, stream);

// Compute the partition offsets
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
@@ -84,7 +79,7 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus<size_t>{});
auto d_offsets = make_device_uvector_async(offsets);
auto d_offsets = make_device_uvector_async(offsets, stream);
auto const output_size = offsets.back();

return std::make_tuple(
4 changes: 2 additions & 2 deletions cpp/tests/CMakeLists.txt
@@ -342,7 +342,7 @@ ConfigureTest(STRINGS_TEST
strings/fixed_point_tests.cpp
strings/floats_tests.cpp
strings/hash_string.cu
strings/integers_tests.cu
strings/integers_tests.cpp
strings/ipv4_tests.cpp
strings/json_tests.cpp
strings/pad_tests.cpp
@@ -375,7 +375,7 @@ ConfigureTest(TEXT_TEST
ConfigureTest(BITMASK_TEST
bitmask/valid_if_tests.cu
bitmask/set_nullmask_tests.cu
bitmask/bitmask_tests.cu
bitmask/bitmask_tests.cpp
bitmask/is_element_valid_tests.cpp)

