Complete the deprecation of duplicated hpp headers (#793)
Replace every .hpp header that has a same-named .cuh header in the same
directory with a simple include of the .cuh header and a #pragma message
warning of deprecation.

This change hopefully prevents future head-scratching when changes in a
file are seemingly not picked up by the compiler.

Care has been taken to preserve the correct start year in each copyright
line; end years have been updated to 2022 where necessary.

The following template has been used for the .hpp header replacement
text:


```
/*
 * %%COPYRIGHT_LINE%%
 *
 * [... snip license .. ]
 */
/**
 * This file is deprecated and will be removed in release 22.06.
 * Please use the cuh version instead.
 */

/**
 * DISCLAIMER: this file is deprecated: use %%CUH_FILE%% instead
 */
#pragma once

#pragma message(__FILE__                                                               \
                " is deprecated and will be removed in a future release." \
                " Please use the cuh version instead.")
```
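
With this stub in place, existing includes keep compiling (while emitting
the warning above) until the header is removed. Migrating downstream code
is a one-line change per include, as the benchmark and documentation
updates below illustrate:

```c++
// Before: the deprecated wrapper still compiles, but triggers the
// #pragma message at build time.
#include <raft/linalg/add.hpp>

// After: include the cuh header directly.
#include <raft/linalg/add.cuh>
```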

Authors:
  - Allard Hendriksen (https://github.com/ahendriksen)

Approvers:
  - Corey J. Nolet (https://github.com/cjnolet)

URL: #793
Allard Hendriksen committed Sep 2, 2022
1 parent c2e7e90 commit ff133d4
Showing 96 changed files with 567 additions and 5,464 deletions.
4 changes: 2 additions & 2 deletions BUILD.md
@@ -205,8 +205,8 @@ The pre-compiled libraries contain template specializations for commonly used ty

The following example tells the compiler to ignore the pre-compiled templates for the `libraft-distance` API so any symbols already compiled into pre-compiled shared library will be used instead:
```c++
-#include <raft/distance/distance.hpp>
-#include <raft/distance/specializations.hpp>
+#include <raft/distance/distance.cuh>
+#include <raft/distance/specializations.cuh>
```

### <a id="build_cxx_source"></a>Building RAFT C++ from source in cmake
4 changes: 2 additions & 2 deletions cpp/bench/distance/distance_common.cuh
@@ -16,9 +16,9 @@

#include <common/benchmark.hpp>
#include <raft/cudart_utils.h>
-#include <raft/distance/distance.hpp>
+#include <raft/distance/distance.cuh>
#if defined RAFT_DISTANCE_COMPILED
-#include <raft/distance/specializations.hpp>
+#include <raft/distance/specializations.cuh>
#endif
#include <rmm/device_uvector.hpp>

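Isolated from the diff above, the consumer pattern used throughout these
benchmarks reads: include the public cuh API unconditionally, and pull in
the pre-compiled specializations only when the `RAFT_DISTANCE_COMPILED`
macro signals that the pre-compiled `libraft-distance` library is linked
in (per the BUILD.md section above):

```c++
// The cuh API is always included; the specializations header only when
// linking against the pre-compiled shared library.
#include <raft/distance/distance.cuh>
#if defined RAFT_DISTANCE_COMPILED
#include <raft/distance/specializations.cuh>
#endif
```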
2 changes: 1 addition & 1 deletion cpp/bench/linalg/add.cu
@@ -15,7 +15,7 @@
*/

#include <common/benchmark.hpp>
-#include <raft/linalg/add.hpp>
+#include <raft/linalg/add.cuh>
#include <rmm/device_uvector.hpp>

namespace raft::bench::linalg {
2 changes: 1 addition & 1 deletion cpp/bench/linalg/map_then_reduce.cu
@@ -15,7 +15,7 @@
*/

#include <common/benchmark.hpp>
-#include <raft/linalg/map_then_reduce.hpp>
+#include <raft/linalg/map_then_reduce.cuh>
#include <rmm/device_uvector.hpp>

namespace raft::bench::linalg {
2 changes: 1 addition & 1 deletion cpp/bench/linalg/matrix_vector_op.cu
@@ -15,7 +15,7 @@
*/

#include <common/benchmark.hpp>
-#include <raft/linalg/matrix_vector_op.hpp>
+#include <raft/linalg/matrix_vector_op.cuh>
#include <rmm/device_uvector.hpp>

namespace raft::bench::linalg {
2 changes: 1 addition & 1 deletion cpp/bench/linalg/reduce.cu
@@ -15,7 +15,7 @@
*/

#include <common/benchmark.hpp>
-#include <raft/linalg/reduce.hpp>
+#include <raft/linalg/reduce.cuh>

#include <rmm/device_uvector.hpp>

2 changes: 1 addition & 1 deletion cpp/bench/random/make_blobs.cu
@@ -15,7 +15,7 @@
*/

#include <common/benchmark.hpp>
-#include <raft/random/make_blobs.hpp>
+#include <raft/random/make_blobs.cuh>
#include <rmm/device_uvector.hpp>
#include <vector>

2 changes: 1 addition & 1 deletion cpp/bench/random/permute.cu
@@ -16,7 +16,7 @@

#include <common/benchmark.hpp>
#include <raft/cudart_utils.h>
-#include <raft/random/permute.hpp>
+#include <raft/random/permute.cuh>
#include <raft/random/rng.cuh>

#include <rmm/device_uvector.hpp>
6 changes: 3 additions & 3 deletions cpp/bench/spatial/fused_l2_nn.cu
@@ -17,13 +17,13 @@
#include <common/benchmark.hpp>
#include <limits>
#include <raft/cudart_utils.h>
-#include <raft/distance/fused_l2_nn.hpp>
+#include <raft/distance/fused_l2_nn.cuh>
#include <raft/handle.hpp>
-#include <raft/linalg/norm.hpp>
+#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>

#if defined RAFT_NN_COMPILED
-#include <raft/spatial/knn/specializations.hpp>
+#include <raft/spatial/knn/specializations.cuh>
#endif

namespace raft::bench::spatial {
2 changes: 1 addition & 1 deletion cpp/bench/spatial/selection.cu
@@ -18,7 +18,7 @@
#include <raft/spatial/knn/knn.cuh>

#if defined RAFT_NN_COMPILED
-#include <raft/spatial/knn/specializations.hpp>
+#include <raft/spatial/knn/specializations.cuh>
#endif

#include <raft/random/rng.cuh>
103 changes: 6 additions & 97 deletions cpp/include/raft/distance/fused_l2_nn.hpp
@@ -18,105 +18,14 @@
* Please use the cuh version instead.
*/

-#ifndef __FUSED_L2_NN_H
-#define __FUSED_L2_NN_H
-
-#pragma once
-
-#include <cub/cub.cuh>
-#include <limits>
-#include <raft/cuda_utils.cuh>
-#include <raft/distance/detail/fused_l2_nn.cuh>
-#include <raft/handle.hpp>
-#include <stdint.h>
-
-namespace raft {
-namespace distance {
-
-template <typename LabelT, typename DataT>
-using KVPMinReduce = detail::KVPMinReduceImpl<LabelT, DataT>;
-
-template <typename LabelT, typename DataT>
-using MinAndDistanceReduceOp = detail::MinAndDistanceReduceOpImpl<LabelT, DataT>;
-
-template <typename LabelT, typename DataT>
-using MinReduceOp = detail::MinReduceOpImpl<LabelT, DataT>;
-
/**
- * Initialize array using init value from reduction op
+ * DISCLAIMER: this file is deprecated: use fused_l2_nn.cuh instead
 */
-template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
-void initialize(const raft::handle_t& handle, OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
-{
-  detail::initialize<DataT, OutT, IdxT, ReduceOpT>(min, m, maxVal, redOp, handle.get_stream());
-}
-
-/**
- * @brief Fused L2 distance and 1-nearest-neighbor computation in a single call.
- *
- * The benefits of such a call are 2-fold: 1) eliminate the need for an
- * intermediate buffer to store the output of gemm 2) reduce the memory read
- * traffic on this intermediate buffer, otherwise needed during the reduction
- * phase for 1-NN.
- *
- * @tparam DataT data type
- * @tparam OutT output type to either store 1-NN indices and their minimum
- * distances or store only the min distances. Accordingly, one
- * has to pass an appropriate `ReduceOpT`
- * @tparam IdxT indexing arithmetic type
- * @tparam ReduceOpT A struct to perform the final needed reduction operation
- * and also to initialize the output array elements with the
- * appropriate initial value needed for reduction.
- *
- * @param[out] min will contain the reduced output (Length = `m`)
- * (on device)
- * @param[in] x first matrix. Row major. Dim = `m x k`.
- * (on device).
- * @param[in] y second matrix. Row major. Dim = `n x k`.
- * (on device).
- * @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
- * @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
- * @param[in] m gemm m
- * @param[in] n gemm n
- * @param[in] k gemm k
- * @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
- * @param[in] redOp reduction operator in the epilogue
- * @param[in] pairRedOp reduction operation on key value pairs
- * @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
- * @param[in] initOutBuffer whether to initialize the output buffer before the
- * main kernel launch
- * @param[in] stream cuda stream
- */
-template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
-void fusedL2NN(OutT* min,
-               const DataT* x,
-               const DataT* y,
-               const DataT* xn,
-               const DataT* yn,
-               IdxT m,
-               IdxT n,
-               IdxT k,
-               void* workspace,
-               ReduceOpT redOp,
-               KVPReduceOpT pairRedOp,
-               bool sqrt,
-               bool initOutBuffer,
-               cudaStream_t stream)
-{
-  size_t bytes = sizeof(DataT) * k;
-  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0) {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 16 / sizeof(DataT), ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0) {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 8 / sizeof(DataT), ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  } else {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 1, ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  }
-}
+#pragma once

-} // namespace distance
-} // namespace raft
+#pragma message(__FILE__                                                  \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")

-#endif
+#include "fused_l2_nn.cuh"
11 changes: 7 additions & 4 deletions cpp/include/raft/distance/specializations.hpp
@@ -18,11 +18,14 @@
* Please use the cuh version instead.
*/

-#ifndef __DISTANCE_SPECIALIZATIONS_H
-#define __DISTANCE_SPECIALIZATIONS_H
+/**
+ * DISCLAIMER: this file is deprecated: use specializations.cuh instead
+ */

#pragma once

-#include <raft/distance/specializations/distance.cuh>
+#pragma message(__FILE__                                                  \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")

-#endif
+#include "specializations.cuh"
109 changes: 9 additions & 100 deletions cpp/include/raft/label/classlabels.hpp
@@ -13,110 +13,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

-#ifndef __CLASS_LABELS_H
-#define __CLASS_LABELS_H
-
-#pragma once
-
-#include <raft/label/detail/classlabels.cuh>
-
-namespace raft {
-namespace label {
-
/**
- * Get unique class labels.
- *
- * The y array is assumed to store class labels. The unique values are selected
- * from this array.
- *
- * @tparam value_t numeric type of the arrays with class labels
- * @param [inout] unique output unique labels
- * @param [in] y device array of labels, size [n]
- * @param [in] n number of labels
- * @param [in] stream cuda stream
- * @returns unique device array of unique labels, unallocated on entry,
- * on exit it has size
+ * This file is deprecated and will be removed in release 22.06.
+ * Please use the cuh version instead.
 */
-template <typename value_t>
-int getUniquelabels(rmm::device_uvector<value_t>& unique, value_t* y, size_t n, cudaStream_t stream)
-{
-  return detail::getUniquelabels<value_t>(unique, y, n, stream);
-}
-
-/**
- * Assign one versus rest labels.
- *
- * The output labels will have values +/-1:
- * y_out = (y == y_unique[idx]) ? +1 : -1;
- *
- * The output type currently is set to value_t, but for SVM in principle we are
- * free to choose other type for y_out (it should represent +/-1, and it is used
- * in floating point arithmetics).
- *
- * @param [in] y device array if input labels, size [n]
- * @param [in] n number of labels
- * @param [in] y_unique device array of unique labels, size [n_classes]
- * @param [in] n_classes number of unique labels
- * @param [out] y_out device array of output labels
- * @param [in] idx index of unique label that should be labeled as 1
- * @param [in] stream cuda stream
- */
-template <typename value_t>
-void getOvrlabels(
-  value_t* y, int n, value_t* y_unique, int n_classes, value_t* y_out, int idx, cudaStream_t stream)
-{
-  detail::getOvrlabels<value_t>(y, n, y_unique, n_classes, y_out, idx, stream);
-}
+
/**
- * Maps an input array containing a series of numbers into a new array
- * where numbers have been mapped to a monotonically increasing set
- * of labels. This can be useful in machine learning algorithms, for instance,
- * where a given set of labels is not taken from a monotonically increasing
- * set. This can happen if they are filtered or if only a subset of the
- * total labels are used in a dataset. This is also useful in graph algorithms
- * where a set of vertices need to be labeled in a monotonically increasing
- * order.
- * @tparam Type the numeric type of the input and output arrays
- * @tparam Lambda the type of an optional filter function, which determines
- * which items in the array to map.
- * @param[out] out the output monotonic array
- * @param[in] in input label array
- * @param[in] N number of elements in the input array
- * @param[in] stream cuda stream to use
- * @param[in] filter_op an optional function for specifying which values
- * should have monotonically increasing labels applied to them.
- * @param[in] zero_based force monotonic set to start at 0?
+ * DISCLAIMER: this file is deprecated: use classlabels.cuh instead
 */
-template <typename Type, typename Lambda>
-void make_monotonic(
-  Type* out, Type* in, size_t N, cudaStream_t stream, Lambda filter_op, bool zero_based = false)
-{
-  detail::make_monotonic<Type, Lambda>(out, in, N, stream, filter_op, zero_based);
-}
-
-/**
- * Maps an input array containing a series of numbers into a new array
- * where numbers have been mapped to a monotonically increasing set
- * of labels. This can be useful in machine learning algorithms, for instance,
- * where a given set of labels is not taken from a monotonically increasing
- * set. This can happen if they are filtered or if only a subset of the
- * total labels are used in a dataset. This is also useful in graph algorithms
- * where a set of vertices need to be labeled in a monotonically increasing
- * order.
- * @tparam Type the numeric type of the input and output arrays
- * @param[out] out output label array with labels assigned monotonically
- * @param[in] in input label array
- * @param[in] N number of elements in the input array
- * @param[in] stream cuda stream to use
- * @param[in] zero_based force monotonic label set to start at 0?
- */
-template <typename Type>
-void make_monotonic(Type* out, Type* in, size_t N, cudaStream_t stream, bool zero_based = false)
-{
-  detail::make_monotonic<Type>(out, in, N, stream, zero_based);
-}
-}; // namespace label
-}; // end namespace raft
+#pragma once

+#pragma message(__FILE__                                                  \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")

-#endif
+#include "classlabels.cuh"