Commit d1d71be

Complete the deprecation of duplicated hpp headers
Replace every .hpp header that has a same-named .cuh header in the same
directory with a simple include of the .cuh header and a #pragma message
warning of deprecation.

This change hopefully prevents future head-scratching when changes in a
file are seemingly not picked up by the compiler.

Care has been taken to copy the right start year for each copyright line;
copyright end years have been updated to 2022 where necessary.

The following template has been used for the .hpp header replacement
text:

------------------------------------------------------------
/*
 * %%COPYRIGHT_LINE%%
 *
 * [... snip license .. ]
 */
/**
 * This file is deprecated and will be removed in release 22.06.
 * Please use the cuh version instead.
 */

/**
 * DISCLAIMER: this file is deprecated: use %%CUH_FILE%% instead
 */

#pragma once

#pragma message(__FILE__ \
                " is deprecated and will be removed in a future release." \
                " Please use the cuh version instead.")

#include "%%CUH_FILE%%"
------------------------------------------------------------
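
For downstream code, including the deprecated .hpp still compiles — it now
pulls in the .cuh header and emits the compile-time message — so migrating is
a one-line include change, e.g. (using fused_l2_nn as an example):

------------------------------------------------------------
// Before: deprecated, triggers the #pragma message at compile time.
#include <raft/distance/fused_l2_nn.hpp>

// After: include the cuh header directly; same contents, no warning.
#include <raft/distance/fused_l2_nn.cuh>
------------------------------------------------------------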
ahendriksen committed Aug 25, 2022
1 parent ab9a695 commit d1d71be
Showing 83 changed files with 557 additions and 5,468 deletions.
103 changes: 6 additions & 97 deletions cpp/include/raft/distance/fused_l2_nn.hpp
@@ -18,105 +18,14 @@
  * Please use the cuh version instead.
  */
 
-#ifndef __FUSED_L2_NN_H
-#define __FUSED_L2_NN_H
-
-#pragma once
-
-#include <cub/cub.cuh>
-#include <limits>
-#include <raft/cuda_utils.cuh>
-#include <raft/distance/detail/fused_l2_nn.cuh>
-#include <raft/handle.hpp>
-#include <stdint.h>
-
-namespace raft {
-namespace distance {
-
-template <typename LabelT, typename DataT>
-using KVPMinReduce = detail::KVPMinReduceImpl<LabelT, DataT>;
-
-template <typename LabelT, typename DataT>
-using MinAndDistanceReduceOp = detail::MinAndDistanceReduceOpImpl<LabelT, DataT>;
-
-template <typename LabelT, typename DataT>
-using MinReduceOp = detail::MinReduceOpImpl<LabelT, DataT>;
-
-/**
- * Initialize array using init value from reduction op
- */
-template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
-void initialize(const raft::handle_t& handle, OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
-{
-  detail::initialize<DataT, OutT, IdxT, ReduceOpT>(min, m, maxVal, redOp, handle.get_stream());
-}
-
-/**
- * @brief Fused L2 distance and 1-nearest-neighbor computation in a single call.
- *
- * The benefits of such a call are 2-fold: 1) eliminate the need for an
- * intermediate buffer to store the output of gemm 2) reduce the memory read
- * traffic on this intermediate buffer, otherwise needed during the reduction
- * phase for 1-NN.
- *
- * @tparam DataT data type
- * @tparam OutT output type to either store 1-NN indices and their minimum
- *              distances or store only the min distances. Accordingly, one
- *              has to pass an appropriate `ReduceOpT`
- * @tparam IdxT indexing arithmetic type
- * @tparam ReduceOpT A struct to perform the final needed reduction operation
- *                   and also to initialize the output array elements with the
- *                   appropriate initial value needed for reduction.
- *
- * @param[out] min will contain the reduced output (Length = `m`) (on device)
- * @param[in] x first matrix. Row major. Dim = `m x k`. (on device).
- * @param[in] y second matrix. Row major. Dim = `n x k`. (on device).
- * @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
- * @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
- * @param[in] m gemm m
- * @param[in] n gemm n
- * @param[in] k gemm k
- * @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
- * @param[in] redOp reduction operator in the epilogue
- * @param[in] pairRedOp reduction operation on key value pairs
- * @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
- * @param[in] initOutBuffer whether to initialize the output buffer before the
- *                          main kernel launch
- * @param[in] stream cuda stream
- */
-template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
-void fusedL2NN(OutT* min,
-               const DataT* x,
-               const DataT* y,
-               const DataT* xn,
-               const DataT* yn,
-               IdxT m,
-               IdxT n,
-               IdxT k,
-               void* workspace,
-               ReduceOpT redOp,
-               KVPReduceOpT pairRedOp,
-               bool sqrt,
-               bool initOutBuffer,
-               cudaStream_t stream)
-{
-  size_t bytes = sizeof(DataT) * k;
-  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0) {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 16 / sizeof(DataT), ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0) {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 8 / sizeof(DataT), ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  } else {
-    detail::fusedL2NNImpl<DataT, OutT, IdxT, 1, ReduceOpT>(
-      min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
-  }
-}
-
-}  // namespace distance
-}  // namespace raft
-
-#endif
+/**
+ * DISCLAIMER: this file is deprecated: use fused_l2_nn.cuh instead
+ */
+
+#pragma once
+
+#pragma message(__FILE__ \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")
+
+#include "fused_l2_nn.cuh"
11 changes: 7 additions & 4 deletions cpp/include/raft/distance/specializations.hpp
@@ -18,11 +18,14 @@
  * Please use the cuh version instead.
  */
 
-#ifndef __DISTANCE_SPECIALIZATIONS_H
-#define __DISTANCE_SPECIALIZATIONS_H
+/**
+ * DISCLAIMER: this file is deprecated: use specializations.cuh instead
+ */
 
 #pragma once
 
-#include <raft/distance/specializations/distance.cuh>
+#pragma message(__FILE__ \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")
 
-#endif
+#include "specializations.cuh"
109 changes: 9 additions & 100 deletions cpp/include/raft/label/classlabels.hpp
@@ -13,110 +13,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#ifndef __CLASS_LABELS_H
-#define __CLASS_LABELS_H
-
-#pragma once
-
-#include <raft/label/detail/classlabels.cuh>
-
-namespace raft {
-namespace label {
-
-/**
- * Get unique class labels.
- *
- * The y array is assumed to store class labels. The unique values are selected
- * from this array.
- *
- * @tparam value_t numeric type of the arrays with class labels
- * @param [inout] unique output unique labels
- * @param [in] y device array of labels, size [n]
- * @param [in] n number of labels
- * @param [in] stream cuda stream
- * @returns unique device array of unique labels, unallocated on entry,
- *          on exit it has size
- */
-template <typename value_t>
-int getUniquelabels(rmm::device_uvector<value_t>& unique, value_t* y, size_t n, cudaStream_t stream)
-{
-  return detail::getUniquelabels<value_t>(unique, y, n, stream);
-}
-
-/**
- * Assign one versus rest labels.
- *
- * The output labels will have values +/-1:
- *   y_out = (y == y_unique[idx]) ? +1 : -1;
- *
- * The output type currently is set to value_t, but for SVM in principle we are
- * free to choose other type for y_out (it should represent +/-1, and it is used
- * in floating point arithmetics).
- *
- * @param [in] y device array if input labels, size [n]
- * @param [in] n number of labels
- * @param [in] y_unique device array of unique labels, size [n_classes]
- * @param [in] n_classes number of unique labels
- * @param [out] y_out device array of output labels
- * @param [in] idx index of unique label that should be labeled as 1
- * @param [in] stream cuda stream
- */
-template <typename value_t>
-void getOvrlabels(
-  value_t* y, int n, value_t* y_unique, int n_classes, value_t* y_out, int idx, cudaStream_t stream)
-{
-  detail::getOvrlabels<value_t>(y, n, y_unique, n_classes, y_out, idx, stream);
-}
-
-/**
- * Maps an input array containing a series of numbers into a new array
- * where numbers have been mapped to a monotonically increasing set
- * of labels. This can be useful in machine learning algorithms, for instance,
- * where a given set of labels is not taken from a monotonically increasing
- * set. This can happen if they are filtered or if only a subset of the
- * total labels are used in a dataset. This is also useful in graph algorithms
- * where a set of vertices need to be labeled in a monotonically increasing
- * order.
- * @tparam Type the numeric type of the input and output arrays
- * @tparam Lambda the type of an optional filter function, which determines
- *         which items in the array to map.
- * @param[out] out the output monotonic array
- * @param[in] in input label array
- * @param[in] N number of elements in the input array
- * @param[in] stream cuda stream to use
- * @param[in] filter_op an optional function for specifying which values
- *            should have monotonically increasing labels applied to them.
- * @param[in] zero_based force monotonic set to start at 0?
- */
-template <typename Type, typename Lambda>
-void make_monotonic(
-  Type* out, Type* in, size_t N, cudaStream_t stream, Lambda filter_op, bool zero_based = false)
-{
-  detail::make_monotonic<Type, Lambda>(out, in, N, stream, filter_op, zero_based);
-}
-
-/**
- * Maps an input array containing a series of numbers into a new array
- * where numbers have been mapped to a monotonically increasing set
- * of labels. This can be useful in machine learning algorithms, for instance,
- * where a given set of labels is not taken from a monotonically increasing
- * set. This can happen if they are filtered or if only a subset of the
- * total labels are used in a dataset. This is also useful in graph algorithms
- * where a set of vertices need to be labeled in a monotonically increasing
- * order.
- * @tparam Type the numeric type of the input and output arrays
- * @param[out] out output label array with labels assigned monotonically
- * @param[in] in input label array
- * @param[in] N number of elements in the input array
- * @param[in] stream cuda stream to use
- * @param[in] zero_based force monotonic label set to start at 0?
- */
-template <typename Type>
-void make_monotonic(Type* out, Type* in, size_t N, cudaStream_t stream, bool zero_based = false)
-{
-  detail::make_monotonic<Type>(out, in, N, stream, zero_based);
-}
-};  // namespace label
-};  // end namespace raft
-
-#endif
+/**
+ * This file is deprecated and will be removed in release 22.06.
+ * Please use the cuh version instead.
+ */
+
+/**
+ * DISCLAIMER: this file is deprecated: use classlabels.cuh instead
+ */
+
+#pragma once
+
+#pragma message(__FILE__ \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")
+
+#include "classlabels.cuh"
62 changes: 11 additions & 51 deletions cpp/include/raft/label/merge_labels.hpp
@@ -13,59 +13,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#ifndef __MERGE_LABELS_H
-#define __MERGE_LABELS_H
-
-#pragma once
-
-#include <raft/label/detail/merge_labels.cuh>
-
-namespace raft {
-namespace label {
-
-/**
- * @brief Merge two labellings in-place, according to a core mask
- *
- * A labelling is a representation of disjoint sets (groups) where points that
- * belong to the same group have the same label. It is assumed that group
- * labels take values between 1 and N. labels relate to points, i.e a label i+1
- * means that you belong to the same group as the point i.
- * The special value MAX_LABEL is used to mark points that are not labelled.
- *
- * The two label arrays A and B induce two sets of groups over points 0..N-1.
- * If a point is labelled i in A and j in B and the mask is true for this
- * point, then i and j are equivalent labels and their groups are merged by
- * relabeling the elements of both groups to have the same label. The new label
- * is the smaller one from the original labels.
- * It is required that if the mask is true for a point, this point is labelled
- * (i.e its label is different than the special value MAX_LABEL).
- *
- * One use case is finding connected components: the two input label arrays can
- * represent the connected components of graphs G_A and G_B, and the output
- * would be the connected components labels of G_A \union G_B.
- *
- * @param[inout] labels_a First input, and output label array (in-place)
- * @param[in] labels_b Second input label array
- * @param[in] mask Core point mask
- * @param[out] R label equivalence map
- * @param[in] m Working flag
- * @param[in] N Number of points in the dataset
- * @param[in] stream CUDA stream
- */
-template <typename value_idx = int, int TPB_X = 256>
-void merge_labels(value_idx* labels_a,
-                  const value_idx* labels_b,
-                  const bool* mask,
-                  value_idx* R,
-                  bool* m,
-                  value_idx N,
-                  cudaStream_t stream)
-{
-  detail::merge_labels<value_idx, TPB_X>(labels_a, labels_b, mask, R, m, N, stream);
-}
-
-};  // namespace label
-};  // namespace raft
-
-#endif
+/**
+ * This file is deprecated and will be removed in release 22.06.
+ * Please use the cuh version instead.
+ */
+
+/**
+ * DISCLAIMER: this file is deprecated: use merge_labels.cuh instead
+ */
+
+#pragma once
+
+#pragma message(__FILE__ \
+                " is deprecated and will be removed in a future release." \
+                " Please use the cuh version instead.")
+
+#include "merge_labels.cuh"
