Changed all references from hipSYCL to AdaptiveCpp #543

Closed · 7 commits · showing changes from all commits
6 changes: 3 additions & 3 deletions CMakeLists.txt
@@ -273,9 +273,9 @@ else()
     # Find necessary packages
     if(ONEMKL_SYCL_IMPLEMENTATION)
       string( TOLOWER "${ONEMKL_SYCL_IMPLEMENTATION}" ONEMKL_SYCL_IMPLEMENTATION)
-      if (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "hipsycl")
-        message(STATUS "Looking for hipSYCL")
-        find_package(hipSYCL CONFIG REQUIRED)
+      if (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "adaptivecpp")
+        message(STATUS "Looking for AdaptiveCpp")
+        find_package(AdaptiveCpp CONFIG REQUIRED)
         set(USE_ADD_SYCL_TO_TARGET_INTEGRATION true)
         set (CMAKE_CXX_STANDARD 17)
         add_library(ONEMKL::SYCL::SYCL INTERFACE IMPORTED)
4 changes: 2 additions & 2 deletions cmake/FindcuRAND.cmake
@@ -59,7 +59,7 @@
 find_package(CUDA 10.0 REQUIRED)
 get_filename_component(SYCL_BINARY_DIR ${CMAKE_CXX_COMPILER} DIRECTORY)

-if (NOT (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "hipsycl"))
+if (NOT (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "adaptivecpp"))
   # the OpenCL include file from cuda is opencl 1.1 and it is not compatible with DPC++
   # the OpenCL include headers 1.2 onward is required. This is used to bypass NVIDIA OpenCL headers
   find_path(OPENCL_INCLUDE_DIR CL/cl.h OpenCL/cl.h
@@ -76,7 +76,7 @@ find_package(Threads REQUIRED)

 include(FindPackageHandleStandardArgs)

-if (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "hipsycl")
+if (ONEMKL_SYCL_IMPLEMENTATION STREQUAL "adaptivecpp")
   find_package_handle_standard_args(cuRAND
     REQUIRED_VARS
       CUDA_TOOLKIT_INCLUDE
14 changes: 7 additions & 7 deletions docs/building_the_project_with_adaptivecpp.rst
@@ -35,13 +35,13 @@ On Linux (other OSes are not supported with the AdaptiveCpp compiler):

    # Inside <path to onemkl>
    mkdir build && cd build
-   cmake .. -DONEMKL_SYCL_IMPLEMENTATION=hipsycl \ # Indicate that AdaptiveCpp is being used.
-   -DENABLE_MKLGPU_BACKEND=False \ # MKLGPU backend is not supported by AdaptiveCpp
-   -DENABLE_<BACKEND_NAME>_BACKEND=True \ # Enable backend(s) (optional)
-   -DENABLE_<BACKEND_NAME_2>_BACKEND=True \ # Multiple backends can be enabled at once.
-   -DHIPSYCL_TARGETS=omp\;hip:gfx90a,gfx906 \ # Set target architectures depending on supported devices.
-   -DBUILD_FUNCTIONAL_TESTS=False \ # See section *Building the tests* for more on building tests. True by default.
-   -DBUILD_EXAMPLES=False # Optional: True by default.
+   cmake .. -DONEMKL_SYCL_IMPLEMENTATION=adaptivecpp \ # Indicate that AdaptiveCpp is being used.
+   -DENABLE_MKLGPU_BACKEND=False \ # MKLGPU backend is not supported by AdaptiveCpp
+   -DENABLE_<BACKEND_NAME>_BACKEND=True \ # Enable backend(s) (optional)
+   -DENABLE_<BACKEND_NAME_2>_BACKEND=True \ # Multiple backends can be enabled at once.
+   -DACPP_TARGETS=omp\;hip:gfx90a,gfx906 \ # Set target architectures depending on supported devices.
+   -DBUILD_FUNCTIONAL_TESTS=False \ # See section *Building the tests* for more on building tests. True by default.
+   -DBUILD_EXAMPLES=False # Optional: True by default.
    cmake --build .
    cmake --install . --prefix <path_to_install_dir> # required to have full package structure

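As a sanity check of the renamed option, a minimal consumer of a build configured as above might look like the following sketch. It uses the public oneapi::mkl::blas::column_major::gemm USM entry point; the queue selection, sizes, and values are illustrative and not part of this PR.

// Hedged sketch: single-precision USM GEMM against an oneMKL build
// configured with -DONEMKL_SYCL_IMPLEMENTATION=adaptivecpp.
#include <cstdint>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"

int main() {
    sycl::queue q{ sycl::default_selector_v };

    constexpr std::int64_t n = 4; // small square matrices for brevity
    float* a = sycl::malloc_shared<float>(n * n, q);
    float* b = sycl::malloc_shared<float>(n * n, q);
    float* c = sycl::malloc_shared<float>(n * n, q);
    for (std::int64_t i = 0; i < n * n; ++i) {
        a[i] = 1.0f; b[i] = 2.0f; c[i] = 0.0f;
    }

    // Run-time dispatch: the backend is chosen from q's device.
    oneapi::mkl::blas::column_major::gemm(
        q, oneapi::mkl::transpose::nontrans, oneapi::mkl::transpose::nontrans,
        n, n, n, 1.0f, a, n, b, n, 0.0f, c, n).wait();

    sycl::free(a, q);
    sycl::free(b, q);
    sycl::free(c, q);
    return 0;
}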
2 changes: 1 addition & 1 deletion docs/building_the_project_with_dpcpp.rst
@@ -305,7 +305,7 @@ When building oneMKL the SYCL implementation can be specified by setting the
 * ``dpc++`` (default) for the `Intel(R) oneAPI DPC++ Compiler
   <https://software.intel.com/en-us/oneapi/dpc-compiler>`_ and for the `oneAPI
   DPC++ Compiler <https://github.com/intel/llvm>`_ compilers.
-* ``hipsycl`` for the `AdaptiveCpp <https://github.com/illuhad/AdaptiveCpp>`_
+* ``AdaptiveCpp`` for the `AdaptiveCpp <https://github.com/AdaptiveCpp/AdaptiveCpp>`_
   SYCL implementation.
   Please see :ref:`building_the_project_with_adaptivecpp` if using this option.

6 changes: 3 additions & 3 deletions docs/selecting_a_compiler.rst
@@ -9,11 +9,11 @@ application.
 * If your application requires Intel GPU, use
   `Intel(R) oneAPI DPC++ Compiler <https://software.intel.com/en-us/oneapi/dpc-compiler>`_ ``icpx`` on Linux or ``icx`` on Windows.
 * If your Linux application requires NVIDIA GPU, build ``clang++`` from the latest source of
-  `oneAPI DPC++ Compiler <https://github.com/intel/llvm>`_ with `support for NVIDIA CUDA <https://github.com/intel/llvm/blob/sycl/sycl/doc/GetStartedGuide.md#build-dpc-toolchain-with-support-for-nvidia-cuda>`_ or use ``hipSYCL`` from the `hipSYCL repository <https://github.com/illuhad/hipSYCL>`_ (except for LAPACK domain).
-* If your Linux application requires AMD GPU, build ``clang++`` from the latest source of `oneAPI DPC++ Compiler <https://github.com/intel/llvm>`_ with `support for HIP AMD <https://github.com/intel/llvm/blob/sycl/sycl/doc/GetStartedGuide.md#build-dpc-toolchain-with-support-for-hip-amd>`_ or use ``hipSYCL``.
+  `oneAPI DPC++ Compiler <https://github.com/intel/llvm>`_ with `support for NVIDIA CUDA <https://github.com/intel/llvm/blob/sycl/sycl/doc/GetStartedGuide.md#build-dpc-toolchain-with-support-for-nvidia-cuda>`_ or use ``AdaptiveCpp`` from the `AdaptiveCpp repository <https://github.com/AdaptiveCpp/AdaptiveCpp>`_ (except for LAPACK or DFT domains).
+* If your Linux application requires AMD GPU, build ``clang++`` from the latest source of `oneAPI DPC++ Compiler <https://github.com/intel/llvm>`_ with `support for HIP AMD <https://github.com/intel/llvm/blob/sycl/sycl/doc/GetStartedGuide.md#build-dpc-toolchain-with-support-for-hip-amd>`_ or use ``AdaptiveCpp``.
 * If no Intel GPU, NVIDIA GPU, or AMD GPU is required, on Linux you can use
   `Intel(R) oneAPI DPC++ Compiler <https://software.intel.com/en-us/oneapi/dpc-compiler>`_
-  ``icpx``, `oneAPI DPC++ Compiler <https://github.com/intel/llvm/releases>`_ ``clang++``, or ``hipSYCL``,
+  ``icpx``, `oneAPI DPC++ Compiler <https://github.com/intel/llvm/releases>`_ ``clang++``, or ``AdaptiveCpp``,
   and on Windows you can use either
   `Intel(R) oneAPI DPC++ Compiler <https://software.intel.com/en-us/oneapi/dpc-compiler>`_
   ``icx`` or `oneAPI DPC++ Compiler <https://github.com/intel/llvm/releases>`_ ``clang-cl``.
2 changes: 1 addition & 1 deletion examples/rng/device/CMakeLists.txt
@@ -59,7 +59,7 @@ foreach(rng_device_source ${RNG_DEVICE_SOURCES})
       ONEMKL::SYCL::SYCL
   )

-  if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "hipsycl")
+  if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "adaptivecpp")
     target_link_options(example_${domain}_${rng_device_source} PUBLIC -fsycl -fsycl-device-code-split=per_kernel)
   endif()

10 changes: 0 additions & 10 deletions examples/rng/device/include/rng_example_helper.hpp
@@ -29,22 +29,12 @@ struct has_member_code_meta<T, std::void_t<decltype(std::declval<T>().get_multi_

 template <typename T, typename std::enable_if<has_member_code_meta<T>::value>::type* = nullptr>
 auto get_multi_ptr(T acc) {
-    // Workaround for AdaptiveCPP, as they do not yet support the get_multi_ptr function
-#ifndef __HIPSYCL__
     return acc.get_multi_ptr();
-#else
-    return acc.get_pointer();
-#endif
 };

 template <typename T, typename std::enable_if<!has_member_code_meta<T>::value>::type* = nullptr>
 auto get_multi_ptr(T acc) {
-    // Workaround for AdaptiveCPP, as they do not yet support the get_multi_ptr function
-#ifndef __HIPSYCL__
     return acc.template get_multi_ptr<sycl::access::decorated::yes>();
-#else
-    return acc.get_pointer();
-#endif
 };

 #endif // _RNG_EXAMPLE_HELPER_HPP__
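The overloads that remain still select on has_member_code_meta. As a self-contained refresher on that detection idiom (the stand-in types below are illustrative, not from the PR):

#include <iostream>
#include <type_traits>
#include <utility>

// Detection idiom: value is true when T has a callable member get_multi_ptr().
template <typename T, typename = void>
struct has_member_code_meta : std::false_type {};

template <typename T>
struct has_member_code_meta<T, std::void_t<decltype(std::declval<T>().get_multi_ptr())>>
    : std::true_type {};

struct with_member { int* get_multi_ptr() { return nullptr; } };
struct without_member {};

int main() {
    std::cout << has_member_code_meta<with_member>::value << '\n';    // prints 1
    std::cout << has_member_code_meta<without_member>::value << '\n'; // prints 0
}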
4 changes: 2 additions & 2 deletions include/oneapi/mkl/detail/backend_selector_predicates.hpp
@@ -40,7 +40,7 @@ inline void backend_selector_precondition(sycl::queue&) {}
 template <>
 inline void backend_selector_precondition<backend::netlib>(sycl::queue& queue) {
 #ifndef ONEMKL_DISABLE_PREDICATES
-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
     if (!(queue.is_host() || queue.get_device().is_cpu())) {
 #else
     if (!queue.get_device().is_cpu()) {
@@ -55,7 +55,7 @@ inline void backend_selector_precondition<backend::netlib>(sycl::queue& queue) {
 template <>
 inline void backend_selector_precondition<backend::mklcpu>(sycl::queue& queue) {
 #ifndef ONEMKL_DISABLE_PREDICATES
-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
     if (!(queue.is_host() || queue.get_device().is_cpu())) {
 #else
     if (!queue.get_device().is_cpu()) {
4 changes: 2 additions & 2 deletions include/oneapi/mkl/detail/get_device_id.hpp
@@ -31,7 +31,7 @@

 #define INTEL_ID 32902
 #define NVIDIA_ID 4318
-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 #define AMD_ID 4098
 #else
 #define AMD_ID 1022
@@ -44,7 +44,7 @@ inline oneapi::mkl::device get_device_id(sycl::queue &queue) {
     oneapi::mkl::device device_id;
     if (queue.get_device().is_cpu())
         device_id = device::x86cpu;
-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
     else if (queue.is_host())
         device_id = device::x86cpu;
 #endif
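Context for the constants above: INTEL_ID (32902 = 0x8086), NVIDIA_ID (4318 = 0x10DE), and AMD_ID (4098 = 0x1002) are PCI vendor IDs, and the #ifdef exists because AdaptiveCpp has reported 1022 for AMD devices instead. A hedged sketch of the underlying query this header dispatches on:

// Sketch: querying the vendor ID used by get_device_id.hpp.
#include <sycl/sycl.hpp>
#include <iostream>

int main() {
    sycl::queue q;
    sycl::device dev = q.get_device();
    // vendor_id is a standard SYCL 2020 device info descriptor.
    std::cout << "vendor id: "
              << dev.get_info<sycl::info::device::vendor_id>() << '\n'
              << "is_cpu: " << dev.is_cpu() << '\n';
    return 0;
}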
11 changes: 3 additions & 8 deletions include/oneapi/mkl/rng/device/detail/mcg31m1_impl.hpp
@@ -53,9 +53,9 @@ constexpr sycl::vec<std::uint64_t, VecSize> select_vector_a_mcg31m1() {
                                               UINT64_C(650347998) });
 }

-// hipSYCL (AdaptiveCpp) doesn't support constexpr sycl::vec constructor
-// that's why in case of hipSYCL backend sycl::vec is created as a local variable
-#ifndef __HIPSYCL__
+// AdaptiveCpp (hipSYCL) doesn't support constexpr sycl::vec constructor
+// that's why in case of AdaptiveCpp backend sycl::vec is created as a local variable
+#ifndef __ADAPTIVECPP__
 template <std::int32_t VecSize>
 struct mcg31m1_vector_a {
     static constexpr sycl::vec<std::uint64_t, VecSize> vector_a =
@@ -154,12 +154,7 @@ static inline sycl::vec<std::uint32_t, VecSize> generate(
     engine_state<oneapi::mkl::rng::device::mcg31m1<VecSize>>& state) {
     sycl::vec<std::uint64_t, VecSize> x(state.s);
     sycl::vec<std::uint32_t, VecSize> res;
-#ifndef __HIPSYCL__
     res = custom_mod(mcg31m1_vector_a<VecSize>::vector_a * x);
-#else
-    // a workaround for hipSYCL (AdaptiveCpp)
-    res = custom_mod(select_vector_a_mcg31m1<VecSize>() * x);
-#endif
     state.s =
         custom_mod<std::uint32_t>(mcg31m1_param::a * static_cast<std::uint64_t>(res[VecSize - 1]));
     return res;
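The deleted #else branch existed because AdaptiveCpp could not evaluate the sycl::vec constructor at compile time, so the multiplier vector was rebuilt on every call; the static constexpr member is now used unconditionally. A minimal illustration of the two patterns, with a plain aggregate standing in for sycl::vec so the snippet compiles without SYCL:

#include <cstdint>
#include <iostream>

// Stand-in for sycl::vec<std::uint64_t, 2>; aggregates support constexpr init.
struct vec2 {
    std::uint64_t v[2];
};

constexpr vec2 select_vector_a() {
    return vec2{ { UINT64_C(1), UINT64_C(650347998) } };
}

// Pattern now used unconditionally: the constant lives in a static constexpr
// member, evaluated once at compile time (implicitly inline in C++17).
struct vector_a_holder {
    static constexpr vec2 vector_a = select_vector_a();
};

int main() {
    // Former AdaptiveCpp fallback: construct a local copy on every call.
    vec2 local = select_vector_a();
    std::cout << vector_a_holder::vector_a.v[1] << ' ' << local.v[1] << '\n';
}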
11 changes: 3 additions & 8 deletions include/oneapi/mkl/rng/device/detail/mcg59_impl.hpp
@@ -54,9 +54,9 @@ constexpr sycl::vec<uint64_t, VecSize> select_vector_a_mcg59() {
                                               UINT64_C(0x58145D06A37D795) });
 }

-// hipSYCL (AdaptiveCpp) doesn't support constexpr sycl::vec constructor
-// that's why in case of hipSYCL backend sycl::vec is created as a local variable
-#ifndef __HIPSYCL__
+// AdaptiveCpp (hipSYCL) doesn't support constexpr sycl::vec constructor
+// that's why in case of AdaptiveCpp backend sycl::vec is created as a local variable
+#ifndef __ADAPTIVECPP__
 template <std::int32_t VecSize>
 struct mcg59_vector_a {
     static constexpr sycl::vec<std::uint64_t, VecSize> vector_a =
@@ -123,12 +123,7 @@ template <std::int32_t VecSize>
 static inline sycl::vec<std::uint64_t, VecSize> generate(
     engine_state<oneapi::mkl::rng::device::mcg59<VecSize>>& state) {
     sycl::vec<std::uint64_t, VecSize> res(state.s);
-#ifndef __HIPSYCL__
     res = custom_mod(mcg59_vector_a<VecSize>::vector_a * res);
-#else
-    // a workaround for hipSYCL (AdaptiveCpp)
-    res = custom_mod(select_vector_a_mcg59<VecSize>() * res);
-#endif
     state.s = custom_mod(mcg59_param::a * res[VecSize - 1]);
     return res;
 }
4 changes: 2 additions & 2 deletions include/oneapi/mkl/types.hpp
@@ -20,7 +20,7 @@
 #ifndef _ONEMKL_TYPES_HPP_
 #define _ONEMKL_TYPES_HPP_

-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
 #include "oneapi/mkl/bfloat16.hpp"
 #endif

@@ -33,7 +33,7 @@
 namespace oneapi {
 namespace mkl {

-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 using bfloat16 = sycl::ext::oneapi::bfloat16;
 #endif

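The visible effect: oneapi::mkl::bfloat16 resolves to the sycl::ext::oneapi::bfloat16 extension type under DPC++ and to the header-provided type from oneapi/mkl/bfloat16.hpp under AdaptiveCpp. A hedged usage sketch, assuming only that both types convert to and from float (the extension type does; the fallback is assumed to match):

#include <iostream>
#include <sycl/sycl.hpp>
#include "oneapi/mkl/types.hpp"

int main() {
    // Which concrete type this is depends on __ADAPTIVECPP__.
    oneapi::mkl::bfloat16 x = 1.5f; // assumed converting constructor from float
    float back = static_cast<float>(x);
    std::cout << back << '\n'; // 1.5 is exactly representable in bfloat16
    return 0;
}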
4 changes: 2 additions & 2 deletions src/blas/backends/cublas/CMakeLists.txt
@@ -26,7 +26,7 @@ set(SOURCES cublas_level1.cpp
             cublas_batch.cpp
             cublas_extensions.cpp
             $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},dpc++>:cublas_scope_handle.cpp >
-            $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},hipsycl>:cublas_scope_handle_hipsycl.cpp >
+            $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},adaptivecpp>:cublas_scope_handle_adaptivecpp.cpp >
             $<$<BOOL:${BUILD_SHARED_LIBS}>: cublas_wrappers.cpp>)
 add_library(${LIB_NAME})
 add_library(${LIB_OBJ} OBJECT ${SOURCES})
@@ -40,7 +40,7 @@ target_include_directories(${LIB_OBJ}
 )
 target_compile_options(${LIB_OBJ} PRIVATE ${ONEMKL_BUILD_COPT})

-if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "hipsycl")
+if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "adaptivecpp")
   target_compile_options(ONEMKL::SYCL::SYCL INTERFACE
     -fsycl-targets=nvptx64-nvidia-cuda -fsycl-unnamed-lambda)
   target_link_options(ONEMKL::SYCL::SYCL INTERFACE
src/blas/backends/cublas/cublas_scope_handle_hipsycl.cpp → src/blas/backends/cublas/cublas_scope_handle_adaptivecpp.cpp (renamed)
@@ -16,7 +16,7 @@
  * limitations under the License.
  *
  **************************************************************************/
-#include "cublas_scope_handle_hipsycl.hpp"
+#include "cublas_scope_handle_adaptivecpp.hpp"
 #include "cublas_handle.hpp"

 namespace oneapi {
@@ -71,4 +71,4 @@ CUstream CublasScopedContextHandler::get_stream(const sycl::queue &queue) {
 } // namespace cublas
 } // namespace blas
 } // namespace mkl
-} // namespace oneapi
\ No newline at end of file
+} // namespace oneapi
8 changes: 4 additions & 4 deletions src/blas/backends/cublas/cublas_task.hpp
@@ -30,15 +30,15 @@
 #include <CL/sycl.hpp>
 #endif
 #include "oneapi/mkl/types.hpp"
-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 #include "cublas_scope_handle.hpp"
 #if __has_include(<sycl/detail/pi.hpp>)
 #include <sycl/detail/pi.hpp>
 #else
 #include <CL/sycl/detail/pi.hpp>
 #endif
 #else
-#include "cublas_scope_handle_hipsycl.hpp"
+#include "cublas_scope_handle_adaptivecpp.hpp"
 namespace sycl {
 using interop_handler = sycl::interop_handle;
 }
@@ -48,10 +48,10 @@ namespace mkl {
 namespace blas {
 namespace cublas {

-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
 template <typename H, typename F>
 static inline void host_task_internal(H &cgh, sycl::queue queue, F f) {
-    cgh.hipSYCL_enqueue_custom_operation([f, queue](sycl::interop_handle ih) {
+    cgh.AdaptiveCpp_enqueue_custom_operation([f, queue](sycl::interop_handle ih) {
         auto sc = CublasScopedContextHandler(queue, ih);
         f(sc);
     });
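For readers comparing the two interop paths in this file: the DPC++ branch runs the cuBLAS work inside a standard SYCL host task, while the AdaptiveCpp branch uses the vendor extension renamed here from hipSYCL_enqueue_custom_operation. A simplified, hedged sketch of that dispatch (the scoped-context handler and error handling are omitted; do_work stands in for the real cuBLAS call):

#include <sycl/sycl.hpp>

template <typename F>
void submit_host_task(sycl::queue& q, F do_work) {
    q.submit([&](sycl::handler& cgh) {
#ifdef __ADAPTIVECPP__
        // AdaptiveCpp extension (formerly hipSYCL_enqueue_custom_operation):
        // enqueues a host callback that may use native (CUDA) interop.
        cgh.AdaptiveCpp_enqueue_custom_operation([=](sycl::interop_handle ih) {
            do_work(ih);
        });
#else
        // Standard SYCL 2020 host task, as used by the DPC++ path.
        cgh.host_task([=](sycl::interop_handle ih) {
            do_work(ih);
        });
#endif
    });
}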
4 changes: 2 additions & 2 deletions src/blas/backends/rocblas/CMakeLists.txt
@@ -31,7 +31,7 @@ set(SOURCES rocblas_level1.cpp
             rocblas_batch.cpp
             rocblas_extensions.cpp
             $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},dpc++>:rocblas_scope_handle.cpp >
-            $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},hipsycl>:rocblas_scope_handle_hipsycl.cpp >
+            $<$<STREQUAL:${ONEMKL_SYCL_IMPLEMENTATION},adaptivecpp>:rocblas_scope_handle_adaptivecpp.cpp >
             $<$<BOOL:${BUILD_SHARED_LIBS}>: rocblas_wrappers.cpp>)
 add_library(${LIB_NAME})
 add_library(${LIB_OBJ} OBJECT ${SOURCES})
@@ -45,7 +45,7 @@ target_include_directories(${LIB_OBJ}
     ${ONEMKL_GENERATED_INCLUDE_PATH}
 )

-if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "hipsycl")
+if(NOT ${ONEMKL_SYCL_IMPLEMENTATION} STREQUAL "adaptivecpp")
   target_compile_options(${LIB_OBJ} PRIVATE ${ONEMKL_BUILD_COPT})
   target_compile_options(ONEMKL::SYCL::SYCL INTERFACE
     -fsycl-targets=amdgcn-amd-amdhsa -fsycl-unnamed-lambda
src/blas/backends/rocblas/rocblas_scope_handle_hipsycl.cpp → src/blas/backends/rocblas/rocblas_scope_handle_adaptivecpp.cpp (renamed)
@@ -19,7 +19,7 @@
  *
  **************************************************************************/

-#include "rocblas_scope_handle_hipsycl.hpp"
+#include "rocblas_scope_handle_adaptivecpp.hpp"

 namespace oneapi {
 namespace mkl {
@@ -91,4 +91,4 @@ hipStream_t RocblasScopedContextHandler::get_stream(const sycl::queue &queue) {
 } // namespace rocblas
 } // namespace blas
 } // namespace mkl
-} // namespace oneapi
\ No newline at end of file
+} // namespace oneapi
8 changes: 4 additions & 4 deletions src/blas/backends/rocblas/rocblas_task.hpp
@@ -28,26 +28,26 @@
 #include <CL/sycl.hpp>
 #endif
 #include "oneapi/mkl/types.hpp"
-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 #include "rocblas_scope_handle.hpp"
 #if __has_include(<sycl/detail/pi.hpp>)
 #include <sycl/detail/pi.hpp>
 #else
 #include <CL/sycl/detail/pi.hpp>
 #endif
 #else
-#include "rocblas_scope_handle_hipsycl.hpp"
+#include "rocblas_scope_handle_adaptivecpp.hpp"

 #endif
 namespace oneapi {
 namespace mkl {
 namespace blas {
 namespace rocblas {

-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
 template <typename H, typename F>
 static inline void host_task_internal(H &cgh, sycl::queue queue, F f) {
-    cgh.hipSYCL_enqueue_custom_operation([f, queue](sycl::interop_handle ih) {
+    cgh.AdaptiveCpp_enqueue_custom_operation([f, queue](sycl::interop_handle ih) {
         auto sc = RocblasScopedContextHandler(queue, ih);
         f(sc);
     });
6 changes: 3 additions & 3 deletions src/rng/backends/curand/curand_task.hpp
@@ -13,10 +13,10 @@ namespace oneapi {
 namespace mkl {
 namespace rng {
 namespace curand {
-#ifdef __HIPSYCL__
+#ifdef __ADAPTIVECPP__
 template <typename H, typename A, typename E, typename F>
 static inline void host_task_internal(H &cgh, A acc, E e, F f) {
-    cgh.hipSYCL_enqueue_custom_operation([=](sycl::interop_handle ih) {
+    cgh.AdaptiveCpp_enqueue_custom_operation([=](sycl::interop_handle ih) {
         curandStatus_t status;
         CURAND_CALL(curandSetStream, status, e, ih.get_native_queue<sycl::backend::cuda>());
         auto r_ptr =
@@ -27,7 +27,7 @@ static inline void host_task_internal(H &cgh, A acc, E e, F f) {

 template <typename H, typename E, typename F>
 static inline void host_task_internal(H &cgh, E e, F f) {
-    cgh.hipSYCL_enqueue_custom_operation([=](sycl::interop_handle ih) {
+    cgh.AdaptiveCpp_enqueue_custom_operation([=](sycl::interop_handle ih) {
         curandStatus_t status;
         CURAND_CALL(curandSetStream, status, e, ih.get_native_queue<sycl::backend::cuda>());
         f(ih);
2 changes: 1 addition & 1 deletion src/rng/backends/curand/mrg32k3a.cpp
@@ -61,7 +61,7 @@
 #else
 #include <CL/sycl.hpp>
 #endif
-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 #if __has_include(<sycl/context.hpp>)
 #if __SYCL_COMPILER_VERSION <= 20220930
 #include <sycl/backend/cuda.hpp>
2 changes: 1 addition & 1 deletion src/rng/backends/curand/philox4x32x10.cpp
@@ -61,7 +61,7 @@
 #else
 #include <CL/sycl.hpp>
 #endif
-#ifndef __HIPSYCL__
+#ifndef __ADAPTIVECPP__
 #if __has_include(<sycl/context.hpp>)
 #if __SYCL_COMPILER_VERSION <= 20220930
 #include <sycl/backend/cuda.hpp>
5 changes: 0 additions & 5 deletions src/rng/backends/mklcpu/cpu_common.hpp
@@ -58,12 +58,7 @@ class kernel_name_usm {};

 template <typename Acc>
 typename Acc::value_type *get_raw_ptr(Acc acc) {
-    // Workaround for AdaptiveCPP, as they do not yet support the get_multi_ptr function
-#ifndef __HIPSYCL__
     return acc.template get_multi_ptr<sycl::access::decorated::no>().get_raw();
-#else
-    return acc.get_pointer();
-#endif
 }

 } // namespace mklcpu