From 5496273014ef717b99d41e7c0264db5625dc0bf2 Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Tue, 2 May 2023 04:48:47 -0500 Subject: [PATCH 1/6] Add type dispatching to pybind11 extension of dpnp.linalg.eigh() --- dpnp/backend/extensions/lapack/CMakeLists.txt | 1 + dpnp/backend/extensions/lapack/heevd.cpp | 99 ++++++++++------ dpnp/backend/extensions/lapack/heevd.hpp | 2 + dpnp/backend/extensions/lapack/lapack_py.cpp | 32 +++++- dpnp/backend/extensions/lapack/syevd.cpp | 107 ++++++++++++------ dpnp/backend/extensions/lapack/syevd.hpp | 2 + .../extensions/lapack/types_matrix.hpp | 88 ++++++++++++++ 7 files changed, 255 insertions(+), 76 deletions(-) create mode 100644 dpnp/backend/extensions/lapack/types_matrix.hpp diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index a32adaa431ff..fb95e30f489e 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -45,6 +45,7 @@ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../src) target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}) +target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}/../tensor/libtensor/include) if (WIN32) target_compile_options(${python_module_name} PRIVATE diff --git a/dpnp/backend/extensions/lapack/heevd.cpp b/dpnp/backend/extensions/lapack/heevd.cpp index 8c943646ff0a..bfb18697a56e 100644 --- a/dpnp/backend/extensions/lapack/heevd.cpp +++ b/dpnp/backend/extensions/lapack/heevd.cpp @@ -26,7 +26,13 @@ #include +// dpctl tensor headers +#include "utils/memory_overlap.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" + #include "heevd.hpp" +#include "types_matrix.hpp" #include "dpnp_utils.hpp" @@ -42,19 +48,35 @@ namespace lapack namespace mkl_lapack = oneapi::mkl::lapack; namespace py = pybind11; +namespace type_dispatch = dpctl::tensor::type_dispatch; +namespace type_utils = dpctl::tensor::type_utils; + +typedef sycl::event (*heevd_impl_fn_ptr_t)(sycl::queue, + const oneapi::mkl::job, + const oneapi::mkl::uplo, + const std::int64_t, + char*, + char*, + std::vector&, + const std::vector&); + +static heevd_impl_fn_ptr_t heevd_dispatch_table[type_dispatch::num_types][type_dispatch::num_types]; template -static sycl::event call_heevd(sycl::queue exec_q, +static sycl::event heevd_impl(sycl::queue exec_q, const oneapi::mkl::job jobz, const oneapi::mkl::uplo upper_lower, const std::int64_t n, - T* a, - RealT* w, + char* in_a, + char* out_w, std::vector& host_task_events, const std::vector& depends) { - validate_type_for_device(exec_q); - validate_type_for_device(exec_q); + type_utils::validate_type_for_device(exec_q); + type_utils::validate_type_for_device(exec_q); + + T* a = reinterpret_cast(in_a); + RealT* w = reinterpret_cast(out_w); const std::int64_t lda = std::max(1UL, n); const std::int64_t scratchpad_size = mkl_lapack::heevd_scratchpad_size(exec_q, jobz, upper_lower, n, lda); @@ -163,13 +185,11 @@ std::pair heevd(sycl::queue exec_q, throw py::value_error("Execution queue is not compatible with allocation queues"); } - // check that arrays do not overlap, and concurrent access is safe. 
- // TODO: need to be exposed by DPCTL headers - // auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); - // if (overlap(eig_vecs, eig_vals)) - // { - // throw py::value_error("Arrays index overlapping segments of memory"); - // } + auto const& overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(eig_vecs, eig_vals)) + { + throw py::value_error("Arrays with eigenvectors and eigenvalues are overlapping segments of memory"); + } bool is_eig_vecs_f_contig = eig_vecs.is_f_contiguous(); bool is_eig_vals_c_contig = eig_vals.is_c_contiguous(); @@ -182,38 +202,51 @@ std::pair heevd(sycl::queue exec_q, throw py::value_error("An array with output eigenvalues must be C-contiguous"); } - int eig_vecs_typenum = eig_vecs.get_typenum(); - int eig_vals_typenum = eig_vals.get_typenum(); - auto const& dpctl_capi = dpctl::detail::dpctl_capi::get(); + auto array_types = type_dispatch::usm_ndarray_types(); + int eig_vecs_type_id = array_types.typenum_to_lookup_id(eig_vecs.get_typenum()); + int eig_vals_type_id = array_types.typenum_to_lookup_id(eig_vals.get_typenum()); - sycl::event heevd_ev; - std::vector host_task_events; + heevd_impl_fn_ptr_t heevd_fn = heevd_dispatch_table[eig_vecs_type_id][eig_vals_type_id]; + if (heevd_fn == nullptr) + { + throw py::value_error("No heevd implementation defined for a pair of type for eigenvectors and eigenvalues"); + } + + char* eig_vecs_data = eig_vecs.get_data(); + char* eig_vals_data = eig_vals.get_data(); const std::int64_t n = eig_vecs_shape[0]; const oneapi::mkl::job jobz_val = static_cast(jobz); const oneapi::mkl::uplo uplo_val = static_cast(upper_lower); - if ((eig_vecs_typenum == dpctl_capi.UAR_CDOUBLE_) && (eig_vals_typenum == dpctl_capi.UAR_DOUBLE_)) - { - std::complex* a = reinterpret_cast*>(eig_vecs.get_data()); - double* w = reinterpret_cast(eig_vals.get_data()); + std::vector host_task_events; + sycl::event heevd_ev = + heevd_fn(exec_q, jobz_val, uplo_val, n, eig_vecs_data, eig_vals_data, host_task_events, depends); - heevd_ev = call_heevd(exec_q, jobz_val, uplo_val, n, a, w, host_task_events, depends); - } - else if ((eig_vecs_typenum == dpctl_capi.UAR_CFLOAT_) && (eig_vals_typenum == dpctl_capi.UAR_FLOAT_)) - { - std::complex* a = reinterpret_cast*>(eig_vecs.get_data()); - float* w = reinterpret_cast(eig_vals.get_data()); + sycl::event args_ev = dpctl::utils::keep_args_alive(exec_q, {eig_vecs, eig_vals}, host_task_events); + return std::make_pair(args_ev, heevd_ev); +} - heevd_ev = call_heevd(exec_q, jobz_val, uplo_val, n, a, w, host_task_events, depends); - } - else +template +struct HeevdContigFactory +{ + fnT get() { - throw py::value_error("Unexpected types of either eigenvectors or eigenvalues"); + if constexpr (types::HeevdTypePairSupportFactory::is_defined) + { + return heevd_impl; + } + else + { + return nullptr; + } } +}; - sycl::event args_ev = dpctl::utils::keep_args_alive(exec_q, {eig_vecs, eig_vals}, host_task_events); - return std::make_pair(args_ev, heevd_ev); +void init_heevd_dispatch_table(void) +{ + type_dispatch::DispatchTableBuilder contig; + contig.populate_dispatch_table(heevd_dispatch_table); } } } diff --git a/dpnp/backend/extensions/lapack/heevd.hpp b/dpnp/backend/extensions/lapack/heevd.hpp index 93ce6fe560e1..85696d147f66 100644 --- a/dpnp/backend/extensions/lapack/heevd.hpp +++ b/dpnp/backend/extensions/lapack/heevd.hpp @@ -45,6 +45,8 @@ namespace lapack dpctl::tensor::usm_ndarray eig_vecs, dpctl::tensor::usm_ndarray eig_vals, const std::vector& depends); + + extern void init_heevd_dispatch_table(void); } 
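Note on the mechanism introduced above: the hard-coded typenum comparisons are replaced by a two-dimensional dispatch table that is filled once at module initialization, so heevd() only looks up the implementation for the (eigenvector, eigenvalue) type pair and raises if the slot is empty. Below is a minimal, self-contained sketch of that pattern; dpctl's utilities (dpctl_td_ns::num_types, usm_ndarray_types(), DispatchTableBuilder), the SYCL queue plumbing, and the oneMKL heevd call are replaced by simplified stand-ins so the example compiles on its own — it is not the extension's actual code.

```cpp
#include <complex>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <type_traits>
#include <vector>

constexpr int num_types = 3;                        // stand-in for dpctl_td_ns::num_types
enum TypeId : int { id_float = 0, id_double = 1, id_cdouble = 2 };

// Uniform signature over untyped buffers, mirroring heevd_impl_fn_ptr_t.
typedef void (*impl_fn_ptr_t)(std::int64_t n, char *in_a, char *out_w);

// Compile-time support matrix, playing the role of HeevdTypePairSupportFactory.
template <typename T, typename RealT> struct is_supported : std::false_type {};
template <> struct is_supported<std::complex<double>, double> : std::true_type {};
template <> struct is_supported<std::complex<float>, float> : std::true_type {};

// Typed implementation; the real heevd_impl<T, RealT> casts the buffers the same
// way before calling the eigen-solver.
template <typename T, typename RealT>
void impl(std::int64_t n, char *in_a, char *out_w)
{
    T *a = reinterpret_cast<T *>(in_a);
    RealT *w = reinterpret_cast<RealT *>(out_w);
    (void)n; (void)a; (void)w;                      // solver call elided in this sketch
}

// Factory: a typed implementation for supported pairs, nullptr otherwise --
// the role HeevdContigFactory::get() plays for the dispatch-table builder.
template <typename T, typename RealT>
impl_fn_ptr_t get_impl()
{
    if constexpr (is_supported<T, RealT>::value) { return impl<T, RealT>; }
    else { return nullptr; }
}

static impl_fn_ptr_t dispatch_table[num_types][num_types] = {};

void init_dispatch_table()                          // analogue of init_heevd_dispatch_table()
{
    dispatch_table[id_cdouble][id_double] = get_impl<std::complex<double>, double>();
    dispatch_table[id_cdouble][id_float] = get_impl<std::complex<double>, float>();  // stays nullptr
}

int main()
{
    init_dispatch_table();
    impl_fn_ptr_t fn = dispatch_table[id_cdouble][id_double];
    if (fn == nullptr)
        throw std::runtime_error("no implementation defined for this type pair");

    std::int64_t n = 4;
    std::vector<std::complex<double>> a(n * n);
    std::vector<double> w(n);
    fn(n, reinterpret_cast<char *>(a.data()), reinterpret_cast<char *>(w.data()));
    std::cout << "dispatched (complex<double>, double) implementation\n";
}
```

An empty (nullptr) slot is exactly what drives the "No heevd implementation defined for a pair of type for eigenvectors and eigenvalues" error raised above when an unsupported dtype combination is passed in.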
} } diff --git a/dpnp/backend/extensions/lapack/lapack_py.cpp b/dpnp/backend/extensions/lapack/lapack_py.cpp index ea7506308032..eaa3e6873b6a 100644 --- a/dpnp/backend/extensions/lapack/lapack_py.cpp +++ b/dpnp/backend/extensions/lapack/lapack_py.cpp @@ -33,25 +33,45 @@ #include "heevd.hpp" #include "syevd.hpp" +namespace lapack_ext = dpnp::backend::ext::lapack; namespace py = pybind11; +// populate dispatch vectors +void init_dispatch_vectors(void) +{ + lapack_ext::init_syevd_dispatch_vector(); +} + +// populate dispatch tables +void init_dispatch_tables(void) +{ + lapack_ext::init_heevd_dispatch_table(); +} + PYBIND11_MODULE(_lapack_impl, m) { + init_dispatch_vectors(); + init_dispatch_tables(); + m.def("_heevd", - &dpnp::backend::ext::lapack::heevd, + &lapack_ext::heevd, "Call `heevd` from OneMKL LAPACK library to return " "the eigenvalues and eigenvectors of a complex Hermitian matrix", py::arg("sycl_queue"), - py::arg("jobz"), py::arg("upper_lower"), - py::arg("eig_vecs"), py::arg("eig_vals"), + py::arg("jobz"), + py::arg("upper_lower"), + py::arg("eig_vecs"), + py::arg("eig_vals"), py::arg("depends") = py::list()); m.def("_syevd", - &dpnp::backend::ext::lapack::syevd, + &lapack_ext::syevd, "Call `syevd` from OneMKL LAPACK library to return " "the eigenvalues and eigenvectors of a real symmetric matrix", py::arg("sycl_queue"), - py::arg("jobz"), py::arg("upper_lower"), - py::arg("eig_vecs"), py::arg("eig_vals"), + py::arg("jobz"), + py::arg("upper_lower"), + py::arg("eig_vecs"), + py::arg("eig_vals"), py::arg("depends") = py::list()); } diff --git a/dpnp/backend/extensions/lapack/syevd.cpp b/dpnp/backend/extensions/lapack/syevd.cpp index a4dded7543ab..4e4ce0001a32 100644 --- a/dpnp/backend/extensions/lapack/syevd.cpp +++ b/dpnp/backend/extensions/lapack/syevd.cpp @@ -26,7 +26,13 @@ #include +// dpctl tensor headers +#include "utils/memory_overlap.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" + #include "syevd.hpp" +#include "types_matrix.hpp" #include "dpnp_utils.hpp" @@ -42,18 +48,34 @@ namespace lapack namespace mkl_lapack = oneapi::mkl::lapack; namespace py = pybind11; +namespace type_dispatch = dpctl::tensor::type_dispatch; +namespace type_utils = dpctl::tensor::type_utils; + +typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue, + const oneapi::mkl::job, + const oneapi::mkl::uplo, + const std::int64_t, + char*, + char*, + std::vector&, + const std::vector&); + +static syevd_impl_fn_ptr_t syevd_dispatch_vector[type_dispatch::num_types]; template -static sycl::event call_syevd(sycl::queue exec_q, +static sycl::event syevd_impl(sycl::queue exec_q, const oneapi::mkl::job jobz, const oneapi::mkl::uplo upper_lower, const std::int64_t n, - T* a, - T* w, + char* in_a, + char* out_w, std::vector& host_task_events, const std::vector& depends) { - validate_type_for_device(exec_q); + type_utils::validate_type_for_device(exec_q); + + T* a = reinterpret_cast(in_a); + T* w = reinterpret_cast(out_w); const std::int64_t lda = std::max(1UL, n); const std::int64_t scratchpad_size = mkl_lapack::syevd_scratchpad_size(exec_q, jobz, upper_lower, n, lda); @@ -162,13 +184,11 @@ std::pair syevd(sycl::queue exec_q, throw py::value_error("Execution queue is not compatible with allocation queues"); } - // check that arrays do not overlap, and concurrent access is safe. 
- // TODO: need to be exposed by DPCTL headers - // auto const& overlap = dpctl::tensor::overlap::MemoryOverlap(); - // if (overlap(eig_vecs, eig_vals)) - // { - // throw py::value_error("Arrays index overlapping segments of memory"); - // } + auto const& overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(eig_vecs, eig_vals)) + { + throw py::value_error("Arrays with eigenvectors and eigenvalues are overlapping segments of memory"); + } bool is_eig_vecs_f_contig = eig_vecs.is_f_contiguous(); bool is_eig_vals_c_contig = eig_vals.is_c_contiguous(); @@ -181,43 +201,56 @@ std::pair syevd(sycl::queue exec_q, throw py::value_error("An array with output eigenvalues must be C-contiguous"); } - int eig_vecs_typenum = eig_vecs.get_typenum(); - int eig_vals_typenum = eig_vals.get_typenum(); - auto const& dpctl_capi = dpctl::detail::dpctl_capi::get(); + auto array_types = type_dispatch::usm_ndarray_types(); + int eig_vecs_type_id = array_types.typenum_to_lookup_id(eig_vecs.get_typenum()); + int eig_vals_type_id = array_types.typenum_to_lookup_id(eig_vals.get_typenum()); - sycl::event syevd_ev; - std::vector host_task_events; + if (eig_vecs_type_id != eig_vals_type_id) + { + throw py::value_error("Types of eigenvectors and eigenvalues are missmatched"); + } + + syevd_impl_fn_ptr_t syevd_fn = syevd_dispatch_vector[eig_vecs_type_id]; + if (syevd_fn == nullptr) + { + throw py::value_error("No syevd implementation defined for a type of eigenvectors and eigenvalues"); + } + + char* eig_vecs_data = eig_vecs.get_data(); + char* eig_vals_data = eig_vals.get_data(); const std::int64_t n = eig_vecs_shape[0]; const oneapi::mkl::job jobz_val = static_cast(jobz); const oneapi::mkl::uplo uplo_val = static_cast(upper_lower); - if (eig_vecs_typenum != eig_vals_typenum) - { - throw py::value_error("Types of eigenvectors and eigenvalues aare missmatched"); - } - else if (eig_vecs_typenum == dpctl_capi.UAR_DOUBLE_) - { - double* a = reinterpret_cast(eig_vecs.get_data()); - double* w = reinterpret_cast(eig_vals.get_data()); + std::vector host_task_events; + sycl::event syevd_ev = + syevd_fn(exec_q, jobz_val, uplo_val, n, eig_vecs_data, eig_vals_data, host_task_events, depends); - syevd_ev = call_syevd(exec_q, jobz_val, uplo_val, n, a, w, host_task_events, depends); - } - else if (eig_vecs_typenum == dpctl_capi.UAR_FLOAT_) - { - float* a = reinterpret_cast(eig_vecs.get_data()); - float* w = reinterpret_cast(eig_vals.get_data()); + sycl::event args_ev = dpctl::utils::keep_args_alive(exec_q, {eig_vecs, eig_vals}, host_task_events); + return std::make_pair(args_ev, syevd_ev); +} - syevd_ev = call_syevd(exec_q, jobz_val, uplo_val, n, a, w, host_task_events, depends); - } - else +template +struct SyevdContigFactory +{ + fnT get() { - throw py::value_error("Unexpected types with num=" + std::to_string(eig_vecs_typenum) + - " for eigenvectors and eigenvalues"); + if constexpr (types::SyevdTypePairSupportFactory::is_defined) + { + return syevd_impl; + } + else + { + return nullptr; + } } +}; - sycl::event args_ev = dpctl::utils::keep_args_alive(exec_q, {eig_vecs, eig_vals}, host_task_events); - return std::make_pair(args_ev, syevd_ev); +void init_syevd_dispatch_vector(void) +{ + type_dispatch::DispatchVectorBuilder contig; + contig.populate_dispatch_vector(syevd_dispatch_vector); } } } diff --git a/dpnp/backend/extensions/lapack/syevd.hpp b/dpnp/backend/extensions/lapack/syevd.hpp index 14d167ec02a7..c5f0bc1b1531 100644 --- a/dpnp/backend/extensions/lapack/syevd.hpp +++ b/dpnp/backend/extensions/lapack/syevd.hpp @@ 
-45,6 +45,8 @@ namespace lapack dpctl::tensor::usm_ndarray eig_vecs, dpctl::tensor::usm_ndarray eig_vals, const std::vector& depends = {}); + + extern void init_syevd_dispatch_vector(void); } } } diff --git a/dpnp/backend/extensions/lapack/types_matrix.hpp b/dpnp/backend/extensions/lapack/types_matrix.hpp new file mode 100644 index 000000000000..66dda89e891d --- /dev/null +++ b/dpnp/backend/extensions/lapack/types_matrix.hpp @@ -0,0 +1,88 @@ +//***************************************************************************** +// Copyright (c) 2023, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** + +#pragma once + +#include + +namespace dpnp +{ +namespace backend +{ +namespace ext +{ +namespace lapack +{ +namespace types +{ +// TODO: to remove, will be provided by dpctl tensor headers +template +struct TypePairEntry : std::bool_constant, std::is_same>> +{ + static constexpr bool is_defined = true; +}; + + +// TODO: to remove, will be provided by dpctl tensor headers +struct NotFoundEntry : std::true_type +{ + static constexpr bool is_defined = false; +}; + +/** + * @brief A factory to define pairs of supported types for which + * MKL LAPACK library provides support in oneapi::mkl::lapack::heevd function. + * + * @tparam T Type of array containing input matrix A and an output array with eigenvectors. + * @tparam RealT Type of output array containing eigenvalues of A. + */ +template +struct HeevdTypePairSupportFactory +{ + static constexpr bool is_defined = std::disjunction, RealT, double>, + TypePairEntry, RealT, float>, + // fall-through + NotFoundEntry>::is_defined; +}; + +/** + * @brief A factory to define pairs of supported types for which + * MKL LAPACK library provides support in oneapi::mkl::lapack::syevd function. + * + * @tparam T Type of array containing input matrix A and an output arrays with eigenvectors and eigenvectors. 
+ */ +template +struct SyevdTypePairSupportFactory +{ + static constexpr bool is_defined = std::disjunction, + TypePairEntry, + // fall-through + NotFoundEntry>::is_defined; +}; +} +} +} +} +} From 9678b1a67819a56b73c5cad43f35f53d0b64c70b Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Wed, 31 May 2023 04:54:55 -0500 Subject: [PATCH 2/6] Added dep on dpctl tensor headers and removed todo --- dpnp/backend/extensions/lapack/CMakeLists.txt | 4 +-- .../extensions/lapack/types_matrix.hpp | 26 +++++-------------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index fb95e30f489e..dea062935355 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -32,7 +32,7 @@ pybind11_add_module(${python_module_name} MODULE ) if (WIN32) - if (${CMAKE_VERSION} VERSION_LESS "3.23") + if (${CMAKE_VERSION} VERSION_LESS "3.27") # this is a work-around for target_link_options inserting option after -link option, cause # linker to ignore it. set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -fsycl-device-code-split=per_kernel") @@ -45,7 +45,7 @@ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../src) target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}) -target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}/../tensor/libtensor/include) +target_include_directories(${py_module_name} PUBLIC ${Dpctl_TENSOR_INCLUDE_DIR}) if (WIN32) target_compile_options(${python_module_name} PRIVATE diff --git a/dpnp/backend/extensions/lapack/types_matrix.hpp b/dpnp/backend/extensions/lapack/types_matrix.hpp index 66dda89e891d..ebfaf3cfb125 100644 --- a/dpnp/backend/extensions/lapack/types_matrix.hpp +++ b/dpnp/backend/extensions/lapack/types_matrix.hpp @@ -37,20 +37,6 @@ namespace lapack { namespace types { -// TODO: to remove, will be provided by dpctl tensor headers -template -struct TypePairEntry : std::bool_constant, std::is_same>> -{ - static constexpr bool is_defined = true; -}; - - -// TODO: to remove, will be provided by dpctl tensor headers -struct NotFoundEntry : std::true_type -{ - static constexpr bool is_defined = false; -}; - /** * @brief A factory to define pairs of supported types for which * MKL LAPACK library provides support in oneapi::mkl::lapack::heevd function. 
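Note on the support factories: types_matrix.hpp expresses the allowed type combinations as a std::disjunction over entry types that each expose an is_defined flag — the first entry whose type pair matches is selected, and a trailing "not defined" entry acts as the fall-through. The sketch below reproduces that idiom with simplified local entry types; they stand in for dpctl's TypePairDefinedEntry and NotDefinedEntry and are not their exact definitions.

```cpp
#include <complex>
#include <type_traits>

// Simplified stand-in for TypePairDefinedEntry: evaluates to "true" (and is
// therefore picked by std::disjunction) only when both types match, and then
// reports is_defined = true.
template <typename T1, typename Exp1, typename T2, typename Exp2>
struct PairEntry
    : std::bool_constant<std::is_same_v<T1, Exp1> && std::is_same_v<T2, Exp2>>
{
    static constexpr bool is_defined = true;
};

// Simplified stand-in for NotDefinedEntry: always "true" so the disjunction
// stops here when nothing above matched, and reports is_defined = false.
struct FallThroughEntry : std::true_type
{
    static constexpr bool is_defined = false;
};

// Same shape as HeevdTypePairSupportFactory: complex input with matching real output.
template <typename T, typename RealT>
struct HeevdSupport
{
    static constexpr bool is_defined =
        std::disjunction<PairEntry<T, std::complex<double>, RealT, double>,
                         PairEntry<T, std::complex<float>, RealT, float>,
                         FallThroughEntry>::is_defined;
};

static_assert(HeevdSupport<std::complex<double>, double>::is_defined);
static_assert(HeevdSupport<std::complex<float>, float>::is_defined);
static_assert(!HeevdSupport<double, double>::is_defined);

int main() { return 0; }
```

The same trait drives both compile-time selection (the Heevd/Syevd contiguous factories return nullptr when is_defined is false) and, indirectly, the runtime error reported for unsupported dtype combinations.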
@@ -61,10 +47,10 @@ struct NotFoundEntry : std::true_type template struct HeevdTypePairSupportFactory { - static constexpr bool is_defined = std::disjunction, RealT, double>, - TypePairEntry, RealT, float>, + static constexpr bool is_defined = std::disjunction, RealT, double>, + TypePairDefinedEntry, RealT, float>, // fall-through - NotFoundEntry>::is_defined; + NotDefinedEntry>::is_defined; }; /** @@ -76,10 +62,10 @@ struct HeevdTypePairSupportFactory template struct SyevdTypePairSupportFactory { - static constexpr bool is_defined = std::disjunction, - TypePairEntry, + static constexpr bool is_defined = std::disjunction, + TypePairDefinedEntry, // fall-through - NotFoundEntry>::is_defined; + NotDefinedEntry>::is_defined; }; } } From 2ab4159b35a41e1c2210217a0ff66b1f57c116f8 Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Wed, 31 May 2023 05:23:51 -0500 Subject: [PATCH 3/6] Resolved compilation errors --- dpnp/backend/extensions/lapack/CMakeLists.txt | 2 +- dpnp/backend/extensions/lapack/heevd.cpp | 8 +++----- dpnp/backend/extensions/lapack/syevd.cpp | 8 +++----- dpnp/backend/extensions/lapack/types_matrix.hpp | 16 ++++++++++------ 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index dea062935355..e54de4068c01 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -45,7 +45,7 @@ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_ target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../src) target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}) -target_include_directories(${py_module_name} PUBLIC ${Dpctl_TENSOR_INCLUDE_DIR}) +target_include_directories(${python_module_name} PUBLIC ${Dpctl_TENSOR_INCLUDE_DIR}) if (WIN32) target_compile_options(${python_module_name} PRIVATE diff --git a/dpnp/backend/extensions/lapack/heevd.cpp b/dpnp/backend/extensions/lapack/heevd.cpp index bfb18697a56e..f99fb94c18ec 100644 --- a/dpnp/backend/extensions/lapack/heevd.cpp +++ b/dpnp/backend/extensions/lapack/heevd.cpp @@ -28,7 +28,6 @@ // dpctl tensor headers #include "utils/memory_overlap.hpp" -#include "utils/type_dispatch.hpp" #include "utils/type_utils.hpp" #include "heevd.hpp" @@ -48,7 +47,6 @@ namespace lapack namespace mkl_lapack = oneapi::mkl::lapack; namespace py = pybind11; -namespace type_dispatch = dpctl::tensor::type_dispatch; namespace type_utils = dpctl::tensor::type_utils; typedef sycl::event (*heevd_impl_fn_ptr_t)(sycl::queue, @@ -60,7 +58,7 @@ typedef sycl::event (*heevd_impl_fn_ptr_t)(sycl::queue, std::vector&, const std::vector&); -static heevd_impl_fn_ptr_t heevd_dispatch_table[type_dispatch::num_types][type_dispatch::num_types]; +static heevd_impl_fn_ptr_t heevd_dispatch_table[dpctl_td_ns::num_types][dpctl_td_ns::num_types]; template static sycl::event heevd_impl(sycl::queue exec_q, @@ -202,7 +200,7 @@ std::pair heevd(sycl::queue exec_q, throw py::value_error("An array with output eigenvalues must be C-contiguous"); } - auto array_types = type_dispatch::usm_ndarray_types(); + auto array_types = dpctl_td_ns::usm_ndarray_types(); int eig_vecs_type_id = array_types.typenum_to_lookup_id(eig_vecs.get_typenum()); int eig_vals_type_id = array_types.typenum_to_lookup_id(eig_vals.get_typenum()); @@ -245,7 +243,7 @@ struct HeevdContigFactory void init_heevd_dispatch_table(void) { - type_dispatch::DispatchTableBuilder contig; + 
dpctl_td_ns::DispatchTableBuilder contig; contig.populate_dispatch_table(heevd_dispatch_table); } } diff --git a/dpnp/backend/extensions/lapack/syevd.cpp b/dpnp/backend/extensions/lapack/syevd.cpp index 4e4ce0001a32..d03c2dff372c 100644 --- a/dpnp/backend/extensions/lapack/syevd.cpp +++ b/dpnp/backend/extensions/lapack/syevd.cpp @@ -28,7 +28,6 @@ // dpctl tensor headers #include "utils/memory_overlap.hpp" -#include "utils/type_dispatch.hpp" #include "utils/type_utils.hpp" #include "syevd.hpp" @@ -48,7 +47,6 @@ namespace lapack namespace mkl_lapack = oneapi::mkl::lapack; namespace py = pybind11; -namespace type_dispatch = dpctl::tensor::type_dispatch; namespace type_utils = dpctl::tensor::type_utils; typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue, @@ -60,7 +58,7 @@ typedef sycl::event (*syevd_impl_fn_ptr_t)(sycl::queue, std::vector&, const std::vector&); -static syevd_impl_fn_ptr_t syevd_dispatch_vector[type_dispatch::num_types]; +static syevd_impl_fn_ptr_t syevd_dispatch_vector[dpctl_td_ns::num_types]; template static sycl::event syevd_impl(sycl::queue exec_q, @@ -201,7 +199,7 @@ std::pair syevd(sycl::queue exec_q, throw py::value_error("An array with output eigenvalues must be C-contiguous"); } - auto array_types = type_dispatch::usm_ndarray_types(); + auto array_types = dpctl_td_ns::usm_ndarray_types(); int eig_vecs_type_id = array_types.typenum_to_lookup_id(eig_vecs.get_typenum()); int eig_vals_type_id = array_types.typenum_to_lookup_id(eig_vals.get_typenum()); @@ -249,7 +247,7 @@ struct SyevdContigFactory void init_syevd_dispatch_vector(void) { - type_dispatch::DispatchVectorBuilder contig; + dpctl_td_ns::DispatchVectorBuilder contig; contig.populate_dispatch_vector(syevd_dispatch_vector); } } diff --git a/dpnp/backend/extensions/lapack/types_matrix.hpp b/dpnp/backend/extensions/lapack/types_matrix.hpp index ebfaf3cfb125..a56ca4926256 100644 --- a/dpnp/backend/extensions/lapack/types_matrix.hpp +++ b/dpnp/backend/extensions/lapack/types_matrix.hpp @@ -27,6 +27,10 @@ #include +#include "utils/type_dispatch.hpp" + +namespace dpctl_td_ns = dpctl::tensor::type_dispatch; + namespace dpnp { namespace backend @@ -47,10 +51,10 @@ namespace types template struct HeevdTypePairSupportFactory { - static constexpr bool is_defined = std::disjunction, RealT, double>, - TypePairDefinedEntry, RealT, float>, + static constexpr bool is_defined = std::disjunction, RealT, double>, + dpctl_td_ns::TypePairDefinedEntry, RealT, float>, // fall-through - NotDefinedEntry>::is_defined; + dpctl_td_ns::NotDefinedEntry>::is_defined; }; /** @@ -62,10 +66,10 @@ struct HeevdTypePairSupportFactory template struct SyevdTypePairSupportFactory { - static constexpr bool is_defined = std::disjunction, - TypePairDefinedEntry, + static constexpr bool is_defined = std::disjunction, + dpctl_td_ns::TypePairDefinedEntry, // fall-through - NotDefinedEntry>::is_defined; + dpctl_td_ns::NotDefinedEntry>::is_defined; }; } } From ce3f60310de5b4ef880ba1ae98300b9b20a88a33 Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Wed, 31 May 2023 08:51:37 -0500 Subject: [PATCH 4/6] Fix coverage action --- .github/workflows/generate_coverage.yaml | 3 +-- dpnp/backend/extensions/lapack/CMakeLists.txt | 3 +++ dpnp/backend/extensions/lapack/types_matrix.hpp | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml index 4d054274502f..f60a8cbc2eaa 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage.yaml @@ 
-34,7 +34,6 @@ jobs: python-version: ${{ env.python-ver }} miniconda-version: 'latest' activate-environment: 'coverage' - channels: intel, conda-forge - name: Install Lcov run: | @@ -42,7 +41,7 @@ jobs: - name: Install dpnp dependencies run: | conda install cython llvm cmake scikit-build ninja pytest pytest-cov coverage[toml] \ - dppy/label/dev::dpctl dpcpp_linux-64 mkl-devel-dpcpp tbb-devel onedpl-devel + dpctl dpcpp_linux-64 sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel onedpl-devel -c dppy/label/dev -c intel -c conda-forge --override-channels - name: Conda info run: | conda info diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index e54de4068c01..c104c15e831b 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -44,6 +44,9 @@ set_target_properties(${python_module_name} PROPERTIES CMAKE_POSITION_INDEPENDEN target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../include) target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../src) +message(STATUS "Dpctl_INCLUDE_DIRS=" ${Dpctl_INCLUDE_DIRS}) +message(STATUS "Dpctl_TENSOR_INCLUDE_DIR=" ${Dpctl_TENSOR_INCLUDE_DIR}) + target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}) target_include_directories(${python_module_name} PUBLIC ${Dpctl_TENSOR_INCLUDE_DIR}) diff --git a/dpnp/backend/extensions/lapack/types_matrix.hpp b/dpnp/backend/extensions/lapack/types_matrix.hpp index a56ca4926256..4175873b541f 100644 --- a/dpnp/backend/extensions/lapack/types_matrix.hpp +++ b/dpnp/backend/extensions/lapack/types_matrix.hpp @@ -27,8 +27,10 @@ #include +// dpctl tensor headers #include "utils/type_dispatch.hpp" +// dpctl namespace for operations with types namespace dpctl_td_ns = dpctl::tensor::type_dispatch; namespace dpnp From 57f86e6c5aac46062883c758ad33d1c00036fc2b Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Wed, 31 May 2023 09:30:49 -0500 Subject: [PATCH 5/6] Fix sphinix build --- .github/workflows/build-sphinx.yml | 5 +++-- .github/workflows/generate_coverage.yaml | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-sphinx.yml b/.github/workflows/build-sphinx.yml index f6664f66c1f0..f4e3b74c6237 100644 --- a/.github/workflows/build-sphinx.yml +++ b/.github/workflows/build-sphinx.yml @@ -17,6 +17,7 @@ jobs: env: python-ver: '3.9' + CHANNELS: '-c dppy/label/dev -c intel -c conda-forge --override-channels' steps: - name: Cancel Previous Runs @@ -74,10 +75,10 @@ jobs: - name: Install dpnp dependencies run: | conda install dpctl mkl-devel-dpcpp onedpl-devel tbb-devel dpcpp_linux-64 \ - cmake cython pytest ninja scikit-build -c dppy/label/dev -c intel -c conda-forge + cmake cython pytest ninja scikit-build sysroot_linux-64">=2.28" ${{ env.CHANNELS }} - name: Install cuPy dependencies - run: conda install -c conda-forge cupy cudatoolkit=10.0 + run: conda install cupy cudatoolkit=10.0 - name: Conda info run: conda info diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml index f60a8cbc2eaa..fd38dde9a5d3 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage.yaml @@ -15,6 +15,7 @@ jobs: env: python-ver: '3.10' + CHANNELS: '-c dppy/label/dev -c intel -c conda-forge --override-channels' steps: - name: Cancel Previous Runs @@ -41,7 +42,7 @@ jobs: - name: Install dpnp dependencies run: | conda install cython llvm cmake 
scikit-build ninja pytest pytest-cov coverage[toml] \ - dpctl dpcpp_linux-64 sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel onedpl-devel -c dppy/label/dev -c intel -c conda-forge --override-channels + dpctl dpcpp_linux-64 sysroot_linux-64">=2.28" mkl-devel-dpcpp tbb-devel onedpl-devel ${{ env.CHANNELS }} - name: Conda info run: | conda info From cb5d0292bc93a40496c8411613c8851e4a707e55 Mon Sep 17 00:00:00 2001 From: Anton Volkov Date: Wed, 31 May 2023 09:42:50 -0500 Subject: [PATCH 6/6] Added print of dpctl includes --- CMakeLists.txt | 3 +++ dpnp/backend/extensions/lapack/CMakeLists.txt | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cdecc3cefd72..efa35ac50869 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,6 +57,9 @@ set(CYTHON_FLAGS "-t -w \"${CMAKE_SOURCE_DIR}\"") find_package(Cython REQUIRED) find_package(Dpctl REQUIRED) +message(STATUS "Dpctl_INCLUDE_DIRS=" ${Dpctl_INCLUDE_DIRS}) +message(STATUS "Dpctl_TENSOR_INCLUDE_DIR=" ${Dpctl_TENSOR_INCLUDE_DIR}) + if(WIN32) string(CONCAT WARNING_FLAGS "-Wall " diff --git a/dpnp/backend/extensions/lapack/CMakeLists.txt b/dpnp/backend/extensions/lapack/CMakeLists.txt index c104c15e831b..e54de4068c01 100644 --- a/dpnp/backend/extensions/lapack/CMakeLists.txt +++ b/dpnp/backend/extensions/lapack/CMakeLists.txt @@ -44,9 +44,6 @@ set_target_properties(${python_module_name} PROPERTIES CMAKE_POSITION_INDEPENDEN target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../include) target_include_directories(${python_module_name} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../src) -message(STATUS "Dpctl_INCLUDE_DIRS=" ${Dpctl_INCLUDE_DIRS}) -message(STATUS "Dpctl_TENSOR_INCLUDE_DIR=" ${Dpctl_TENSOR_INCLUDE_DIR}) - target_include_directories(${python_module_name} PUBLIC ${Dpctl_INCLUDE_DIRS}) target_include_directories(${python_module_name} PUBLIC ${Dpctl_TENSOR_INCLUDE_DIR})
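Taken together, the series moves all type-specific wiring to import time: PYBIND11_MODULE(_lapack_impl, m) first calls init_dispatch_vectors()/init_dispatch_tables(), so each _heevd/_syevd invocation reduces to a lookup plus a call through a function pointer. The hypothetical module below illustrates that initialization pattern in isolation; the module name, exported function, and type ids are placeholders, not part of dpnp.

```cpp
// Hypothetical pybind11 module showing the import-time initialization pattern:
// dispatch storage is populated once inside PYBIND11_MODULE, and the exported
// function only looks up a precomputed entry.  Not the dpnp extension itself.
#include <pybind11/pybind11.h>

namespace py = pybind11;

using impl_fn_ptr_t = double (*)(double);

static impl_fn_ptr_t dispatch_vector[2] = {nullptr, nullptr};

static double square(double x) { return x * x; }

// Analogue of init_syevd_dispatch_vector() / init_heevd_dispatch_table().
void init_dispatch_vectors()
{
    dispatch_vector[0] = square;   // slot 1 deliberately left unimplemented
}

double call_impl(int type_id, double value)
{
    if (type_id < 0 || type_id > 1) {
        throw py::value_error("Unknown type id");
    }
    impl_fn_ptr_t fn = dispatch_vector[type_id];
    if (fn == nullptr) {
        throw py::value_error("No implementation defined for the requested type id");
    }
    return fn(value);
}

PYBIND11_MODULE(_demo_dispatch, m)
{
    init_dispatch_vectors();       // populate dispatch storage at import time
    m.def("call_impl",
          &call_impl,
          "Dispatch to a precomputed implementation by type id",
          py::arg("type_id"),
          py::arg("value"));
}
```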