From 92083875adf7be2549c1373a7d72c088728c4f55 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 14:19:19 +0100 Subject: [PATCH 01/10] Move dev_info check to host_task --- .../backends/cusolver/cusolver_helper.hpp | 37 +-- .../backends/cusolver/cusolver_lapack.cpp | 244 ++++++++++-------- 2 files changed, 151 insertions(+), 130 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_helper.hpp b/src/lapack/backends/cusolver/cusolver_helper.hpp index 0991b0efc..e03f51440 100644 --- a/src/lapack/backends/cusolver/cusolver_helper.hpp +++ b/src/lapack/backends/cusolver/cusolver_helper.hpp @@ -261,26 +261,27 @@ struct CudaEquivalentType> { /* devinfo */ -inline int get_cusolver_devinfo(sycl::queue &queue, sycl::buffer &devInfo) { - sycl::host_accessor dev_info_{ devInfo }; - return dev_info_[0]; -} - -inline int get_cusolver_devinfo(sycl::queue &queue, const int *devInfo) { - int dev_info_; - queue.wait(); - queue.memcpy(&dev_info_, devInfo, sizeof(int)); - return dev_info_; +// Accepts a int*, copies the memory from device to host, +// checks value does not indicate an error, frees the device memory +inline void lapack_info_check_and_free(int *dev_info_d, const char *func_name, + const char *cufunc_name, int num_elements = 1) { + int *dev_info_h = (int *)malloc(sizeof(int) * num_elements); + cuMemcpyDtoH(dev_info_h, reinterpret_cast(dev_info_d), sizeof(int) * num_elements); + for (uint32_t i = 0; i < num_elements; ++i) { + if (dev_info_h[i] > 0) + throw oneapi::mkl::lapack::computation_error( + func_name, + std::string(cufunc_name) + " failed with info = " + std::to_string(dev_info_h[i]), + dev_info_h[i]); + } + cuMemFree(reinterpret_cast(dev_info_d)); } -template -inline void lapack_info_check(sycl::queue &queue, DEVINFO_T devinfo, const char *func_name, - const char *cufunc_name) { - const int devinfo_ = get_cusolver_devinfo(queue, devinfo); - if (devinfo_ > 0) - throw oneapi::mkl::lapack::computation_error( - func_name, std::string(cufunc_name) + " failed with info = " + std::to_string(devinfo_), - devinfo_); +// Allocates and returns a CUDA device pointer for cuSolver dev_info +inline int *create_dev_info(int num_elements = 1) { + CUdeviceptr dev_info_d; + cuMemAlloc(&dev_info_d, sizeof(int) * num_elements); + return reinterpret_cast(dev_info_d); } } // namespace cusolver diff --git a/src/lapack/backends/cusolver/cusolver_lapack.cpp b/src/lapack/backends/cusolver/cusolver_lapack.cpp index 4fe2c30ab..79ff125d1 100644 --- a/src/lapack/backends/cusolver/cusolver_lapack.cpp +++ b/src/lapack/backends/cusolver/cusolver_lapack.cpp @@ -150,22 +150,24 @@ void getrf(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, // Create new buffer with 32-bit ints then copy over results std::uint64_t ipiv_size = std::min(n, m); sycl::buffer ipiv32(sycl::range<1>{ ipiv_size }); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto ipiv32_acc = ipiv32.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto ipiv32_ = sc.get_mem(ipiv32_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, a_, lda, scratch_, ipiv32_, - devInfo_); + 
dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); @@ -177,7 +179,6 @@ void getrf(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, ipiv_acc[index] = static_cast(ipiv32_acc[index]); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define GETRF_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -281,13 +282,11 @@ inline void gesvd(const char *func_name, Func func, sycl::queue &queue, oneapi:: using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, m, lda, ldu, ldvt, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto s_acc = s.template get_access(cgh); auto u_acc = u.template get_access(cgh); auto vt_acc = vt.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); @@ -295,16 +294,19 @@ inline void gesvd(const char *func_name, Func func, sycl::queue &queue, oneapi:: auto s_ = sc.get_mem(s_acc); auto u_ = sc.get_mem(u_acc); auto vt_ = sc.get_mem(vt_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + // rwork is set to nullptr. If set it is filled with information from the superdiagonal. CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_jobsvd(jobu), get_cusolver_jobsvd(jobvt), m, n, a_, lda, s_, u_, ldu, vt_, ldvt, - scratch_, scratchpad_size, nullptr, devInfo_); + scratch_, scratchpad_size, nullptr, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define GESVD_LAUNCHER(TYPE_A, TYPE_B, CUSOLVER_ROUTINE) \ @@ -332,25 +334,26 @@ inline void heevd(const char *func_name, Func func, sycl::queue &queue, oneapi:: using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto w_acc = w.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto w_ = sc.get_mem(w_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, w_, scratch_, - scratchpad_size, devInfo_); + scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define HEEVD_LAUNCHER(TYPE_A, TYPE_B, CUSOLVER_ROUTINE) \ @@ -375,27 +378,28 @@ inline void hegvd(const char *func_name, Func func, sycl::queue &queue, std::int using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto b_acc 
= b.template get_access(cgh); auto w_acc = w.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto b_ = sc.get_mem(b_acc); auto w_ = sc.get_mem(w_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_itype(itype), get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, - b_, ldb, w_, scratch_, scratchpad_size, devInfo_); + b_, ldb, w_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define HEGVD_LAUNCHER(TYPE_A, TYPE_B, CUSOLVER_ROUTINE) \ @@ -420,13 +424,11 @@ inline void hetrd(const char *func_name, Func func, sycl::queue &queue, oneapi:: using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto d_acc = d.template get_access(cgh); auto e_acc = e.template get_access(cgh); auto tau_acc = tau.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); @@ -434,14 +436,17 @@ inline void hetrd(const char *func_name, Func func, sycl::queue &queue, oneapi:: auto d_ = sc.get_mem(d_acc); auto e_ = sc.get_mem(e_acc); auto tau_ = sc.get_mem(tau_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, d_, e_, tau_, scratch_, scratchpad_size, devInfo_); + lda, d_, e_, tau_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define HETRD_LAUNCHER(TYPE_A, TYPE_B, CUSOLVER_ROUTINE) \ @@ -677,22 +682,23 @@ inline void potrf(const char *func_name, Func func, sycl::queue &queue, oneapi:: std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, scratch_, scratchpad_size, devInfo_); + lda, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define POTRF_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -715,22 
+721,23 @@ inline void potri(const char *func_name, Func func, sycl::queue &queue, oneapi:: std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, scratch_, scratchpad_size, devInfo_); + lda, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define POTRI_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -790,25 +797,26 @@ inline void syevd(const char *func_name, Func func, sycl::queue &queue, oneapi:: sycl::buffer &w, sycl::buffer &scratchpad, std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto w_acc = w.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto w_ = sc.get_mem(w_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, w_, scratch_, - scratchpad_size, devInfo_); + scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define SYEVD_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -831,27 +839,28 @@ inline void sygvd(const char *func_name, Func func, sycl::queue &queue, std::int sycl::buffer &scratchpad, std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto b_acc = b.template get_access(cgh); auto w_acc = w.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto b_ = sc.get_mem(b_acc); auto w_ = sc.get_mem(w_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_itype(itype), get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, - b_, ldb, w_, scratch_, scratchpad_size, devInfo_); + b_, ldb, w_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); 
}); - lapack_info_check(queue, devInfo, __func__, func_name); } #define SYGVD_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -875,13 +884,11 @@ inline void sytrd(const char *func_name, Func func, sycl::queue &queue, oneapi:: std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto d_acc = d.template get_access(cgh); auto e_acc = e.template get_access(cgh); auto tau_acc = tau.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); @@ -889,14 +896,17 @@ inline void sytrd(const char *func_name, Func func, sycl::queue &queue, oneapi:: auto d_ = sc.get_mem(d_acc); auto e_ = sc.get_mem(e_acc); auto tau_ = sc.get_mem(tau_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, d_, e_, tau_, scratch_, scratchpad_size, devInfo_); + lda, d_, e_, tau_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define SYTRD_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -920,7 +930,6 @@ inline void sytrf(const char *func_name, Func func, sycl::queue &queue, oneapi:: std::int64_t scratchpad_size) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - sycl::buffer devInfo{ 1 }; // cuSolver legacy api does not accept 64-bit ints. // To get around the limitation. 
@@ -931,17 +940,20 @@ inline void sytrf(const char *func_name, Func func, sycl::queue &queue, oneapi:: queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto ipiv32_acc = ipiv32.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto ipiv32_ = sc.get_mem(ipiv32_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, ipiv32_, scratch_, scratchpad_size, devInfo_); + lda, ipiv32_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); @@ -953,7 +965,6 @@ inline void sytrf(const char *func_name, Func func, sycl::queue &queue, oneapi:: ipiv_acc[index] = static_cast(ipiv32_acc[index]); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); } #define SYTRF_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -1327,7 +1338,6 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s std::uint64_t ipiv_size = std::min(n, m); int *ipiv32 = (int *)malloc_device(sizeof(int) * ipiv_size, queue); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1336,12 +1346,16 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); auto ipiv_ = reinterpret_cast(ipiv32); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, a_, lda, scratch_, ipiv_, - devInfo_); + dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); @@ -1356,9 +1370,6 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s queue.wait(); free(ipiv32, queue); - - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done_casting; } @@ -1470,7 +1481,6 @@ inline sycl::event gesvd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(m, n, lda, ldu, ldvt, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1482,17 +1492,19 @@ inline sycl::event gesvd(const char *func_name, Func func, sycl::queue &queue, auto s_ = reinterpret_cast(s); auto u_ = reinterpret_cast(u); auto vt_ = reinterpret_cast(vt); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + // rwork is set to nullptr. If set it is filled with information from the superdiagonal. 
CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_jobsvd(jobu), get_cusolver_jobsvd(jobvt), m, n, a_, lda, s_, u_, ldu, vt_, ldvt, - scratch_, scratchpad_size, nullptr, devInfo_); + scratch_, scratchpad_size, nullptr, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -1521,7 +1533,6 @@ inline sycl::event heevd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1531,16 +1542,18 @@ inline sycl::event heevd(const char *func_name, Func func, sycl::queue &queue, auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); auto w_ = reinterpret_cast(w); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, w_, scratch_, - scratchpad_size, devInfo_); + scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -1567,7 +1580,6 @@ inline sycl::event hegvd(const char *func_name, Func func, sycl::queue &queue, s using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1578,16 +1590,18 @@ inline sycl::event hegvd(const char *func_name, Func func, sycl::queue &queue, s auto a_ = reinterpret_cast(a); auto b_ = reinterpret_cast(b); auto w_ = reinterpret_cast(w); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_itype(itype), get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, - b_, ldb, w_, scratch_, scratchpad_size, devInfo); + b_, ldb, w_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -1614,7 +1628,6 @@ inline sycl::event hetrd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_A = typename CudaEquivalentType::Type; using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1626,15 +1639,17 @@ inline sycl::event hetrd(const char *func_name, Func func, sycl::queue &queue, auto d_ = reinterpret_cast(d); auto e_ = reinterpret_cast(e); auto tau_ = reinterpret_cast(tau); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); 
cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, d_, e_, tau_, scratch_, scratchpad_size, devInfo_); + lda, d_, e_, tau_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -1888,7 +1903,6 @@ inline sycl::event potrf(const char *func_name, Func func, sycl::queue &queue, const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1897,15 +1911,17 @@ inline sycl::event potrf(const char *func_name, Func func, sycl::queue &queue, onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, scratch_, scratchpad_size, devInfo_); + lda, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -1931,7 +1947,6 @@ inline sycl::event potri(const char *func_name, Func func, sycl::queue &queue, const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -1941,14 +1956,16 @@ inline sycl::event potri(const char *func_name, Func func, sycl::queue &queue, auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); auto scratch_ = reinterpret_cast(scratchpad); - auto devInfo_ = reinterpret_cast(devInfo); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, scratch_, scratchpad_size, devInfo_); + lda, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -2016,7 +2033,6 @@ inline sycl::event syevd(const char *func_name, Func func, sycl::queue &queue, const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -2027,15 +2043,17 @@ inline sycl::event syevd(const char *func_name, Func func, sycl::queue &queue, auto a_ = reinterpret_cast(a); auto w_ = reinterpret_cast(w); auto scratch_ = reinterpret_cast(scratchpad); - auto devInfo_ = reinterpret_cast(devInfo); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_job(jobz), 
get_cublas_fill_mode(uplo), n, a_, lda, w_, scratch_, - scratchpad_size, devInfo_); + scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -2061,7 +2079,6 @@ inline sycl::event sygvd(const char *func_name, Func func, sycl::queue &queue, s const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -2072,16 +2089,18 @@ inline sycl::event sygvd(const char *func_name, Func func, sycl::queue &queue, s auto a_ = reinterpret_cast(a); auto b_ = reinterpret_cast(b); auto w_ = reinterpret_cast(w); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cusolver_itype(itype), get_cusolver_job(jobz), get_cublas_fill_mode(uplo), n, a_, lda, - b_, ldb, w_, scratch_, scratchpad_size, devInfo); + b_, ldb, w_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -2106,7 +2125,6 @@ inline sycl::event sytrd(const char *func_name, Func func, sycl::queue &queue, const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); for (int64_t i = 0; i < num_events; i++) { @@ -2118,15 +2136,17 @@ inline sycl::event sytrd(const char *func_name, Func func, sycl::queue &queue, auto d_ = reinterpret_cast(d); auto e_ = reinterpret_cast(e); auto tau_ = reinterpret_cast(tau); - auto devInfo_ = reinterpret_cast(devInfo); auto scratch_ = reinterpret_cast(scratchpad); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, d_, e_, tau_, scratch_, scratchpad_size, devInfo_); + lda, d_, e_, tau_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done; } @@ -2151,7 +2171,6 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, const std::vector &dependencies) { using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); - int *devInfo = (int *)malloc_device(sizeof(int), queue); // cuSolver legacy api does not accept 64-bit ints. // To get around the limitation. 
@@ -2169,10 +2188,14 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, auto a_ = reinterpret_cast(a); auto scratch_ = reinterpret_cast(scratchpad); auto ipiv_ = reinterpret_cast(ipiv32); - auto devInfo_ = reinterpret_cast(devInfo); cusolverStatus_t err; + + int *dev_info_d = create_dev_info(); + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, ipiv_, scratch_, scratchpad_size, devInfo_); + lda, ipiv_, scratch_, scratchpad_size, dev_info_d); + + lapack_info_check_and_free(dev_info_d, __func__, func_name); }); }); @@ -2187,9 +2210,6 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, queue.wait(); free(ipiv32, queue); - - lapack_info_check(queue, devInfo, __func__, func_name); - free(devInfo, queue); return done_casting; } From c515f253d00843f538babbdfe8b9a3816020eaf3 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 15:29:39 +0100 Subject: [PATCH 02/10] Asynchronously free ipiv 32-bit memory --- .../backends/cusolver/cusolver_helper.hpp | 18 ++++++++++++++++++ .../backends/cusolver/cusolver_lapack.cpp | 10 +++------- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_helper.hpp b/src/lapack/backends/cusolver/cusolver_helper.hpp index e03f51440..e20d392ef 100644 --- a/src/lapack/backends/cusolver/cusolver_helper.hpp +++ b/src/lapack/backends/cusolver/cusolver_helper.hpp @@ -284,6 +284,24 @@ inline int *create_dev_info(int num_elements = 1) { return reinterpret_cast(dev_info_d); } +// Helper function for waiting on a vector of sycl events +inline void depends_on_events(sycl::handler &cgh, std::vector &dependencies = {}) { + for (sycl::event &e : dependencies) + cgh.depends_on(e); +} + +// Asynchronously frees sycl USM `ptr` after waiting on events `dependencies` +template +inline sycl::event free_async(sycl::queue &queue, T *ptr, + std::vector &dependencies = {}) { + sycl::event done = queue.submit([&](sycl::handler &cgh) { + depends_on_events(cgh, dependencies); + + cgh.host_task([=](sycl::interop_handle ih) { sycl::free(ptr, queue); }); + }); + return done; +} + } // namespace cusolver } // namespace lapack } // namespace mkl diff --git a/src/lapack/backends/cusolver/cusolver_lapack.cpp b/src/lapack/backends/cusolver/cusolver_lapack.cpp index 79ff125d1..37317e7b1 100644 --- a/src/lapack/backends/cusolver/cusolver_lapack.cpp +++ b/src/lapack/backends/cusolver/cusolver_lapack.cpp @@ -1367,9 +1367,8 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s }); }); - queue.wait(); + free_async(queue, ipiv32, { done_casting }); - free(ipiv32, queue); return done_casting; } @@ -1449,9 +1448,7 @@ inline sycl::event getrs(const char *func_name, Func func, sycl::queue &queue, }); }); - queue.wait(); - - free(ipiv32, queue); + free_async(queue, ipiv32, { done }); return done; } @@ -2207,9 +2204,8 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, }); }); - queue.wait(); + free_async(queue, ipiv32, { done_casting }); - free(ipiv32, queue); return done_casting; } From e12107b5ce7d3d4c4cf0abe3ee8697168ac1c4d6 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 15:33:12 +0100 Subject: [PATCH 03/10] User helper for depends on events --- .../backends/cusolver/cusolver_helper.hpp | 7 +- .../backends/cusolver/cusolver_lapack.cpp | 126 ++++-------------- 2 files changed, 30 insertions(+), 103 deletions(-) diff --git 
a/src/lapack/backends/cusolver/cusolver_helper.hpp b/src/lapack/backends/cusolver/cusolver_helper.hpp index e20d392ef..fbac40232 100644 --- a/src/lapack/backends/cusolver/cusolver_helper.hpp +++ b/src/lapack/backends/cusolver/cusolver_helper.hpp @@ -285,15 +285,16 @@ inline int *create_dev_info(int num_elements = 1) { } // Helper function for waiting on a vector of sycl events -inline void depends_on_events(sycl::handler &cgh, std::vector &dependencies = {}) { - for (sycl::event &e : dependencies) +inline void depends_on_events(sycl::handler &cgh, + const std::vector &dependencies = {}) { + for (auto &e : dependencies) cgh.depends_on(e); } // Asynchronously frees sycl USM `ptr` after waiting on events `dependencies` template inline sycl::event free_async(sycl::queue &queue, T *ptr, - std::vector &dependencies = {}) { + const std::vector &dependencies = {}) { sycl::event done = queue.submit([&](sycl::handler &cgh) { depends_on_events(cgh, dependencies); diff --git a/src/lapack/backends/cusolver/cusolver_lapack.cpp b/src/lapack/backends/cusolver/cusolver_lapack.cpp index 37317e7b1..ad6dca86b 100644 --- a/src/lapack/backends/cusolver/cusolver_lapack.cpp +++ b/src/lapack/backends/cusolver/cusolver_lapack.cpp @@ -1227,10 +1227,7 @@ inline sycl::event gebrd(const char *func_name, Func func, sycl::queue &queue, s throw unimplemented("lapack", "gebrd", "cusolver gebrd does not support m < n"); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1292,10 +1289,7 @@ inline sycl::event geqrf(const char *func_name, Func func, sycl::queue &queue, s using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1339,10 +1333,7 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s int *ipiv32 = (int *)malloc_device(sizeof(int) * ipiv_size, queue); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1432,10 +1423,7 @@ inline sycl::event getrs(const char *func_name, Func func, sycl::queue &queue, }); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); cgh.depends_on(done_casting); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); @@ -1479,10 +1467,7 @@ inline sycl::event gesvd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(m, n, lda, ldu, ldvt, 
scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1531,10 +1516,7 @@ inline sycl::event heevd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1578,10 +1560,7 @@ inline sycl::event hegvd(const char *func_name, Func func, sycl::queue &queue, s using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1626,10 +1605,7 @@ inline sycl::event hetrd(const char *func_name, Func func, sycl::queue &queue, using cuDataType_B = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1686,10 +1662,7 @@ inline sycl::event orgbr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, k, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1725,10 +1698,7 @@ inline sycl::event orgqr(const char *func_name, Func func, sycl::queue &queue, s using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, k, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1763,10 +1733,7 @@ inline sycl::event orgtr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - 
cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1803,10 +1770,7 @@ inline sycl::event ormtr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, lda, ldc, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1859,10 +1823,7 @@ inline sycl::event ormqr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, k, lda, ldc, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1901,10 +1862,7 @@ inline sycl::event potrf(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1945,10 +1903,7 @@ inline sycl::event potri(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1991,10 +1946,7 @@ inline sycl::event potrs(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, nrhs, lda, ldb, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2031,10 +1983,7 @@ inline sycl::event syevd(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { 
auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2077,10 +2026,7 @@ inline sycl::event sygvd(const char *func_name, Func func, sycl::queue &queue, s using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, ldb, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2123,10 +2069,7 @@ inline sycl::event sytrd(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2176,10 +2119,7 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, int *ipiv32 = (int *)malloc_device(sizeof(int) * ipiv_size, queue); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2261,10 +2201,7 @@ inline sycl::event ungbr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2300,10 +2237,7 @@ inline sycl::event ungqr(const char *func_name, Func func, sycl::queue &queue, s using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, k, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2338,10 +2272,7 @@ inline sycl::event ungtr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2392,10 +2323,7 @@ inline sycl::event unmqr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename 
CudaEquivalentType::Type; overflow_check(n, lda, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -2436,10 +2364,7 @@ inline sycl::event unmtr(const char *func_name, Func func, sycl::queue &queue, using cuDataType = typename CudaEquivalentType::Type; overflow_check(m, n, lda, ldc, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -3271,6 +3196,7 @@ inline void unmqr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, ldc, scratch_size); }); }); + e.wait(); } #define UNMQR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ From 654e5f5896f13d02e2be2cdd392edb4a56a1f114 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 15:36:03 +0100 Subject: [PATCH 04/10] Remove unncessary reinterpret_cast for ipiv32 --- src/lapack/backends/cusolver/cusolver_lapack.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_lapack.cpp b/src/lapack/backends/cusolver/cusolver_lapack.cpp index ad6dca86b..b75954465 100644 --- a/src/lapack/backends/cusolver/cusolver_lapack.cpp +++ b/src/lapack/backends/cusolver/cusolver_lapack.cpp @@ -1338,12 +1338,11 @@ inline sycl::event getrf(const char *func_name, Func func, sycl::queue &queue, s auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); auto scratch_ = reinterpret_cast(scratchpad); - auto ipiv_ = reinterpret_cast(ipiv32); cusolverStatus_t err; int *dev_info_d = create_dev_info(); - CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, a_, lda, scratch_, ipiv_, + CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, a_, lda, scratch_, ipiv32, dev_info_d); lapack_info_check_and_free(dev_info_d, __func__, func_name); @@ -1428,11 +1427,10 @@ inline sycl::event getrs(const char *func_name, Func func, sycl::queue &queue, onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); - auto ipiv_ = reinterpret_cast(ipiv32); auto b_ = reinterpret_cast(b); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_operation(trans), n, - nrhs, a_, lda, ipiv_, b_, ldb, nullptr); + nrhs, a_, lda, ipiv32, b_, ldb, nullptr); }); }); @@ -2124,13 +2122,12 @@ inline sycl::event sytrf(const char *func_name, Func func, sycl::queue &queue, auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); auto scratch_ = reinterpret_cast(scratchpad); - auto ipiv_ = reinterpret_cast(ipiv32); cusolverStatus_t err; int *dev_info_d = create_dev_info(); CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_fill_mode(uplo), n, a_, - lda, ipiv_, scratch_, scratchpad_size, dev_info_d); + lda, ipiv32, scratch_, scratchpad_size, dev_info_d); lapack_info_check_and_free(dev_info_d, __func__, func_name); }); From 39d8f6589088c167a169e673af13c1acbbbc6b88 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 
2022 15:44:56 +0100 Subject: [PATCH 05/10] Wait on scratchpad_size host task --- .../backends/cusolver/cusolver_lapack.cpp | 65 ++++++++++++------- 1 file changed, 43 insertions(+), 22 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_lapack.cpp b/src/lapack/backends/cusolver/cusolver_lapack.cpp index b75954465..7fc4889d9 100644 --- a/src/lapack/backends/cusolver/cusolver_lapack.cpp +++ b/src/lapack/backends/cusolver/cusolver_lapack.cpp @@ -2398,13 +2398,14 @@ template inline void gebrd_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, scratch_size); }); }); + e.wait(); } #define GEBRD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2449,13 +2450,14 @@ template inline void geqrf_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, nullptr, lda, scratch_size); }); }); + e.wait(); } #define GEQRF_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2480,13 +2482,14 @@ inline void gesvd_scratchpad_size(const char *func_name, Func func, sycl::queue oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, std::int64_t lda, std::int64_t ldu, std::int64_t ldvt, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, scratch_size); }); }); + e.wait(); } #define GESVD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2511,13 +2514,14 @@ template inline void getrf_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, m, n, nullptr, lda, scratch_size); }); }); + e.wait(); } #define GETRF_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2576,7 +2580,7 @@ template inline void heevd_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2585,6 +2589,7 @@ inline void heevd_scratchpad_size(const char *func_name, Func func, sycl::queue scratch_size); }); }); + e.wait(); } #define HEEVD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2608,7 +2613,7 @@ inline void hegvd_scratchpad_size(const char 
*func_name, Func func, sycl::queue std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2617,6 +2622,7 @@ inline void hegvd_scratchpad_size(const char *func_name, Func func, sycl::queue lda, nullptr, ldb, nullptr, scratch_size); }); }); + e.wait(); } #define HEGVD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2639,7 +2645,7 @@ template inline void hetrd_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2647,6 +2653,7 @@ inline void hetrd_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, nullptr, nullptr, scratch_size); }); }); + e.wait(); } #define HETRD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2679,7 +2686,7 @@ template inline void orgbr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::generate vec, std::int64_t m, std::int64_t n, std::int64_t k, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2687,6 +2694,7 @@ inline void orgbr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, scratch_size); }); }); + e.wait(); } #define ORGBR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2709,7 +2717,7 @@ template inline void orgtr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2717,6 +2725,7 @@ inline void orgtr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, scratch_size); }); }); + e.wait(); } #define ORGTR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2738,7 +2747,7 @@ template inline void orgqr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, std::int64_t n, std::int64_t k, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2746,6 +2755,7 @@ inline void orgqr_scratchpad_size(const char *func_name, Func func, sycl::queue scratch_size); }); }); + e.wait(); } #define ORGQR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2815,7 +2825,7 @@ inline void ormtr_scratchpad_size(const char *func_name, Func func, sycl::queue oneapi::mkl::side side, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, std::int64_t m, std::int64_t n, std::int64_t lda, std::int64_t ldc, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + 
auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2824,6 +2834,7 @@ inline void ormtr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, nullptr, ldc, scratch_size); }); }); + e.wait(); } #define ORMTR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2847,7 +2858,7 @@ template inline void potrf_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2855,6 +2866,7 @@ inline void potrf_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, scratch_size); }); }); + e.wait(); } #define POTRF_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2894,7 +2906,7 @@ template inline void potri_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2902,6 +2914,7 @@ inline void potri_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, scratch_size); }); }); + e.wait(); } #define POTRI_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2925,13 +2938,14 @@ template inline void sytrf_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, n, nullptr, lda, scratch_size); }); }); + e.wait(); } #define SYTRF_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2955,7 +2969,7 @@ template inline void syevd_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2964,6 +2978,7 @@ inline void syevd_scratchpad_size(const char *func_name, Func func, sycl::queue scratch_size); }); }); + e.wait(); } #define SYEVD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -2987,7 +3002,7 @@ inline void sygvd_scratchpad_size(const char *func_name, Func func, sycl::queue std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -2996,6 +3011,7 @@ inline void sygvd_scratchpad_size(const char *func_name, Func func, sycl::queue lda, nullptr, ldb, nullptr, scratch_size); }); }); + e.wait(); 
} #define SYGVD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -3018,7 +3034,7 @@ template inline void sytrd_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3026,6 +3042,7 @@ inline void sytrd_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, nullptr, nullptr, scratch_size); }); }); + e.wait(); } #define SYTRD_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -3078,7 +3095,7 @@ template inline void ungbr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::generate vec, std::int64_t m, std::int64_t n, std::int64_t k, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3086,6 +3103,7 @@ inline void ungbr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, scratch_size); }); }); + e.wait(); } #define UNGBR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -3108,7 +3126,7 @@ template inline void ungqr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, std::int64_t m, std::int64_t n, std::int64_t k, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3116,6 +3134,7 @@ inline void ungqr_scratchpad_size(const char *func_name, Func func, sycl::queue scratch_size); }); }); + e.wait(); } #define UNGQR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -3137,7 +3156,7 @@ template inline void ungtr_scratchpad_size(const char *func_name, Func func, sycl::queue &queue, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3145,6 +3164,7 @@ inline void ungtr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, scratch_size); }); }); + e.wait(); } #define UNGTR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ @@ -3184,7 +3204,7 @@ inline void unmqr_scratchpad_size(const char *func_name, Func func, sycl::queue oneapi::mkl::side side, oneapi::mkl::transpose trans, std::int64_t m, std::int64_t n, std::int64_t k, std::int64_t lda, std::int64_t ldc, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3217,7 +3237,7 @@ inline void unmtr_scratchpad_size(const char *func_name, Func func, sycl::queue oneapi::mkl::side side, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, std::int64_t m, std::int64_t n, std::int64_t lda, std::int64_t ldc, int *scratch_size) { - queue.submit([&](sycl::handler &cgh) { + auto e = queue.submit([&](sycl::handler &cgh) { onemkl_cusolver_host_task(cgh, 
queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); cusolverStatus_t err; @@ -3226,6 +3246,7 @@ inline void unmtr_scratchpad_size(const char *func_name, Func func, sycl::queue nullptr, lda, nullptr, nullptr, ldc, scratch_size); }); }); + e.wait(); } #define UNMTR_LAUNCHER_SCRATCH(TYPE, CUSOLVER_ROUTINE) \ From ac4f639aec5896205c53411fd68f24d6b951a982 Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 16:58:08 +0100 Subject: [PATCH 06/10] Pass queue by reference to cusolver host task --- src/lapack/backends/cusolver/cusolver_task.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_task.hpp b/src/lapack/backends/cusolver/cusolver_task.hpp index 425c83697..03cae29d1 100644 --- a/src/lapack/backends/cusolver/cusolver_task.hpp +++ b/src/lapack/backends/cusolver/cusolver_task.hpp @@ -33,7 +33,7 @@ namespace lapack { namespace cusolver { template -static inline void host_task_internal(H &cgh, sycl::queue queue, F f) { +static inline void host_task_internal(H &cgh, sycl::queue &queue, F f) { cgh.interop_task([f, queue](sycl::interop_handler ih) { auto sc = CusolverScopedContextHandler(queue, ih); f(sc); @@ -41,7 +41,7 @@ static inline void host_task_internal(H &cgh, sycl::queue queue, F f) { } template -static inline void onemkl_cusolver_host_task(H &cgh, sycl::queue queue, F f) { +static inline void onemkl_cusolver_host_task(H &cgh, sycl::queue &queue, F f) { (void)host_task_internal(cgh, queue, f); } From 87b490060304228b3dbf18e8ede598cb903a72ca Mon Sep 17 00:00:00 2001 From: "aidan.belton" Date: Tue, 16 Aug 2022 17:01:55 +0100 Subject: [PATCH 07/10] Update for includes for removed CL namespace --- src/lapack/backends/cusolver/cusolver_helper.hpp | 2 +- src/lapack/backends/cusolver/cusolver_scope_handle.cpp | 2 +- src/lapack/backends/cusolver/cusolver_scope_handle.hpp | 8 ++++---- src/lapack/backends/cusolver/cusolver_task.hpp | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_helper.hpp b/src/lapack/backends/cusolver/cusolver_helper.hpp index fbac40232..68c7157c0 100644 --- a/src/lapack/backends/cusolver/cusolver_helper.hpp +++ b/src/lapack/backends/cusolver/cusolver_helper.hpp @@ -23,7 +23,7 @@ */ #ifndef _CUSOLVER_HELPER_HPP_ #define _CUSOLVER_HELPER_HPP_ -#include +#include #include #include #include diff --git a/src/lapack/backends/cusolver/cusolver_scope_handle.cpp b/src/lapack/backends/cusolver/cusolver_scope_handle.cpp index 6e8b6ebf2..571fbd4d8 100644 --- a/src/lapack/backends/cusolver/cusolver_scope_handle.cpp +++ b/src/lapack/backends/cusolver/cusolver_scope_handle.cpp @@ -17,7 +17,7 @@ * **************************************************************************/ #include "cusolver_scope_handle.hpp" -#include +#include namespace oneapi { namespace mkl { diff --git a/src/lapack/backends/cusolver/cusolver_scope_handle.hpp b/src/lapack/backends/cusolver/cusolver_scope_handle.hpp index 65482b77b..52abe2d03 100644 --- a/src/lapack/backends/cusolver/cusolver_scope_handle.hpp +++ b/src/lapack/backends/cusolver/cusolver_scope_handle.hpp @@ -18,10 +18,10 @@ **************************************************************************/ #ifndef _CUSOLVER_SCOPED_HANDLE_HPP_ #define _CUSOLVER_SCOPED_HANDLE_HPP_ -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include diff --git a/src/lapack/backends/cusolver/cusolver_task.hpp b/src/lapack/backends/cusolver/cusolver_task.hpp index 
03cae29d1..bd1fcd71e 100644 --- a/src/lapack/backends/cusolver/cusolver_task.hpp +++ b/src/lapack/backends/cusolver/cusolver_task.hpp @@ -23,10 +23,10 @@ #include #include #include -#include +#include #include "oneapi/mkl/types.hpp" #include "cusolver_scope_handle.hpp" -#include +#include namespace oneapi { namespace mkl { namespace lapack { From 0d057c3859cb96bb93069198037872f9d4f81559 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Tue, 11 Oct 2022 16:19:22 +0100 Subject: [PATCH 08/10] Resolved conflict in cusolver_helper.hpp Signed-off-by: JackAKirk --- .../backends/cusolver/cusolver_helper.hpp | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_helper.hpp b/src/lapack/backends/cusolver/cusolver_helper.hpp index 5cb4aee8f..a362eb522 100644 --- a/src/lapack/backends/cusolver/cusolver_helper.hpp +++ b/src/lapack/backends/cusolver/cusolver_helper.hpp @@ -280,7 +280,6 @@ struct CudaEquivalentType> { /* devinfo */ -<<<<<<< HEAD // Accepts a int*, copies the memory from device to host, // checks value does not indicate an error, frees the device memory inline void lapack_info_check_and_free(int *dev_info_d, const char *func_name, @@ -321,31 +320,6 @@ inline sycl::event free_async(sycl::queue &queue, T *ptr, cgh.host_task([=](sycl::interop_handle ih) { sycl::free(ptr, queue); }); }); return done; -======= -inline void get_cusolver_devinfo(sycl::queue &queue, sycl::buffer &devInfo, - std::vector &dev_info_) { - sycl::host_accessor dev_info_acc{ devInfo }; - for (unsigned int i = 0; i < dev_info_.size(); ++i) - dev_info_[i] = dev_info_acc[i]; -} - -inline void get_cusolver_devinfo(sycl::queue &queue, const int *devInfo, - std::vector &dev_info_) { - queue.wait(); - queue.memcpy(dev_info_.data(), devInfo, sizeof(int)); -} - -template -inline void lapack_info_check(sycl::queue &queue, DEVINFO_T devinfo, const char *func_name, - const char *cufunc_name, int dev_info_size = 1) { - std::vector dev_info_(dev_info_size); - get_cusolver_devinfo(queue, devinfo, dev_info_); - for (const auto &val : dev_info_) { - if (val > 0) - throw oneapi::mkl::lapack::computation_error( - func_name, std::string(cufunc_name) + " failed with info = " + std::to_string(val), - val); - } } /* batched helpers */ @@ -359,7 +333,6 @@ T **create_ptr_list_from_stride(T *ptr, int64_t ptr_stride, int64_t batch_size) ptr_list[i] = ptr + i * ptr_stride; return ptr_list; ->>>>>>> develop } } // namespace cusolver From cf2a7ddd675739cd97930475caff94ea6f387a92 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Thu, 13 Oct 2022 07:28:49 -0700 Subject: [PATCH 09/10] Update cusolver_batch cases. 
Signed-off-by: JackAKirk --- .../backends/cusolver/cusolver_batch.cpp | 27 +++++++------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_batch.cpp b/src/lapack/backends/cusolver/cusolver_batch.cpp index 57b9f4a88..f0e5f1aed 100644 --- a/src/lapack/backends/cusolver/cusolver_batch.cpp +++ b/src/lapack/backends/cusolver/cusolver_batch.cpp @@ -184,26 +184,25 @@ inline void getrf_batch(const char *func_name, Func func, sycl::queue &queue, st // Create new buffer with 32-bit ints then copy over results std::uint64_t ipiv_size = stride_ipiv * batch_size; sycl::buffer ipiv32(sycl::range<1>{ ipiv_size }); - sycl::buffer devInfo{ batch_size }; queue.submit([&](sycl::handler &cgh) { auto a_acc = a.template get_access(cgh); auto ipiv32_acc = ipiv32.template get_access(cgh); - auto devInfo_acc = devInfo.template get_access(cgh); auto scratch_acc = scratchpad.template get_access(cgh); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = sc.get_mem(a_acc); auto ipiv_ = sc.get_mem(ipiv32_acc); - auto devInfo_ = sc.get_mem(devInfo_acc); auto scratch_ = sc.get_mem(scratch_acc); cusolverStatus_t err; + int *dev_info_d = create_dev_info(batch_size); // Uses scratch so sync between each cuSolver call for (std::int64_t i = 0; i < batch_size; ++i) { CUSOLVER_ERROR_FUNC_T_SYNC(func_name, func, err, handle, m, n, a_ + stride_a * i, - lda, scratch_, ipiv_ + stride_ipiv * i, devInfo_ + i); + lda, scratch_, ipiv_ + stride_ipiv * i, dev_info_d + i); } + lapack_info_check_and_free(dev_info_d, __func__, func_name, batch_size); }); }); @@ -215,7 +214,6 @@ inline void getrf_batch(const char *func_name, Func func, sycl::queue &queue, st [=](sycl::id<1> index) { ipiv_acc[index] = ipiv32_acc[index]; }); }); - lapack_info_check(queue, devInfo, __func__, func_name, batch_size); } #define GETRF_STRIDED_BATCH_LAUNCHER(TYPE, CUSOLVER_ROUTINE) \ @@ -571,7 +569,6 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu // Allocate memory with 32-bit ints then copy over results std::uint64_t ipiv_size = stride_ipiv * batch_size; int *ipiv32 = (int *)malloc_device(sizeof(int) * ipiv_size, queue); - int *devInfo = (int *)malloc_device(sizeof(int) * batch_size, queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); @@ -581,16 +578,17 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); - auto devInfo_ = reinterpret_cast(devInfo); auto scratchpad_ = reinterpret_cast(scratchpad); auto ipiv_ = reinterpret_cast(ipiv32); cusolverStatus_t err; + int *dev_info_d = create_dev_info(batch_size); // Uses scratch so sync between each cuSolver call for (int64_t i = 0; i < batch_size; ++i) { CUSOLVER_ERROR_FUNC_T_SYNC(func_name, func, err, handle, m, n, a_ + stride_a * i, - lda, scratchpad_, ipiv_ + stride_ipiv * i, devInfo_ + i); + lda, scratchpad_, ipiv_ + stride_ipiv * i, dev_info_d + i); } + lapack_info_check_and_free(dev_info_d, __func__, func_name, batch_size); }); }); @@ -607,10 +605,6 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu cgh.host_task([=](sycl::interop_handle ih) { sycl::free(ipiv32, queue); }); }); - // lapack_info_check calls queue.wait() - lapack_info_check(queue, devInfo, __func__, func_name, batch_size); - 
sycl::free(devInfo, queue); - return done_casting; } @@ -656,7 +650,6 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu for (int64_t group_id = 0; group_id < group_count; ++group_id) for (int64_t local_id = 0; local_id < group_sizes[group_id]; ++local_id, ++global_id) ipiv32[global_id] = (int *)malloc_device(sizeof(int) * n[group_id], queue); - int *devInfo = (int *)malloc_device(sizeof(int) * batch_size, queue); auto done = queue.submit([&](sycl::handler &cgh) { int64_t num_events = dependencies.size(); @@ -669,6 +662,7 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu auto scratch_ = reinterpret_cast(scratchpad); int64_t global_id = 0; cusolverStatus_t err; + int *dev_info_d = create_dev_info(batch_size); // Uses scratch so sync between each cuSolver call for (int64_t group_id = 0; group_id < group_count; ++group_id) { @@ -676,9 +670,10 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu ++local_id, ++global_id) { CUSOLVER_ERROR_FUNC_T_SYNC(func_name, func, err, handle, m[group_id], n[group_id], a_[global_id], lda[group_id], scratch_, - ipiv32[global_id], devInfo + global_id); + ipiv32[global_id], dev_info_d + global_id); } } + lapack_info_check_and_free(dev_info_d, __func__, func_name, batch_size); }); }); @@ -712,10 +707,6 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu }); }); - // lapack_info_check calls queue.wait() - lapack_info_check(queue, devInfo, __func__, func_name, batch_size); - sycl::free(devInfo, queue); - return done_freeing; } From 0d023734740b6394975fcf6d69a10131622778a2 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Thu, 13 Oct 2022 09:41:11 -0700 Subject: [PATCH 10/10] Applied Aidan's suggestions. 
Signed-off-by: JackAKirk --- .../backends/cusolver/cusolver_batch.cpp | 80 ++++--------------- 1 file changed, 17 insertions(+), 63 deletions(-) diff --git a/src/lapack/backends/cusolver/cusolver_batch.cpp b/src/lapack/backends/cusolver/cusolver_batch.cpp index f0e5f1aed..9095a11e3 100644 --- a/src/lapack/backends/cusolver/cusolver_batch.cpp +++ b/src/lapack/backends/cusolver/cusolver_batch.cpp @@ -457,10 +457,7 @@ inline sycl::event geqrf_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m, n, lda, stride_a, stride_tau, batch_size, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -511,10 +508,7 @@ inline sycl::event geqrf_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m[i], n[i], lda[i], group_sizes[i]); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -571,22 +565,18 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu int *ipiv32 = (int *)malloc_device(sizeof(int) * ipiv_size, queue); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); auto scratchpad_ = reinterpret_cast(scratchpad); - auto ipiv_ = reinterpret_cast(ipiv32); cusolverStatus_t err; int *dev_info_d = create_dev_info(batch_size); // Uses scratch so sync between each cuSolver call for (int64_t i = 0; i < batch_size; ++i) { CUSOLVER_ERROR_FUNC_T_SYNC(func_name, func, err, handle, m, n, a_ + stride_a * i, - lda, scratchpad_, ipiv_ + stride_ipiv * i, dev_info_d + i); + lda, scratchpad_, ipiv32 + stride_ipiv * i, dev_info_d + i); } lapack_info_check_and_free(dev_info_d, __func__, func_name, batch_size); }); @@ -652,10 +642,7 @@ inline sycl::event getrf_batch(const char *func_name, Func func, sycl::queue &qu ipiv32[global_id] = (int *)malloc_device(sizeof(int) * n[group_id], queue); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -805,22 +792,18 @@ inline sycl::event getrs_batch(const char *func_name, Func func, sycl::queue &qu }); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); cgh.depends_on(done_casting); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ 
= reinterpret_cast(a); - auto ipiv_ = reinterpret_cast(ipiv32); auto b_ = reinterpret_cast(b); cusolverStatus_t err; // Does not use scratch so call cuSolver asynchronously and sync at end for (int64_t i = 0; i < batch_size; ++i) { CUSOLVER_ERROR_FUNC_T(func_name, func, err, handle, get_cublas_operation(trans), n, - nrhs, a_ + stride_a * i, lda, ipiv_ + stride_ipiv * i, + nrhs, a_ + stride_a * i, lda, ipiv32 + stride_ipiv * i, b_ + stride_b * i, ldb, nullptr); } CUSOLVER_SYNC(err, handle) @@ -893,13 +876,8 @@ inline sycl::event getrs_batch(const char *func_name, Func func, sycl::queue &qu } auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } - for (int64_t i = 0; i < batch_size; i++) { - cgh.depends_on(casting_dependencies[i]); - } + depends_on_events(cgh, dependencies); + depends_on_events(cgh, casting_dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); @@ -958,10 +936,7 @@ inline sycl::event orgqr_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m, n, k, lda, stride_a, stride_tau, batch_size, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1011,10 +986,7 @@ inline sycl::event orgqr_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m[i], n[i], k[i], lda[i], group_sizes[i]); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1065,10 +1037,7 @@ inline sycl::event potrf_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(n, lda, stride_a, batch_size, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); CUdeviceptr a_dev; @@ -1126,10 +1095,7 @@ inline sycl::event potrf_batch(const char *func_name, Func func, sycl::queue &qu } auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); int64_t offset = 0; @@ -1190,10 +1156,7 @@ inline sycl::event potrs_batch(const char *func_name, Func func, sycl::queue &qu throw unimplemented("lapack", "potrs_batch", "cusolver potrs_batch only supports nrhs = 1"); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); 
onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); CUresult cuda_result; @@ -1274,10 +1237,7 @@ inline sycl::event potrs_batch(const char *func_name, Func func, sycl::queue &qu queue.submit([&](sycl::handler &h) { h.memcpy(b_dev, b, batch_size * sizeof(T *)); }); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); cgh.depends_on(done_cpy_a); cgh.depends_on(done_cpy_b); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { @@ -1331,10 +1291,7 @@ inline sycl::event ungqr_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m, n, k, lda, stride_a, stride_tau, batch_size, scratchpad_size); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a); @@ -1384,10 +1341,7 @@ inline sycl::event ungqr_batch(const char *func_name, Func func, sycl::queue &qu overflow_check(m[i], n[i], k[i], lda[i], group_sizes[i]); auto done = queue.submit([&](sycl::handler &cgh) { - int64_t num_events = dependencies.size(); - for (int64_t i = 0; i < num_events; i++) { - cgh.depends_on(dependencies[i]); - } + depends_on_events(cgh, dependencies); onemkl_cusolver_host_task(cgh, queue, [=](CusolverScopedContextHandler &sc) { auto handle = sc.get_handle(queue); auto a_ = reinterpret_cast(a);
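Two patterns recur across the hunks above. First, the scratchpad-size queries write *scratch_size from inside a host task, so the submitting function now keeps the returned event and calls e.wait() before the value can be read by the caller. Second, devInfo is a raw device allocation obtained from create_dev_info() and validated by lapack_info_check_and_free() inside the same host task, so the error check is ordered directly after the cuSolver call and the earlier post-hoc lapack_info_check()/queue.wait() path is no longer needed. The following is a simplified, standalone sketch of the devInfo half only, written against plain CUDA driver/runtime and cuSolver calls rather than the oneMKL wrappers; the helper names (alloc_dev_info, check_and_free_dev_info, getrf_on_device) are illustrative, and an initialized CUDA context, cusolverDn handle, stream, and device buffers are assumed to exist.

    #include <cuda.h>
    #include <cuda_runtime.h>
    #include <cusolverDn.h>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Device-side devInfo allocation, analogous to create_dev_info() in the patches.
    inline int *alloc_dev_info(int num_elements = 1) {
        CUdeviceptr d = 0;
        cuMemAlloc(&d, sizeof(int) * num_elements);
        return reinterpret_cast<int *>(d);
    }

    // Copy devInfo back to the host, throw if any entry reports a failure, then
    // free the device memory, analogous to lapack_info_check_and_free().
    inline void check_and_free_dev_info(int *dev_info_d, const char *routine,
                                        int num_elements = 1) {
        std::vector<int> h(num_elements);
        cuMemcpyDtoH(h.data(), reinterpret_cast<CUdeviceptr>(dev_info_d),
                     sizeof(int) * num_elements);
        cuMemFree(reinterpret_cast<CUdeviceptr>(dev_info_d));
        for (int v : h)
            if (v > 0)
                throw std::runtime_error(std::string(routine) +
                                         " failed with info = " + std::to_string(v));
    }

    // Body of a host task (sketch): run the factorization, synchronize the stream
    // the handle uses, then validate devInfo on the same thread.
    void getrf_on_device(cusolverDnHandle_t handle, cudaStream_t stream, int m, int n,
                         float *a_d, int lda, float *work_d, int *ipiv_d) {
        int *dev_info_d = alloc_dev_info();
        cusolverDnSgetrf(handle, m, n, a_d, lda, work_d, ipiv_d, dev_info_d);
        cudaStreamSynchronize(stream);
        check_and_free_dev_info(dev_info_d, "cusolverDnSgetrf");
    }

Because the copy, check, and free all happen on the host-task thread after the synchronization, no separate SYCL event or extra queue.wait() is required to make the devInfo value visible, which is the behaviour the strided and group-API getrf_batch hunks above rely on.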