From 23f2611c59fd64df46f8f5e7c57174681f98c82a Mon Sep 17 00:00:00 2001 From: "Larsen, Steffen" Date: Fri, 2 Sep 2022 04:20:20 -0700 Subject: [PATCH] [SYCL] Remove host run and dependencies from SYCL/Reduction tests This commit removes the host run and any assumptions and operations related to the host device from the tests in SYCL/Reduction. Co-authored-by: Sachkov, Alexey Signed-off-by: Larsen, Steffen --- SYCL/Reduction/reduction_big_data.cpp | 6 +----- SYCL/Reduction/reduction_nd_N_queue_shortcut.cpp | 6 +----- SYCL/Reduction/reduction_nd_conditional.cpp | 1 - SYCL/Reduction/reduction_nd_dw.cpp | 3 +-- SYCL/Reduction/reduction_nd_ext_double.cpp | 3 --- SYCL/Reduction/reduction_nd_ext_half.cpp | 3 --- SYCL/Reduction/reduction_nd_ext_type.hpp | 2 +- SYCL/Reduction/reduction_nd_lambda.cpp | 4 +--- SYCL/Reduction/reduction_nd_queue_shortcut.cpp | 6 +----- SYCL/Reduction/reduction_range_queue_shortcut.cpp | 6 +----- 10 files changed, 7 insertions(+), 33 deletions(-) diff --git a/SYCL/Reduction/reduction_big_data.cpp b/SYCL/Reduction/reduction_big_data.cpp index 2d6b0e4130..1daf18b864 100644 --- a/SYCL/Reduction/reduction_big_data.cpp +++ b/SYCL/Reduction/reduction_big_data.cpp @@ -3,14 +3,10 @@ // RUN: %ACC_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out // -// `Group algorithms are not supported on host device` on Nvidia. +// Group algorithms are not supported on Nvidia. // XFAIL: hip_nvidia // -// RUNx: %HOST_RUN_PLACEHOLDER %t.out -// TODO: Enable the test for HOST when it supports ext::oneapi::reduce() and -// barrier() - // This test performs basic checks of parallel_for(nd_range, reduction, func) // where the bigger data size and/or non-uniform work-group sizes may cause // errors. diff --git a/SYCL/Reduction/reduction_nd_N_queue_shortcut.cpp b/SYCL/Reduction/reduction_nd_N_queue_shortcut.cpp index 258621159b..782aed15e3 100644 --- a/SYCL/Reduction/reduction_nd_N_queue_shortcut.cpp +++ b/SYCL/Reduction/reduction_nd_N_queue_shortcut.cpp @@ -3,13 +3,9 @@ // RUN: %ACC_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out -// `Group algorithms are not supported on host device.` on NVidia. +// Group algorithms are not supported on NVidia. // XFAIL: hip_nvidia -// RUNx: %HOST_RUN_PLACEHOLDER %t.out -// TODO: Enable the test for HOST when it supports ext::oneapi::reduce() and -// barrier() - // This test only checks that the method queue::parallel_for() accepting // reduction, can be properly translated into queue::submit + parallel_for(). diff --git a/SYCL/Reduction/reduction_nd_conditional.cpp b/SYCL/Reduction/reduction_nd_conditional.cpp index f78c253829..97d0e3f2b3 100644 --- a/SYCL/Reduction/reduction_nd_conditional.cpp +++ b/SYCL/Reduction/reduction_nd_conditional.cpp @@ -1,5 +1,4 @@ // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out -// RUNx: %HOST_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out // RUN: %GPU_RUN_PLACEHOLDER %t.out // RUN: %ACC_RUN_PLACEHOLDER %t.out diff --git a/SYCL/Reduction/reduction_nd_dw.cpp b/SYCL/Reduction/reduction_nd_dw.cpp index acbc6f6ac6..5670da10ab 100644 --- a/SYCL/Reduction/reduction_nd_dw.cpp +++ b/SYCL/Reduction/reduction_nd_dw.cpp @@ -1,10 +1,9 @@ // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out -// RUNx: %HOST_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out // RUN: %GPU_RUN_PLACEHOLDER %t.out // RUN: %ACC_RUN_PLACEHOLDER %t.out // -// `Group algorithms are not supported on host device.` on Nvidia. +// Group algorithms are not supported on Nvidia. 
// XFAIL: hip_nvidia // This test performs basic checks of parallel_for(nd_range, reduction, func) diff --git a/SYCL/Reduction/reduction_nd_ext_double.cpp b/SYCL/Reduction/reduction_nd_ext_double.cpp index cd58ec9fa6..c8469ac3d0 100644 --- a/SYCL/Reduction/reduction_nd_ext_double.cpp +++ b/SYCL/Reduction/reduction_nd_ext_double.cpp @@ -9,9 +9,6 @@ // XFAIL: hip_nvidia -// TODO: Enable the test for HOST when it supports intel::reduce() and barrier() -// RUNx: %HOST_RUN_PLACEHOLDER %t.out - // This test performs basic checks of parallel_for(nd_range, reduction, func) // used with 'double' type. diff --git a/SYCL/Reduction/reduction_nd_ext_half.cpp b/SYCL/Reduction/reduction_nd_ext_half.cpp index 993074e551..c6fea9e388 100644 --- a/SYCL/Reduction/reduction_nd_ext_half.cpp +++ b/SYCL/Reduction/reduction_nd_ext_half.cpp @@ -8,9 +8,6 @@ // work group size not bigger than 1` on Nvidia. // XFAIL: hip_amd || hip_nvidia -// TODO: Enable the test for HOST when it supports intel::reduce() and barrier() -// RUNx: %HOST_RUN_PLACEHOLDER %t.out - // This test performs basic checks of parallel_for(nd_range, reduction, func) // used with 'half' type. diff --git a/SYCL/Reduction/reduction_nd_ext_type.hpp b/SYCL/Reduction/reduction_nd_ext_type.hpp index e75a729321..5fa320389f 100644 --- a/SYCL/Reduction/reduction_nd_ext_type.hpp +++ b/SYCL/Reduction/reduction_nd_ext_type.hpp @@ -20,7 +20,7 @@ template int runTests(sycl::aspect ExtAspect) { queue Q; printDeviceInfo(Q); device D = Q.get_device(); - if (!D.is_host() && !D.has(ExtAspect)) { + if (!D.has(ExtAspect)) { std::cout << "Test skipped\n"; return 0; } diff --git a/SYCL/Reduction/reduction_nd_lambda.cpp b/SYCL/Reduction/reduction_nd_lambda.cpp index cc5645a519..a85fe4c6e5 100644 --- a/SYCL/Reduction/reduction_nd_lambda.cpp +++ b/SYCL/Reduction/reduction_nd_lambda.cpp @@ -1,11 +1,9 @@ // RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out -// RUNx: %HOST_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out // RUN: %GPU_RUN_PLACEHOLDER %t.out // RUN: %ACC_RUN_PLACEHOLDER %t.out // -// Inconsistently fails on HIP AMD, error message `Barrier is not supported on -// the host device yet.` on HIP Nvidia. +// Inconsistently fails on HIP AMD, HIP Nvidia. // UNSUPPORTED: hip_amd || hip_nvidia // This test performs basic checks of parallel_for(nd_range, reduction, lambda) diff --git a/SYCL/Reduction/reduction_nd_queue_shortcut.cpp b/SYCL/Reduction/reduction_nd_queue_shortcut.cpp index 5288f983df..f3817a4054 100644 --- a/SYCL/Reduction/reduction_nd_queue_shortcut.cpp +++ b/SYCL/Reduction/reduction_nd_queue_shortcut.cpp @@ -3,13 +3,9 @@ // RUN: %ACC_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out -// `Group algorithms are not supported on host device.` on NVidia. +// Group algorithms are not supported on NVidia. // XFAIL: hip_nvidia -// RUNx: %HOST_RUN_PLACEHOLDER %t.out -// TODO: Enable the test for HOST when it supports ext::oneapi::reduce() and -// barrier() - // This test only checks that the method queue::parallel_for() accepting // reduction, can be properly translated into queue::submit + parallel_for(). diff --git a/SYCL/Reduction/reduction_range_queue_shortcut.cpp b/SYCL/Reduction/reduction_range_queue_shortcut.cpp index cbc1566da1..43079f14fd 100644 --- a/SYCL/Reduction/reduction_range_queue_shortcut.cpp +++ b/SYCL/Reduction/reduction_range_queue_shortcut.cpp @@ -3,13 +3,9 @@ // RUN: %ACC_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out -// `Group algorithms are not supported on host device.` on NVidia. 
+// Group algorithms are not supported on NVidia. // XFAIL: hip_nvidia -// RUNx: %HOST_RUN_PLACEHOLDER %t.out -// TODO: Enable the test for HOST when it supports ext::oneapi::reduce() and -// barrier() - // This test only checks that the shortcut method queue::parallel_for() // can accept 2 or more reduction variables.
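
For reference, the reduction_nd_ext_type.hpp hunk above works because SYCL 2020
has no host device: an aspect query alone now decides whether to skip the test.
A minimal sketch of that pattern, assuming an illustrative fp64 aspect check
(the real tests receive the aspect as the ExtAspect parameter):

  #include <sycl/sycl.hpp>
  #include <iostream>

  int main() {
    sycl::queue Q;
    sycl::device D = Q.get_device();
    // No host device left to special-case, so has() is the only gate needed.
    if (!D.has(sycl::aspect::fp64)) {
      std::cout << "Test skipped\n";
      return 0;
    }
    // ... the reduction checks for the extended type would run here ...
    return 0;
  }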
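The queue-shortcut tests only verify that queue::parallel_for() taking a
reduction lowers to queue::submit + handler::parallel_for. A minimal sketch of
that shortcut, assuming a USM scalar so the reduction needs no handler (the
work sizes and sycl::plus combiner are illustrative, not the tests' values):

  #include <sycl/sycl.hpp>
  #include <iostream>

  int main() {
    sycl::queue Q;
    int *Sum = sycl::malloc_shared<int>(1, Q);
    *Sum = 0;
    // Shortcut form: no explicit submit(); the reduction object wraps Sum.
    Q.parallel_for(sycl::nd_range<1>{sycl::range<1>{1024}, sycl::range<1>{64}},
                   sycl::reduction(Sum, sycl::plus<int>()),
                   [=](sycl::nd_item<1> It, auto &S) { S += 1; })
        .wait();
    std::cout << *Sum << "\n"; // prints 1024
    sycl::free(Sum, Q);
    return 0;
  }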