[OpenMP] Limit reduction support for pragma 'distribute' when combined with pragma 'simd'

Differential Revision: https://reviews.llvm.org/D43513

This is a bug fix: it stops emitting reduction support for pragma 'distribute' when the directive appears alone or in a combination that does not include 'simd'.
Pragma 'distribute' does not have a reduction clause of its own, but when it is combined with pragma 'simd' we still need to emit support for simd's reduction clause as part of the code generation for 'distribute'. The new guard is similar to the one used for reduction support earlier in the same code-gen function.
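For context, a minimal source-level sketch of the two cases the guard distinguishes. This is an illustrative program written for this note, not part of the commit; it assumes OpenMP 4.5 offloading and a compiler invoked with -fopenmp plus an offload target.

#include <cstdio>

int main() {
  const int n = 1000;
  int a[1000];
  int sum = 0;

  // Plain 'distribute': the directive itself takes no reduction clause, so
  // distribute code gen must not emit any reduction finalization here.
#pragma omp target teams num_teams(1) map(from : a)
#pragma omp distribute
  for (int i = 0; i < n; ++i)
    a[i] = i;

  // 'distribute simd': the simd part carries the reduction clause, and the
  // distribute code-gen path still has to finalize that reduction.
#pragma omp target teams num_teams(1) map(to : a) map(tofrom : sum)
#pragma omp distribute simd reduction(+ : sum)
  for (int i = 0; i < n; ++i)
    sum += a[i];

  std::printf("sum = %d\n", sum); // 0 + 1 + ... + 999 = 499500
  return 0;
}

The sketch uses num_teams(1) only to keep the result well defined without a teams-level reduction; the point is simply that the second loop carries a reduction clause while the first cannot.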

llvm-svn: 325822
Carlo Bertolli committed Feb 22, 2018
1 parent 401b2a4 commit beda214
Showing 2 changed files with 107 additions and 19 deletions.
clang/lib/CodeGen/CGStmtOpenMP.cpp: 23 additions, 19 deletions
@@ -3358,26 +3358,30 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
             CGF.EmitLoadOfScalar(IL, S.getLocStart()));
       });
     }
-    OpenMPDirectiveKind ReductionKind = OMPD_unknown;
-    if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
-        isOpenMPSimdDirective(S.getDirectiveKind())) {
-      ReductionKind = OMPD_parallel_for_simd;
-    } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
-      ReductionKind = OMPD_parallel_for;
-    } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
-      ReductionKind = OMPD_simd;
-    } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
-               S.hasClausesOfKind<OMPReductionClause>()) {
-      llvm_unreachable(
-          "No reduction clauses is allowed in distribute directive.");
+    if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
+        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
+        !isOpenMPTeamsDirective(S.getDirectiveKind())) {
+      OpenMPDirectiveKind ReductionKind = OMPD_unknown;
+      if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
+          isOpenMPSimdDirective(S.getDirectiveKind())) {
+        ReductionKind = OMPD_parallel_for_simd;
+      } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
+        ReductionKind = OMPD_parallel_for;
+      } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+        ReductionKind = OMPD_simd;
+      } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
+                 S.hasClausesOfKind<OMPReductionClause>()) {
+        llvm_unreachable(
+            "No reduction clauses is allowed in distribute directive.");
+      }
+      EmitOMPReductionClauseFinal(S, ReductionKind);
+      // Emit post-update of the reduction variables if IsLastIter != 0.
+      emitPostUpdateForReductionClause(
+          *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
+            return CGF.Builder.CreateIsNotNull(
+                CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+          });
     }
-    EmitOMPReductionClauseFinal(S, ReductionKind);
-    // Emit post-update of the reduction variables if IsLastIter != 0.
-    emitPostUpdateForReductionClause(
-        *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
-          return CGF.Builder.CreateIsNotNull(
-              CGF.EmitLoadOfScalar(IL, S.getLocStart()));
-        });
     // Emit final copy of the lastprivate variables if IsLastIter != 0.
     if (HasLastprivateClause) {
       EmitOMPLastprivateClauseFinal(
clang/test/OpenMP/distribute_parallel_for_reduction_codegen.cpp: 84 additions, 0 deletions (new file)
@@ -0,0 +1,84 @@
// Test host code gen

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// SIMD-ONLY1-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER


template <typename T>
T tmain(T &r) {
  int n = 1000;
  // schedule: dynamic chunk
#pragma omp target map(tofrom:r)
#pragma omp teams
#pragma omp distribute parallel for reduction(+:r)
  for (int i = 0; i < n; ++i)
    r += (T)i;

  return r;
}

int main() {
  int n = 1000;
  int r = 0;
#pragma omp target map(tofrom:r)
#pragma omp teams
#pragma omp distribute parallel for reduction(+:r)
  for (int i = 0; i < n; ++i)
    r += i;

  return tmain<int>(r);
}

// CHECK-LABEL: main
// CHECK: call{{.+}} @__tgt_target_teams(
// CHECK: call void [[OFFL:@.+]](
// CHECK: call{{.+}} [[TMAIN:@.+]](i{{32|64}}
// CHECK: ret

// CHECK: define{{.+}} [[OFFL]](
// CHECK: call{{.+}} @__kmpc_fork_teams({{.+}}, {{.+}}, {{.+}} [[TEOUTL:@.+]] to{{.+}}
// CHECK: ret void

// CHECK: define{{.+}} [[TEOUTL]](
// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} [[PAROUTL:@.+]] to{{.+}}
// CHECK: ret void

// CHECK: define{{.+}} [[PAROUTL]](
// CHECK: call{{.+}} @__kmpc_reduce_nowait(
// CHECK: call{{.+}} @__kmpc_end_reduce_nowait(
// CHECK: ret void

// CHECK: define{{.+}} [[TMAIN]](i{{32|64}}
// CHECK: call{{.+}} @__tgt_target_teams(
// CHECK: call void [[TOFFL:@.+]](
// CHECK: ret

// CHECK: define{{.+}} [[TOFFL]](
// CHECK: call{{.+}} @__kmpc_fork_teams({{.+}}, {{.+}}, {{.+}} [[TEMPLTEOUTL:@.+]] to{{.+}}
// CHECK: ret void

// CHECK: define{{.+}} [[TEMPLTEOUTL]](
// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}} [[TPAROUTL:@.+]] to{{.+}}
// CHECK: ret void

// CHECK: define{{.+}} [[TPAROUTL]](
// CHECK: call{{.+}} @__kmpc_reduce_nowait(
// CHECK: call{{.+}} @__kmpc_end_reduce_nowait(
// CHECK: ret void

#endif // HEADER
