diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index ad1a00b4bbcc47..7bb1405c131ab2 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -638,6 +638,9 @@ C++2b Feature Support
 CUDA/HIP Language Changes in Clang
 ----------------------------------
 
+- Allow the use of ``__noinline__`` as a keyword (instead of ``__attribute__((noinline))``)
+  in lambda declarations.
+
 Objective-C Language Changes in Clang
 -------------------------------------
 
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index e34bd8d7bca401..a768c4da504afc 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -1291,7 +1291,22 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
   if (getLangOpts().CUDA) {
     // In CUDA code, GNU attributes are allowed to appear immediately after the
     // "[...]", even if there is no "(...)" before the lambda body.
-    MaybeParseGNUAttributes(D);
+    //
+    // Note that we support __noinline__ as a keyword in this mode and thus
+    // it has to be separately handled.
+    while (true) {
+      if (Tok.is(tok::kw___noinline__)) {
+        IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+        SourceLocation AttrNameLoc = ConsumeToken();
+        Attr.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+                    ParsedAttr::AS_Keyword);
+      } else if (Tok.is(tok::kw___attribute))
+        ParseGNUAttributes(Attr, nullptr, &D);
+      else
+        break;
+    }
+
+    D.takeAttributes(Attr);
   }
 
   // Helper to emit a warning if we see a CUDA host/device/global attribute
diff --git a/clang/test/CodeGenCUDA/lambda-noinline.cu b/clang/test/CodeGenCUDA/lambda-noinline.cu
new file mode 100644
index 00000000000000..de2196e63f0748
--- /dev/null
+++ b/clang/test/CodeGenCUDA/lambda-noinline.cu
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -no-opaque-pointers -x hip -emit-llvm -std=c++11 %s -o - \
+// RUN:   -triple x86_64-linux-gnu \
+// RUN:   | FileCheck -check-prefix=HOST %s
+// RUN: %clang_cc1 -no-opaque-pointers -x hip -emit-llvm -std=c++11 %s -o - \
+// RUN:   -triple amdgcn-amd-amdhsa -fcuda-is-device \
+// RUN:   | FileCheck -check-prefix=DEV %s
+
+#include "Inputs/cuda.h"
+
+// Checks noinline is correctly added to the lambda function.
+
+// HOST: define{{.*}}@_ZZ4HostvENKUlvE_clEv({{.*}}) #[[ATTR:[0-9]+]]
+// HOST: attributes #[[ATTR]]{{.*}}noinline
+
+// DEV: define{{.*}}@_ZZ6DevicevENKUlvE_clEv({{.*}}) #[[ATTR:[0-9]+]]
+// DEV: attributes #[[ATTR]]{{.*}}noinline
+
+__device__ int a;
+int b;
+
+__device__ int Device() { return ([&] __device__ __noinline__ (){ return a; })(); }
+
+__host__ int Host() { return ([&] __host__ __noinline__ (){ return b; })(); }
diff --git a/clang/test/Parser/lambda-attr.cu b/clang/test/Parser/lambda-attr.cu
index 886212b97f50ba..7fa128effd5129 100644
--- a/clang/test/Parser/lambda-attr.cu
+++ b/clang/test/Parser/lambda-attr.cu
@@ -18,6 +18,10 @@ __attribute__((device)) void device_attr() {
   ([&](int) __attribute__((device)){ device_fn(); })(0);
   // expected-warning@-1 {{nvcc does not allow '__device__' to appear after the parameter list in lambdas}}
   ([&] __attribute__((device)) (int) { device_fn(); })(0);
+
+  // test that noinline can appear anywhere.
+  ([&] __attribute__((device)) __noinline__ () { device_fn(); })();
+  ([&] __noinline__ __attribute__((device)) () { device_fn(); })();
 }
 
 __attribute__((host)) __attribute__((device)) void host_device_attrs() {
@@ -37,6 +41,11 @@ __attribute__((host)) __attribute__((device)) void host_device_attrs() {
   // expected-warning@-1 {{nvcc does not allow '__host__' to appear after the parameter list in lambdas}}
   // expected-warning@-2 {{nvcc does not allow '__device__' to appear after the parameter list in lambdas}}
   ([&] __attribute__((host)) __attribute__((device)) (int) { hd_fn(); })(0);
+
+  // test that noinline can also appear anywhere.
+  ([] __attribute__((host)) __attribute__((device)) () { hd_fn(); })();
+  ([] __attribute__((host)) __noinline__ __attribute__((device)) () { hd_fn(); })();
+  ([] __attribute__((host)) __attribute__((device)) __noinline__ () { hd_fn(); })();
 }
 
 // TODO: Add tests for __attribute__((global)) once we support global lambdas.
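
For context, the syntax this change accepts is the one exercised by the tests above: in CUDA/HIP code, __noinline__ may be written as a keyword in the attribute position after the lambda introducer, in any order with __attribute__((...)) attributes. A minimal sketch of user code (illustrative only, not part of the patch; the names counter and readCounter are hypothetical, and the usual CUDA/HIP __device__/__noinline__ macros or keywords are assumed to be available):

// Illustrative sketch, not part of the patch: noinline spelled as a keyword
// on a lambda in a CUDA/HIP translation unit.
__device__ int counter;

__device__ int readCounter() {
  // __noinline__ appears between the lambda introducer and the (optional)
  // parameter list; __attribute__((noinline)) remains equivalent.
  return ([&] __device__ __noinline__ () { return counter; })();
}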