-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
 // RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
 // RUN: -o - | FileCheck %s

-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
 // RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
 // RUN: -o - | FileCheck %s

 #include "Inputs/cuda.h"

 // CHECK-LABEL: @_Z16use_dispatch_ptrPi(
 // CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i8*
+// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
 __global__ void use_dispatch_ptr(int *out) {
   const int *dispatch_ptr = (const int *)__builtin_amdgcn_dispatch_ptr();
   *out = *dispatch_ptr;
@@ -24,6 +24,39 @@ void test_ds_fmax(float src) {
   volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
 }

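+// Check that a __shared__ variable is passed to the ds.fadd intrinsic directly as an LDS (addrspace(3)) pointer.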
+// CHECK-LABEL: @_Z12test_ds_faddf(
+// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+__global__ void test_ds_fadd(float src) {
+  __shared__ float shared;
+  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
+}
+
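+// Check that a generic pointer argument is addrspace-cast to LDS (addrspace(3)) before being passed to the ds.fmin intrinsic.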
+// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
+// CHECK: %shared = alloca float*, align 8, addrspace(5)
+// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
+// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
+// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
+// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
+// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
+// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
+// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
+// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
+// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
+// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
+__global__ void test_ds_fmin(float src, float *shared) {
+  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+}
+
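+// Check that the addrspace(4) pointer returned by the builtin is cast to the generic address space before being stored to a generic void*.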
+// CHECK: @_Z33test_ret_builtin_nondef_addrspace
+// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
+// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
+// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
+// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
+__device__ void test_ret_builtin_nondef_addrspace() {
+  void *x = __builtin_amdgcn_dispatch_ptr();
+}
+
 // CHECK-LABEL: @_Z6endpgmv(
 // CHECK: call void @llvm.amdgcn.endpgm()
 __global__ void endpgm() {
@@ -33,12 +66,12 @@ __global__ void endpgm() {
 // Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.

 // CHECK-LABEL: @_Z14test_uicmp_i64
-// CHECK: store i64* %out, i64** %out.addr.ascast
+// CHECK: store i64* %out1, i64** %out.addr.ascast
 // CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
 // CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
 // CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
 // CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
-// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %0, i64 %1, i32 35)
+// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
 // CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
 // CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
 // CHECK-NEXT: ret void
@@ -58,3 +91,45 @@ __global__ void test_s_memtime(unsigned long long* out)
 {
   *out = __builtin_amdgcn_s_memtime();
 }
+
+// Check that a generic pointer can be passed both as a shared pointer and as a generic pointer.
+__device__ void func(float *x);
+
+// CHECK: @_Z17test_ds_fmin_funcfPf
+// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
+// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
+// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
+// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
+// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
+// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
+// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
+// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
+// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
+// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
+// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
+__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
+  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+  func(shared);
+}
+
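+// Check that a generic pointer can be passed to __builtin_amdgcn_is_shared, which takes a generic i8* argument.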
+// CHECK: @_Z14test_is_sharedPf(float addrspace(1)* %[[X_COERCE:.*]])
+// CHECK: %[[X:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[X_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X]] to float**
+// CHECK: %[[X_ADDR:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[X_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X_ADDR]] to float**
+// CHECK: %[[X_FP:.*]] = addrspacecast float addrspace(1)* %[[X_COERCE]] to float*
+// CHECK: store float* %[[X_FP]], float** %[[X_ASCAST]], align 8
+// CHECK: %[[X1:.*]] = load float*, float** %[[X_ASCAST]], align 8
+// CHECK: store float* %[[X1]], float** %[[X_ADDR_ASCAST]], align 8
+// CHECK: %[[X_TMP:.*]] = load float*, float** %[[X_ADDR_ASCAST]], align 8
+// CHECK: %[[X_ARG:.*]] = bitcast float* %[[X_TMP]] to i8*
+// CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[X_ARG]])
+
+__global__ void test_is_shared(float *x) {
+  bool ret = __builtin_amdgcn_is_shared(x);
+}
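
For reference, a minimal HIP sketch (not part of the test file or the patch above) of how the ds_fmin builtin exercised by these tests is called from device code; the kernel and variable names are illustrative and assume a ROCm/hipcc toolchain targeting a GPU such as gfx906:

#include <hip/hip_runtime.h>

// Mirrors the pattern checked by test_ds_fadd/test_ds_fmin above: the __shared__
// variable is lowered to an LDS (addrspace(3)) object, and the trailing 0, 0, false
// arguments match those used by the tests.
__global__ void example_ds_fmin(float src, float *out) {
  __shared__ float lds_val;
  lds_val = src;  // every thread writes the same value before the atomic min
  *out = __builtin_amdgcn_ds_fminf(&lds_val, src, 0, 0, false);
}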