[CUDA] Add CUDA wrappers over clang builtins for sm_90.
Differential Revision: https://reviews.llvm.org/D151362
Artem-B committed May 25, 2023
1 parent 25708b3 commit 5c082e7
Showing 1 changed file with 127 additions and 0 deletions.
clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -577,6 +577,133 @@ __nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
__device__ inline unsigned __isCtaShared(const void *__ptr) {
  return __isShared(__ptr);
}

__device__ inline unsigned __isClusterShared(const void *__ptr) {
  return __nvvm_isspacep_shared_cluster(__ptr);
}
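
// Illustrative usage sketch, not part of this commit: the two predicates
// differ in scope. For a pointer into this CTA's own shared memory both
// return nonzero; for a pointer mapped in from a peer CTA of the cluster,
// only __isClusterShared does.
//
//   __shared__ int __buf[32];
//   unsigned __own = __isCtaShared(__buf);          // nonzero
//   unsigned __anywhere = __isClusterShared(__buf); // also nonzero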

__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
                                                  unsigned __rank) {
  return __nvvm_mapa((void *)__ptr, __rank);
}

__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
  return __nvvm_getctarank((void *)__ptr);
}

__device__ inline uint2
__cluster_map_shared_multicast(const void *__ptr,
                               unsigned int __cluster_cta_mask) {
  return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
                    __cluster_cta_mask);
}
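
// Illustrative usage sketch, not part of this commit: map a __shared__
// object into a peer CTA's shared memory and query which CTA owns the
// resulting address. __peer_rank is a hypothetical rank smaller than
// __clusterSizeInBlocks().
//
//   __shared__ int __slot;
//   int *__remote = (int *)__cluster_map_shared_rank(&__slot, __peer_rank);
//   unsigned __owner = __cluster_query_shared_rank(__remote); // __peer_rank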

__device__ inline unsigned __clusterDimIsSpecified() {
  return __nvvm_is_explicit_cluster();
}

__device__ inline dim3 __clusterDim() {
  return {__nvvm_read_ptx_sreg_cluster_nctaid_x(),
          __nvvm_read_ptx_sreg_cluster_nctaid_y(),
          __nvvm_read_ptx_sreg_cluster_nctaid_z()};
}

__device__ inline dim3 __clusterRelativeBlockIdx() {
  return {__nvvm_read_ptx_sreg_cluster_ctaid_x(),
          __nvvm_read_ptx_sreg_cluster_ctaid_y(),
          __nvvm_read_ptx_sreg_cluster_ctaid_z()};
}

__device__ inline dim3 __clusterGridDimInClusters() {
  return {__nvvm_read_ptx_sreg_nclusterid_x(),
          __nvvm_read_ptx_sreg_nclusterid_y(),
          __nvvm_read_ptx_sreg_nclusterid_z()};
}

__device__ inline dim3 __clusterIdx() {
  return {__nvvm_read_ptx_sreg_clusterid_x(),
          __nvvm_read_ptx_sreg_clusterid_y(),
          __nvvm_read_ptx_sreg_clusterid_z()};
}

__device__ inline unsigned __clusterRelativeBlockRank() {
  return __nvvm_read_ptx_sreg_cluster_ctarank();
}

__device__ inline unsigned __clusterSizeInBlocks() {
  return __nvvm_read_ptx_sreg_cluster_nctarank();
}
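
// Illustrative usage sketch, not part of this commit: the per-axis queries
// linearize to the same value the hardware rank register reports, so for a
// kernel launched with an explicit cluster shape one would expect:
//
//   dim3 __cdim = __clusterDim();
//   dim3 __cidx = __clusterRelativeBlockIdx();
//   unsigned __rank =
//       __cidx.x + __cdim.x * (__cidx.y + __cdim.y * __cidx.z);
//   // __rank == __clusterRelativeBlockRank()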

__device__ inline void __cluster_barrier_arrive() {
  __nvvm_barrier_cluster_arrive();
}

__device__ inline void __cluster_barrier_arrive_relaxed() {
  __nvvm_barrier_cluster_arrive_relaxed();
}

__device__ inline void __cluster_barrier_wait() {
  __nvvm_barrier_cluster_wait();
}

__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
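
// Illustrative usage sketch, not part of this commit: a minimal exchange
// through distributed shared memory. Each CTA publishes a value in its own
// shared slot; the arrive/wait pair orders the writes before any cross-CTA
// read (the _relaxed variant would drop that ordering). __peer_rank and
// __my_value are hypothetical.
//
//   __shared__ int __slot;
//   __slot = __my_value;
//   __cluster_barrier_arrive();
//   __cluster_barrier_wait();
//   int *__peer = (int *)__cluster_map_shared_rank(&__slot, __peer_rank);
//   int __got = *__peer;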

__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}

__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}

__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}
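
// Illustrative usage sketch, not part of this commit: like the scalar
// overloads, the vector forms return the accumulator's previous contents.
// __acc2 is a hypothetical global-memory accumulator.
//
//   __device__ float2 __acc2;
//   float2 __old = atomicAdd(&__acc2, make_float2(1.0f, 2.0f));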

__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
  return __ret;
}

__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
  return __ret;
}

__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
  return __ret;
}
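
// Illustrative usage sketch, not part of this commit: the float4 overloads
// add four lanes in one atom operation; the underlying PTX atomics expect a
// naturally aligned address (16 bytes for v4.f32), which a float4 object
// already guarantees. __acc4 is a hypothetical global-memory accumulator;
// the _block form only orders against accesses from the same thread block.
//
//   __device__ float4 __acc4;
//   float4 __old4 = atomicAdd_block(&__acc4, make_float4(1.f, 2.f, 3.f, 4.f));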

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
#endif // CUDA_VERSION >= 11000

#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
