Skip to content

Commit

Permalink
Allow pointer types for atomicrmw xchg
Browse files Browse the repository at this point in the history
This adds support for pointer types for `atomicrmw xchg` and lets us write
instructions such as `atomicrmw xchg i64** %0, i64* %1 seq_cst`. This
is similar to the patch for allowing atomicrmw xchg on floating point
types: https://reviews.llvm.org/D52416.

Differential Revision: https://reviews.llvm.org/D124728
  • Loading branch information
tkf authored and jyknight committed May 25, 2022
1 parent 172149e commit 18e6b82
Show file tree
Hide file tree
Showing 11 changed files with 77 additions and 18 deletions.
4 changes: 2 additions & 2 deletions llvm/docs/LangRef.rst
Expand Up @@ -10263,8 +10263,8 @@ operation. The operation must be one of the following keywords:
For most of these operations, the type of '<value>' must be an integer
type whose bit width is a power of two greater than or equal to eight
and less than or equal to a target-specific size limit. For xchg, this
may also be a floating point type with the same size constraints as
integers. For fadd/fsub, this must be a floating point type. The
may also be a floating point or a pointer type with the same size constraints
as integers. For fadd/fsub, this must be a floating point type. The
type of the '``<pointer>``' operand must be a pointer to that type. If
the ``atomicrmw`` is marked as ``volatile``, then the optimizer is not
allowed to modify the number or order of execution of this
Expand Down
3 changes: 2 additions & 1 deletion llvm/include/llvm/CodeGen/TargetLowering.h
Expand Up @@ -2092,7 +2092,8 @@ class TargetLoweringBase {
virtual AtomicExpansionKind
shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
RMWI->getValOperand()->getType()->isFloatingPointTy())
(RMWI->getValOperand()->getType()->isFloatingPointTy() ||
RMWI->getValOperand()->getType()->isPointerTy()))
return AtomicExpansionKind::CastToInteger;

return AtomicExpansionKind::None;
Expand Down
14 changes: 9 additions & 5 deletions llvm/lib/AsmParser/LLParser.cpp
Expand Up @@ -7436,10 +7436,12 @@ int LLParser::parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {

if (Operation == AtomicRMWInst::Xchg) {
if (!Val->getType()->isIntegerTy() &&
!Val->getType()->isFloatingPointTy()) {
return error(ValLoc,
"atomicrmw " + AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer or floating point type");
!Val->getType()->isFloatingPointTy() &&
!Val->getType()->isPointerTy()) {
return error(
ValLoc,
"atomicrmw " + AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer, floating point, or pointer type");
}
} else if (IsFP) {
if (!Val->getType()->isFloatingPointTy()) {
Expand All @@ -7455,7 +7457,9 @@ int LLParser::parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
}
}

unsigned Size = Val->getType()->getPrimitiveSizeInBits();
unsigned Size =
PFS.getFunction().getParent()->getDataLayout().getTypeStoreSizeInBits(
Val->getType());
if (Size < 8 || (Size & (Size - 1)))
return error(ValLoc, "atomicrmw operand must be power-of-two byte-sized"
" type");
Expand Down
9 changes: 7 additions & 2 deletions llvm/lib/CodeGen/AtomicExpandPass.cpp
Expand Up @@ -387,15 +387,19 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
Value *Val = RMWI->getValOperand();
Type *PT = PointerType::get(NewTy, RMWI->getPointerAddressSpace());
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
Value *NewVal = Builder.CreateBitCast(Val, NewTy);
Value *NewVal = Val->getType()->isPointerTy()
? Builder.CreatePtrToInt(Val, NewTy)
: Builder.CreateBitCast(Val, NewTy);

auto *NewRMWI =
Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, NewAddr, NewVal,
RMWI->getAlign(), RMWI->getOrdering());
NewRMWI->setVolatile(RMWI->isVolatile());
LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

Value *NewRVal = Builder.CreateBitCast(NewRMWI, RMWI->getType());
Value *NewRVal = RMWI->getType()->isPointerTy()
? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
: Builder.CreateBitCast(NewRMWI, RMWI->getType());
RMWI->replaceAllUsesWith(NewRVal);
RMWI->eraseFromParent();
return NewRMWI;
Expand Down Expand Up @@ -527,6 +531,7 @@ static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
Type *OrigTy = NewVal->getType();

// This code can go away when cmpxchg supports FP types.
assert(!OrigTy->isPointerTy());
bool NeedBitcast = OrigTy->isFloatingPointTy();
if (NeedBitcast) {
IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/IR/Verifier.cpp
Expand Up @@ -3925,7 +3925,8 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
auto Op = RMWI.getOperation();
Type *ElTy = RMWI.getOperand(1)->getType();
if (Op == AtomicRMWInst::Xchg) {
Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(),
Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
ElTy->isPointerTy(),
"atomicrmw " + AtomicRMWInst::getOperationName(Op) +
" operand must have integer, floating point, or pointer type!",
&RMWI, ElTy);
Expand Down

This file was deleted.

6 changes: 6 additions & 0 deletions llvm/test/Bitcode/compatibility.ll
Expand Up @@ -854,6 +854,12 @@ define void @fp_atomics(float* %word) {
ret void
}

; Bitcode round-trip coverage for the newly-allowed pointer-typed
; 'atomicrmw xchg': the instruction must parse, survive bitcode
; serialization, and print back exactly as written.
define void @pointer_atomics(i8** %word) {
; CHECK: %atomicrmw.xchg = atomicrmw xchg i8** %word, i8* null monotonic
%atomicrmw.xchg = atomicrmw xchg i8** %word, i8* null monotonic
ret void
}

;; Fast Math Flags
define void @fastmathflags_unop(float %op1) {
%f.nnan = fneg nnan float %op1
Expand Down
9 changes: 9 additions & 0 deletions llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
Expand Up @@ -659,6 +659,15 @@ entry:
ret void
}

; A pointer-typed xchg should lower to the same 64-bit flat swap
; (flat_atomic_swap_x2) as the i64 variants in this file, after the
; AtomicExpand pass casts the pointer operand to integer.
; GCN-LABEL: {{^}}atomic_xchg_pointer_offset:
; GCN: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
define amdgpu_kernel void @atomic_xchg_pointer_offset(i8** %out, i8* %in) {
entry:
%gep = getelementptr i8*, i8** %out, i32 4
%val = atomicrmw volatile xchg i8** %gep, i8* %in seq_cst
ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; GCN: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
Expand Down
11 changes: 11 additions & 0 deletions llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
Expand Up @@ -794,6 +794,17 @@ entry:
ret void
}

; Global-address-space version: a pointer-typed xchg selects the same
; 64-bit swap as the i64 tests in this file (buffer_atomic_swap_x2 on
; CI/VI, global_atomic_swap_x2 on GFX9), with the GEP folded into the
; offset:32 addressing mode.
; GCN-LABEL: {{^}}atomic_xchg_pointer_offset:
; CIVI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}

; GFX9: global_atomic_swap_x2 v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}} offset:32{{$}}
define amdgpu_kernel void @atomic_xchg_pointer_offset(i8* addrspace(1)* %out, i8* %in) {
entry:
%gep = getelementptr i8*, i8* addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i8* addrspace(1)* %gep, i8* %in seq_cst
ret void
}

; GCN-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; CIVI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; CIVI: buffer_store_dwordx2 [[RET]]
Expand Down
13 changes: 13 additions & 0 deletions llvm/test/CodeGen/AMDGPU/local-atomics64.ll
Expand Up @@ -40,6 +40,19 @@ define amdgpu_kernel void @lds_atomic_xchg_ret_f64_offset(double addrspace(1)* %
ret void
}

; LDS (addrspace 3) version with a used result: a pointer-typed xchg
; should emit the same 64-bit DS swap (ds_wrxchg_rtn_b64) as the i64/f64
; variants above, and the swapped-out pointer value must be storable.
; GCN-LABEL: {{^}}lds_atomic_xchg_ret_pointer_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define amdgpu_kernel void @lds_atomic_xchg_ret_pointer_offset(i8* addrspace(1)* %out, i8* addrspace(3)* %ptr) nounwind {
%gep = getelementptr i8*, i8* addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i8* addrspace(3)* %gep, i8* null seq_cst
store i8* %result, i8* addrspace(1)* %out, align 8
ret void
}

; GCN-LABEL: {{^}}lds_atomic_add_ret_i64:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
Expand Down
16 changes: 16 additions & 0 deletions llvm/test/CodeGen/X86/atomic64.ll
Expand Up @@ -4,6 +4,7 @@

@sc64 = external dso_local global i64
@fsc64 = external dso_local global double
@psc64 = external dso_local global i8*

define void @atomic_fetch_add64() nounwind {
; X64-LABEL: atomic_fetch_add64:
Expand Down Expand Up @@ -780,3 +781,18 @@ define void @atomic_fetch_swapf64(double %x) nounwind {
%t1 = atomicrmw xchg double* @fsc64, double %x acquire
ret void
}

; Pointer-typed xchg on x86 lowers to a plain XCHG against the global,
; mirroring the f64 swap test above: 64-bit xchgq on X64, 32-bit xchgl
; on I486 (pointer size differs per target).
define void @atomic_fetch_swapptr(i8* %x) nounwind {
; X64-LABEL: atomic_fetch_swapptr:
; X64: # %bb.0:
; X64-NEXT: xchgq %rdi, psc64(%rip)
; X64-NEXT: retq
;
; I486-LABEL: atomic_fetch_swapptr:
; I486: # %bb.0:
; I486-NEXT: movl {{[0-9]+}}(%esp), %eax
; I486-NEXT: xchgl %eax, psc64
; I486-NEXT: retl
%t1 = atomicrmw xchg i8** @psc64, i8* %x acquire
ret void
}

0 comments on commit 18e6b82

Please sign in to comment.