Skip to content

Commit

Permalink
[IR] Extend cmpxchg to allow pointer type operands
Browse files Browse the repository at this point in the history
Today, we do not allow cmpxchg operations with pointer arguments. We require the frontend to insert ptrtoint casts and do the cmpxchg in integers. While correct, this is problematic from a couple of perspectives:
1) It makes the IR harder to analyse (for instance, it makes capture tracking overly conservative)
2) It pushes work onto the frontend authors for no real gain

This patch implements the simplest form of IR support. As we did with floating point loads and stores, we teach AtomicExpand to convert back to the old representation. This prevents us needing to change all backends in a single lock step change. Over time, we can migrate each backend to natively selecting the pointer type. In the meantime, we get the advantages of a cleaner IR representation without waiting for the backend changes.

Differential Revision: http://reviews.llvm.org/D17413

llvm-svn: 261281
  • Loading branch information
preames committed Feb 19, 2016
1 parent 878ae01 commit 1960cfd
Show file tree
Hide file tree
Showing 5 changed files with 161 additions and 20 deletions.
14 changes: 7 additions & 7 deletions llvm/docs/LangRef.rst
Expand Up @@ -7082,13 +7082,13 @@ Arguments:
There are three arguments to the '``cmpxchg``' instruction: an address
to operate on, a value to compare to the value currently at that
address, and a new value to place at that address if the compared values
are equal. The type of '<cmp>' must be an integer type whose bit width
is a power of two greater than or equal to eight and less than or equal
to a target-specific size limit. '<cmp>' and '<new>' must have the same
type, and the type of '<pointer>' must be a pointer to that type. If the
``cmpxchg`` is marked as ``volatile``, then the optimizer is not allowed
to modify the number or order of execution of this ``cmpxchg`` with
other :ref:`volatile operations <volatile>`.
are equal. The type of '<cmp>' must be an integer or pointer type whose
bit width is a power of two greater than or equal to eight and less
than or equal to a target-specific size limit. '<cmp>' and '<new>' must
have the same type, and the type of '<pointer>' must be a pointer to
that type. If the ``cmpxchg`` is marked as ``volatile``, then the
optimizer is not allowed to modify the number or order of execution of
this ``cmpxchg`` with other :ref:`volatile operations <volatile>`.

The success and failure :ref:`ordering <ordering>` arguments specify how this
``cmpxchg`` synchronizes with other atomic operations. Both ordering parameters
Expand Down
10 changes: 3 additions & 7 deletions llvm/lib/AsmParser/LLParser.cpp
Expand Up @@ -27,6 +27,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
Expand Down Expand Up @@ -5903,13 +5904,8 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
return Error(CmpLoc, "compare value and pointer type do not match");
if (cast<PointerType>(Ptr->getType())->getElementType() != New->getType())
return Error(NewLoc, "new value and pointer type do not match");
if (!New->getType()->isIntegerTy())
return Error(NewLoc, "cmpxchg operand must be an integer");
unsigned Size = New->getType()->getPrimitiveSizeInBits();
if (Size < 8 || (Size & (Size - 1)))
return Error(NewLoc, "cmpxchg operand must be power-of-two byte-sized"
" integer");

if (!New->getType()->isFirstClassType())
return Error(NewLoc, "cmpxchg operand must be a first class value");
AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
CXI->setVolatile(isVolatile);
Expand Down
67 changes: 63 additions & 4 deletions llvm/lib/CodeGen/AtomicExpandPass.cpp
Expand Up @@ -60,6 +60,7 @@ namespace {
bool expandAtomicOpToLLSC(
Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
bool isIdempotentRMW(AtomicRMWInst *AI);
bool simplifyIdempotentRMW(AtomicRMWInst *AI);
Expand Down Expand Up @@ -168,8 +169,22 @@ bool AtomicExpand::runOnFunction(Function &F) {
} else {
MadeChange |= tryExpandAtomicRMW(RMWI);
}
} else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
MadeChange |= expandAtomicCmpXchg(CASI);
} else if (CASI) {
// TODO: when we're ready to make the change at the IR level, we can
// extend convertCmpXchgToInteger for floating point too.
assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
"unimplemented - floating point not legal at IR level");
if (CASI->getCompareOperand()->getType()->isPointerTy() ) {
// TODO: add a TLI hook to control this so that each target can
// convert to lowering the original type one at a time.
CASI = convertCmpXchgToIntegerType(CASI);
assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
"invariant broken");
MadeChange = true;
}

if (TLI->shouldExpandAtomicCmpXchgInIR(CASI))
MadeChange |= expandAtomicCmpXchg(CASI);
}
}
return MadeChange;
Expand Down Expand Up @@ -206,7 +221,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivelent bitwidth. See the function comment on
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *M = LI->getModule();
Expand Down Expand Up @@ -283,7 +298,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivelent bitwidth. We used to not support floating point or vector
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of a atomic
/// float or vector store. The long term plan is to teach each backend to
Expand Down Expand Up @@ -448,6 +463,50 @@ bool AtomicExpand::expandAtomicOpToLLSC(
return true;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *Mod = CI->getModule();
  // Pick the integer type with the same bit width as the pointee type.
  Type *IntTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            Mod->getDataLayout());

  IRBuilder<> Builder(CI);

  // Reinterpret the address as a pointer-to-integer in the same address space.
  Value *PtrOp = CI->getPointerOperand();
  Type *IntPtrTy = PointerType::get(IntTy,
                                    PtrOp->getType()->getPointerAddressSpace());
  Value *CastAddr = Builder.CreateBitCast(PtrOp, IntPtrTy);

  // Lower both value operands from pointers to integers.
  Value *CmpInt = Builder.CreatePtrToInt(CI->getCompareOperand(), IntTy);
  Value *NewValInt = Builder.CreatePtrToInt(CI->getNewValOperand(), IntTy);

  // Emit the integer-typed cmpxchg, preserving all of the original's
  // atomicity attributes (orderings, scope, volatile, weak).
  auto *IntCI = Builder.CreateAtomicCmpXchg(CastAddr, CmpInt, NewValInt,
                                            CI->getSuccessOrdering(),
                                            CI->getFailureOrdering(),
                                            CI->getSynchScope());
  IntCI->setVolatile(CI->isVolatile());
  IntCI->setWeak(CI->isWeak());
  DEBUG(dbgs() << "Replaced " << *CI << " with " << *IntCI << "\n");

  // Rebuild the original { ptr, i1 } result pair: cast the loaded integer
  // back to the pointer type and repackage it with the success flag.
  Value *Loaded = Builder.CreateExtractValue(IntCI, 0);
  Value *Success = Builder.CreateExtractValue(IntCI, 1);

  Loaded = Builder.CreateIntToPtr(Loaded, CI->getCompareOperand()->getType());

  Value *Result = UndefValue::get(CI->getType());
  Result = Builder.CreateInsertValue(Result, Loaded, 0);
  Result = Builder.CreateInsertValue(Result, Success, 1);

  CI->replaceAllUsesWith(Result);
  CI->eraseFromParent();
  return IntCI;
}


bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/IR/Verifier.cpp
Expand Up @@ -2952,8 +2952,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
Type *ElTy = PTy->getElementType();
Assert(ElTy->isIntegerTy(), "cmpxchg operand must have integer type!", &CXI,
ElTy);
Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
"cmpxchg operand must have integer or pointer type",
ElTy, &CXI);
checkAtomicMemAccessSize(M, ElTy, &CXI);
Assert(ElTy == CXI.getOperand(1)->getType(),
"Expected value type does not match pointer operand type!", &CXI,
Expand Down
85 changes: 85 additions & 0 deletions llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
Expand Up @@ -80,3 +80,88 @@ define void @float_store_expand_addr1(float addrspace(1)* %ptr, float %v) {
ret void
}

define void @pointer_cmpxchg_expand(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
ret void
}

define void @pointer_cmpxchg_expand2(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand2
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 release monotonic
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v release monotonic
ret void
}

define void @pointer_cmpxchg_expand3(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand3
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}

define void @pointer_cmpxchg_expand4(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand4
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}

define void @pointer_cmpxchg_expand5(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand5
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg volatile i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg volatile i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}

define void @pointer_cmpxchg_expand6(i8 addrspace(2)* addrspace(1)* %ptr,
i8 addrspace(2)* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand6
; CHECK: %1 = bitcast i8 addrspace(2)* addrspace(1)* %ptr to i64 addrspace(1)*
; CHECK: %2 = ptrtoint i8 addrspace(2)* %v to i64
; CHECK: %3 = cmpxchg i64 addrspace(1)* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8 addrspace(2)*
; CHECK: %7 = insertvalue { i8 addrspace(2)*, i1 } undef, i8 addrspace(2)* %6, 0
; CHECK: %8 = insertvalue { i8 addrspace(2)*, i1 } %7, i1 %5, 1
cmpxchg i8 addrspace(2)* addrspace(1)* %ptr, i8 addrspace(2)* null, i8 addrspace(2)* %v seq_cst seq_cst
ret void
}

0 comments on commit 1960cfd

Please sign in to comment.