[CHERI-RISC-V] Insert the required fences for explicit address mode atomics

The explicit addressing mode atomics always use relaxed ordering, so we
need to insert fences when stronger orderings are requested.
Fortunately, there is already support for this in the AtomicExpandPass, so
all we need to do here is fix emitLeadingFence/emitTrailingFence and
handle RMW instructions in shouldInsertFencesForAtomic().
arichardson committed Dec 8, 2020
1 parent 17d3927 commit 71f5af4
Showing 5 changed files with 120 additions and 15 deletions.
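For context, here is a minimal IR sketch of the case this commit addresses (written for this summary; the function name @cmpxchg_acq_rel_hybrid is hypothetical, while the addrspace(200) pointer type matches the cmpxchg-cap-ptr.ll tests below). A strongly-ordered cmpxchg on a capability pointer in hybrid (non-capmode) code has to be lowered to the relaxed explicit-address LR/SC instructions, so AtomicExpandPass now brackets it with fences:

; Hybrid-ABI module assumed: the capability pointer cannot use the
; mode-dependent encodings, so the explicit addressing mode atomics are used.
define { i32, i1 } @cmpxchg_acq_rel_hybrid(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind {
entry:
  ; acq_rel success ordering -> leading "fence rw, w" before the LR/SC loop;
  ; acquire failure ordering -> trailing "fence r, rw" after it (see the
  ; HYBRID-ATOMICS check lines in the test diff below).
  %res = cmpxchg i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire
  ret { i32, i1 } %res
}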
37 changes: 30 additions & 7 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3249,17 +3249,17 @@ void RISCVTargetLowering::LowerAsmOperandForConstraint(
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction *Inst,
AtomicOrdering Ord) const {
if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
if (Inst->hasAtomicLoad() && Ord == AtomicOrdering::SequentiallyConsistent)
return Builder.CreateFence(Ord);
if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
if (Inst->hasAtomicStore() && isReleaseOrStronger(Ord))
return Builder.CreateFence(AtomicOrdering::Release);
return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
Instruction *Inst,
AtomicOrdering Ord) const {
if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord))
return Builder.CreateFence(AtomicOrdering::Acquire);
return nullptr;
}
@@ -3297,6 +3297,30 @@ EVT RISCVTargetLowering::getOptimalMemOpType(
return TargetLowering::getOptimalMemOpType(Op, FuncAttributes);
}

template <typename Instr>
static bool
needsExplicitAddressingModeAtomics(const Instr *I,
const RISCVSubtarget &Subtarget) {
// We have to fall back to the explicit addressing mode atomics when we can't
// use the more powerful mode-dependent encodings. This happens when using
// capability pointers in non-capability mode and integer pointers in capmode.
const DataLayout &DL = I->getModule()->getDataLayout();
bool IsCheriPurecap = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI());
return DL.isFatPointer(I->getPointerOperand()->getType()) != IsCheriPurecap;
}

bool RISCVTargetLowering::shouldInsertFencesForAtomic(
const Instruction *I) const {
// The CHERI instructions with explicit addressing mode are always relaxed
// so we need to insert fences when using them.
if (const auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
return needsExplicitAddressingModeAtomics(RMWI, Subtarget);
} else if (const auto *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
return needsExplicitAddressingModeAtomics(CASI, Subtarget);
}
return isa<LoadInst>(I) || isa<StoreInst>(I);
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
@@ -3305,20 +3329,19 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation())
return AtomicExpansionKind::CmpXChg;

const DataLayout &DL = AI->getModule()->getDataLayout();
bool IsCheriPurecap = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI());
// The atomic RMW instructions are only available in capmode, so for the
// hybrid ABI we have to expand these operations to use compare-exchange (which
// is expanded to an LR/SC sequence when not in capmode).
// Note: Since the operations between LR/SC don't break the forward progress
// guarantee, we could generate slightly more efficient code by using
// AtomicExpansionKind::LLSC. However, the required TLI hooks are not
// implemented, so we use CmpXChg.
if (DL.isFatPointer(AI->getPointerOperand()->getType()) && !IsCheriPurecap)
if (needsExplicitAddressingModeAtomics(AI, Subtarget))
return AtomicExpansionKind::CmpXChg;

unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if ((Size == 8 || Size == 16) && !IsCheriPurecap)
if ((Size == 8 || Size == 16) &&
!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
return AtomicExpansionKind::MaskedIntrinsic;
return AtomicExpansionKind::None;
}
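To see how the two hooks above combine, here is a hypothetical IR fragment (not part of this commit's tests; the function name @rmw_add_seq_cst_hybrid is invented): an atomicrmw on a capability pointer in hybrid mode is expanded to a compare-exchange loop by shouldExpandAtomicRMWInIR(), and shouldInsertFencesForAtomic() then ensures the relaxed explicit-address cmpxchg is bracketed by the required fences:

; Hybrid-ABI (non-purecap) module assumed, so the addrspace(200) pointer
; forces the explicit addressing mode atomics.
define i32 @rmw_add_seq_cst_hybrid(i32 addrspace(200)* %ptr, i32 %val) nounwind {
entry:
  ; Expanded via AtomicExpansionKind::CmpXChg; the resulting cmpxchg is
  ; lowered to an lr.w.cap/sc.w.cap loop surrounded by "fence rw, rw" and
  ; "fence r, rw" for seq_cst (compare test_cmpxchg_weak_sc_sc below).
  %old = atomicrmw add i32 addrspace(200)* %ptr, i32 %val seq_cst
  ret i32 %old
}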
4 changes: 1 addition & 3 deletions llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -133,9 +133,7 @@ class RISCVTargetLowering : public TargetLowering {
}
bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return isa<LoadInst>(I) || isa<StoreInst>(I);
}
bool shouldInsertFencesForAtomic(const Instruction *I) const override;
Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
AtomicOrdering Ord) const override;
Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
22 changes: 17 additions & 5 deletions llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td
@@ -1309,6 +1309,13 @@ multiclass PseudoCheriCmpXchgPat<string Op, Pseudo CmpXchgInst,
(CmpXchgInst GPCR:$addr, ValTy:$cmp, ValTy:$new, 7)>;
}

// The explicit address mode atomics are always relaxed
multiclass PseudoCheriCmpXchgPatExplicit<string Op, Pseudo CmpXchgInst,
RegisterClass ValTy=GPR> {
def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPCR:$addr, ValTy:$cmp, ValTy:$new),
(CmpXchgInst GPCR:$addr, ValTy:$cmp, ValTy:$new, 2)>;
}

multiclass CheriAMOCapPat<string CLenStr, string AtomicOp, string BaseInst> {
def : PatGpcrGpcr<!cast<PatFrag>(AtomicOp#"_monotonic"),
!cast<RVInst>(BaseInst#"_"#CLenStr)>;
@@ -1523,13 +1530,18 @@ defm : CheriAMOCapPat<"128", "atomic_swap_cap", "CAMOSWAP_C">;
/// Only atomic_cmp_swap is supported since all other operations are expanded.

let Predicates = [HasCheri, HasStdExtA, NotCapMode] in {
defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_8", PseudoCheriCmpXchg8ExplicitCap>;
defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_16", PseudoCheriCmpXchg16ExplicitCap>;
defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_32", PseudoCheriCmpXchg32ExplicitCap>;
defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_cap", PseudoCheriCmpXchgCapExplicitCap, GPCR>;
defm : PseudoCheriCmpXchgPatExplicit<"atomic_cmp_swap_8",
PseudoCheriCmpXchg8ExplicitCap>;
defm : PseudoCheriCmpXchgPatExplicit<"atomic_cmp_swap_16",
PseudoCheriCmpXchg16ExplicitCap>;
defm : PseudoCheriCmpXchgPatExplicit<"atomic_cmp_swap_32",
PseudoCheriCmpXchg32ExplicitCap>;
defm : PseudoCheriCmpXchgPatExplicit<"atomic_cmp_swap_cap",
PseudoCheriCmpXchgCapExplicitCap, GPCR>;
} // Predicates = [HasCheri, HasStdExtA, NotCapMode]
let Predicates = [HasCheri, HasStdExtA, IsRV64, NotCapMode] in
defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_64", PseudoCheriCmpXchg64ExplicitCap>;
defm : PseudoCheriCmpXchgPatExplicit<"atomic_cmp_swap_64",
PseudoCheriCmpXchg64ExplicitCap>;


/// 'F' (Single-Precision Floating-Point) extension
34 changes: 34 additions & 0 deletions llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll
@@ -54,6 +54,7 @@ define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: slli a1, a1, 24
; HYBRID-ATOMICS-NEXT: srai a1, a1, 24
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB0_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.b.cap a2, (ca0)
@@ -66,6 +67,7 @@ define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %
; HYBRID-ATOMICS-NEXT: .LBB0_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -137,6 +139,7 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp,
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: slli a1, a1, 16
; HYBRID-ATOMICS-NEXT: srai a1, a1, 16
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB1_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.h.cap a2, (ca0)
@@ -149,6 +152,7 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp,
; HYBRID-ATOMICS-NEXT: .LBB1_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -216,6 +220,7 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp,
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_strong_i32:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB2_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -228,6 +233,7 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp,
; HYBRID-ATOMICS-NEXT: .LBB2_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -362,6 +368,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_strong_cap:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB4_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.c.cap ca2, (ca0)
@@ -374,6 +381,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad
; HYBRID-ATOMICS-NEXT: .LBB4_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: cmove ca0, ca2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -441,6 +449,7 @@ define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(20
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_strong_cap_i32:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB5_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.c.cap ca2, (ca0)
@@ -453,6 +462,7 @@ define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(20
; HYBRID-ATOMICS-NEXT: .LBB5_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: cmove ca0, ca2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -524,6 +534,7 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: slli a1, a1, 24
; HYBRID-ATOMICS-NEXT: srai a1, a1, 24
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB6_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.b.cap a2, (ca0)
@@ -536,6 +547,7 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne
; HYBRID-ATOMICS-NEXT: .LBB6_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -607,6 +619,7 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: slli a1, a1, 16
; HYBRID-ATOMICS-NEXT: srai a1, a1, 16
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB7_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.h.cap a2, (ca0)
@@ -619,6 +632,7 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1
; HYBRID-ATOMICS-NEXT: .LBB7_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -686,6 +700,7 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_i32:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB8_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -698,6 +713,7 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3
; HYBRID-ATOMICS-NEXT: .LBB8_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -832,6 +848,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_cap:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB10_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.c.cap ca2, (ca0)
@@ -844,6 +861,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr
; HYBRID-ATOMICS-NEXT: .LBB10_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: cmove ca0, ca2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -911,6 +929,7 @@ define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_cap_i32:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB11_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.c.cap ca2, (ca0)
@@ -923,6 +942,7 @@ define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)
; HYBRID-ATOMICS-NEXT: .LBB11_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: cmove ca0, ca2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -992,6 +1012,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_sc(i32 addrspace(200)* %ptr, i32 %exp,
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_sc_sc:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, rw
; HYBRID-ATOMICS-NEXT: .LBB12_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1004,6 +1025,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_sc(i32 addrspace(200)* %ptr, i32 %exp,
; HYBRID-ATOMICS-NEXT: .LBB12_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1071,6 +1093,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_acquire(i32 addrspace(200)* %ptr, i32 %
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_sc_acquire:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, rw
; HYBRID-ATOMICS-NEXT: .LBB13_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1083,6 +1106,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_acquire(i32 addrspace(200)* %ptr, i32 %
; HYBRID-ATOMICS-NEXT: .LBB13_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1150,6 +1174,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_relaxed(i32 addrspace(200)* %ptr, i32 %
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_sc_relaxed:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, rw
; HYBRID-ATOMICS-NEXT: .LBB14_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1162,6 +1187,7 @@ define { i32, i1 } @test_cmpxchg_weak_sc_relaxed(i32 addrspace(200)* %ptr, i32 %
; HYBRID-ATOMICS-NEXT: .LBB14_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1229,6 +1255,7 @@ define { i32, i1 } @test_cmpxchg_weak_acqrel_acquire(i32 addrspace(200)* %ptr, i
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_acqrel_acquire:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB15_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1241,6 +1268,7 @@ define { i32, i1 } @test_cmpxchg_weak_acqrel_acquire(i32 addrspace(200)* %ptr, i
; HYBRID-ATOMICS-NEXT: .LBB15_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1308,6 +1336,7 @@ define { i32, i1 } @test_cmpxchg_weak_acqrel_relaxed(i32 addrspace(200)* %ptr, i
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_acqrel_relaxed:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB16_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1320,6 +1349,7 @@ define { i32, i1 } @test_cmpxchg_weak_acqrel_relaxed(i32 addrspace(200)* %ptr, i
; HYBRID-ATOMICS-NEXT: .LBB16_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1387,6 +1417,7 @@ define { i32, i1 } @test_cmpxchg_weak_release_acquire(i32 addrspace(200)* %ptr,
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_release_acquire:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB17_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1466,6 +1497,7 @@ define { i32, i1 } @test_cmpxchg_weak_release_relaxed(i32 addrspace(200)* %ptr,
;
; HYBRID-ATOMICS-LABEL: test_cmpxchg_weak_release_relaxed:
; HYBRID-ATOMICS: # %bb.0: # %entry
; HYBRID-ATOMICS-NEXT: fence rw, w
; HYBRID-ATOMICS-NEXT: .LBB18_1: # %entry
; HYBRID-ATOMICS-NEXT: # =>This Inner Loop Header: Depth=1
; HYBRID-ATOMICS-NEXT: lr.w.cap a2, (ca0)
@@ -1557,6 +1589,7 @@ define { i32, i1 } @test_cmpxchg_weak_acquire_acquire(i32 addrspace(200)* %ptr,
; HYBRID-ATOMICS-NEXT: .LBB19_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;
Expand Down Expand Up @@ -1636,6 +1669,7 @@ define { i32, i1 } @test_cmpxchg_weak_acquire_relaxed(i32 addrspace(200)* %ptr,
; HYBRID-ATOMICS-NEXT: .LBB20_3: # %entry
; HYBRID-ATOMICS-NEXT: xor a0, a2, a1
; HYBRID-ATOMICS-NEXT: seqz a1, a0
; HYBRID-ATOMICS-NEXT: fence r, rw
; HYBRID-ATOMICS-NEXT: mv a0, a2
; HYBRID-ATOMICS-NEXT: ret
;