80 changes: 78 additions & 2 deletions llvm/lib/Target/RISCV/RISCVInstrInfoA.td
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,

class PseudoCmpXchg
: Pseudo<(outs GPR:$res, GPR:$scratch),
(ins GPR:$addr, GPR:$cmpval, GPR:$newval, i32imm:$ordering), []> {
(ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
Expand Down Expand Up @@ -263,7 +263,7 @@ defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
def PseudoMaskedCmpXchg32
: Pseudo<(outs GPR:$res, GPR:$scratch),
(ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
i32imm:$ordering), []> {
ixlenimm:$ordering), []> {
let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
Expand All @@ -276,3 +276,79 @@ def : Pat<(int_riscv_masked_cmpxchg_i32
GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;

} // Predicates = [HasStdExtA]

// RV64-only atomics support (requires both the 'A' standard extension and a
// 64-bit target). Everything below selects 64-bit atomic operations, either
// directly onto AMO*.D instructions or onto pseudos expanded later.
let Predicates = [HasStdExtA, IsRV64] in {

/// 64-bit atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_64, LD>;
defm : AtomicStPat<atomic_store_64, SD, GPR>;

// Map each 64-bit atomicrmw operation onto the corresponding AMO*.D
// instruction. AMOPat is expected to instantiate one pattern per memory
// ordering, selecting the plain/.aq/.rl/.aq.rl instruction variants
// (see the AMOPat multiclass defined earlier in this file).
defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

// There is no AMOSUB instruction; atomic subtract is implemented as an
// atomic add of the negated operand ((SUB X0, $incr) computes -$incr).
// Note seq_cst uses the same .aq.rl variant as acq_rel.
def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
(AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
(AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
(AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
(AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
(AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

// NAND has no native AMO instruction, so it is lowered to a pseudo that is
// later expanded to an LR.D/SC.D loop.
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
// The immediate encodes the ordering: 2 = Monotonic, 4 = Acquire,
// 5 = Release, 6 = AcquireRelease, 7 = SequentiallyConsistent.
def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
(PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

// The i64 masked-atomicrmw intrinsics reuse the "...32"-named masked pseudos.
// Presumably this is because masked (part-word, i8/i16) operations operate on
// values held in XLen registers and the same expansion works for RV64 —
// NOTE(review): confirm against RISCVExpandAtomicPseudoInsts.
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
PseudoMaskedAtomicLoadNand32>;
// Signed min/max need the extra shift-amount operand, hence the distinct
// MinMax pattern class.
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
PseudoMaskedAtomicLoadUMin32>;

/// 64-bit compare and exchange

// Full-width 64-bit cmpxchg gets its own pseudo (expanded to an LR.D/SC.D
// loop); the patterns for each ordering come from PseudoCmpXchgPat.
def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

// Masked (part-word) cmpxchg for the i64 intrinsic also reuses the 32-named
// pseudo, matching the masked AMO handling above.
def : Pat<(int_riscv_masked_cmpxchg_i64
GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
(PseudoMaskedCmpXchg32
GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]
712 changes: 712 additions & 0 deletions llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll

Large diffs are not rendered by default.

182 changes: 182 additions & 0 deletions llvm/test/CodeGen/RISCV/atomic-load-store.ll
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
; RUN: | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IA %s

define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
Expand All @@ -31,6 +33,11 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, i8* %a unordered, align 1
ret i8 %1
}
Expand Down Expand Up @@ -60,6 +67,11 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, i8* %a monotonic, align 1
ret i8 %1
}
Expand Down Expand Up @@ -90,6 +102,12 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i8_acquire:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i8, i8* %a acquire, align 1
ret i8 %1
}
Expand Down Expand Up @@ -121,6 +139,13 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i8_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, rw
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i8, i8* %a seq_cst, align 1
ret i8 %1
}
Expand Down Expand Up @@ -150,6 +175,11 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i16_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i16, i16* %a unordered, align 2
ret i16 %1
}
Expand Down Expand Up @@ -179,6 +209,11 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i16, i16* %a monotonic, align 2
ret i16 %1
}
Expand Down Expand Up @@ -209,6 +244,12 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i16_acquire:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i16, i16* %a acquire, align 2
ret i16 %1
}
Expand Down Expand Up @@ -240,6 +281,13 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i16_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, rw
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i16, i16* %a seq_cst, align 2
ret i16 %1
}
Expand Down Expand Up @@ -269,6 +317,11 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i32_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i32, i32* %a unordered, align 4
ret i32 %1
}
Expand Down Expand Up @@ -298,6 +351,11 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i32, i32* %a monotonic, align 4
ret i32 %1
}
Expand Down Expand Up @@ -328,6 +386,12 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i32_acquire:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i32, i32* %a acquire, align 4
ret i32 %1
}
Expand Down Expand Up @@ -359,6 +423,13 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i32_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, rw
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i32, i32* %a seq_cst, align 4
ret i32 %1
}
Expand Down Expand Up @@ -393,6 +464,11 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i64_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i64, i64* %a unordered, align 8
ret i64 %1
}
Expand Down Expand Up @@ -427,6 +503,11 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i64, i64* %a monotonic, align 8
ret i64 %1
}
Expand Down Expand Up @@ -461,6 +542,12 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i64_acquire:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i64, i64* %a acquire, align 8
ret i64 %1
}
Expand Down Expand Up @@ -495,6 +582,13 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i64_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, rw
; RV64IA-NEXT: ld a0, 0(a0)
; RV64IA-NEXT: fence r, rw
; RV64IA-NEXT: ret
%1 = load atomic i64, i64* %a seq_cst, align 8
ret i64 %1
}
Expand Down Expand Up @@ -524,6 +618,11 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i8_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sb a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i8 %b, i8* %a unordered, align 1
ret void
}
Expand Down Expand Up @@ -553,6 +652,11 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sb a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i8 %b, i8* %a monotonic, align 1
ret void
}
Expand Down Expand Up @@ -583,6 +687,12 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i8_release:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sb a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i8 %b, i8* %a release, align 1
ret void
}
Expand Down Expand Up @@ -613,6 +723,12 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i8_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sb a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i8 %b, i8* %a seq_cst, align 1
ret void
}
Expand Down Expand Up @@ -642,6 +758,11 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i16_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sh a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i16 %b, i16* %a unordered, align 2
ret void
}
Expand Down Expand Up @@ -671,6 +792,11 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sh a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i16 %b, i16* %a monotonic, align 2
ret void
}
Expand Down Expand Up @@ -701,6 +827,12 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i16_release:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sh a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i16 %b, i16* %a release, align 2
ret void
}
Expand Down Expand Up @@ -731,6 +863,12 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i16_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sh a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i16 %b, i16* %a seq_cst, align 2
ret void
}
Expand Down Expand Up @@ -760,6 +898,11 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i32_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sw a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i32 %b, i32* %a unordered, align 4
ret void
}
Expand Down Expand Up @@ -789,6 +932,11 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sw a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i32 %b, i32* %a monotonic, align 4
ret void
}
Expand Down Expand Up @@ -819,6 +967,12 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i32_release:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sw a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i32 %b, i32* %a release, align 4
ret void
}
Expand Down Expand Up @@ -849,6 +1003,12 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i32_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sw a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i32 %b, i32* %a seq_cst, align 4
ret void
}
Expand Down Expand Up @@ -883,6 +1043,11 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i64_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sd a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i64 %b, i64* %a unordered, align 8
ret void
}
Expand Down Expand Up @@ -917,6 +1082,11 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sd a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i64 %b, i64* %a monotonic, align 8
ret void
}
Expand Down Expand Up @@ -951,6 +1121,12 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i64_release:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sd a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i64 %b, i64* %a release, align 8
ret void
}
Expand Down Expand Up @@ -985,6 +1161,12 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_store_i64_seq_cst:
; RV64IA: # %bb.0:
; RV64IA-NEXT: fence rw, w
; RV64IA-NEXT: sd a1, 0(a0)
; RV64IA-NEXT: ret
store atomic i64 %b, i64* %a seq_cst, align 8
ret void
}
3,017 changes: 3,017 additions & 0 deletions llvm/test/CodeGen/RISCV/atomic-rmw.ll

Large diffs are not rendered by default.