Skip to content

Commit

Permalink
[RISCV] Fold store of vmv.x.s to a vse with VL=1.
Browse files Browse the repository at this point in the history
This can avoid a loss of decoupling with the scalar unit on cores
with decoupled scalar and vector units.

We should support FP too, but those use extract_element and not a
custom ISD node so it is a little different. I also left a FIXME
in the test for i64 extract and store on RV32.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D109482
  • Loading branch information
topperc committed Sep 27, 2021
1 parent 2bf06d9 commit a2a07e8
Show file tree
Hide file tree
Showing 4 changed files with 104 additions and 4 deletions.
3 changes: 2 additions & 1 deletion llvm/include/llvm/CodeGen/SelectionDAG.h
Expand Up @@ -1352,7 +1352,8 @@ class SelectionDAG {
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
Align Alignment, MachineMemOperand::Flags MMOFlags,
const AAMDNodes &AAInfo, bool IsCompressing = false);
const AAMDNodes &AAInfo = AAMDNodes(),
bool IsCompressing = false);
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
bool IsCompressing = false);
Expand Down
25 changes: 25 additions & 0 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Expand Up @@ -930,6 +930,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::STORE);
}
}

Expand Down Expand Up @@ -7116,6 +7117,30 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return V;
return SDValue();
}
case ISD::STORE: {
auto *Store = cast<StoreSDNode>(N);
SDValue Val = Store->getValue();
// Combine store of vmv.x.s to vse with VL of 1.
// FIXME: Support FP.
if (Val.getOpcode() == RISCVISD::VMV_X_S) {
SDValue Src = Val.getOperand(0);
EVT VecVT = Src.getValueType();
EVT MemVT = Store->getMemoryVT();
// The memory VT and the element type must match.
if (VecVT.getVectorElementType() == MemVT) {
SDLoc DL(N);
MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
DAG.getConstant(1, DL, MaskVT),
DAG.getConstant(1, DL, Subtarget.getXLenVT()),
Store->getPointerInfo(),
Store->getOriginalAlign(),
Store->getMemOperand()->getFlags());
}
}

break;
}
}

return SDValue();
Expand Down
76 changes: 76 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
Expand Up @@ -521,3 +521,79 @@ define i64 @extractelt_v3i64_idx(<3 x i64>* %x, i32 signext %idx) nounwind {
%c = extractelement <3 x i64> %b, i32 %idx
ret i64 %c
}

; Store of a single extracted i8 element. The CHECK lines show the combined
; codegen: a vslidedown.vi brings element 7 to lane 0 and a vse8.v with VL=1
; stores it directly, so the value never moves through a scalar register.
define void @store_extractelt_v16i8(<16 x i8>* %x, i8* %p) nounwind {
; CHECK-LABEL: store_extractelt_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v25, v25, 7
; CHECK-NEXT: vse8.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
%b = extractelement <16 x i8> %a, i32 7
store i8 %b, i8* %p
ret void
}

; Same pattern for i16: extract element 7 and store it. Expected codegen is a
; vslidedown.vi followed by a VL=1 vse16.v instead of vmv.x.s + scalar sh.
define void @store_extractelt_v8i16(<8 x i16>* %x, i16* %p) nounwind {
; CHECK-LABEL: store_extractelt_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v25, v25, 7
; CHECK-NEXT: vse16.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
%b = extractelement <8 x i16> %a, i32 7
store i16 %b, i16* %p
ret void
}

; Same pattern for i32: extract element 2 and store it. Expected codegen is a
; vslidedown.vi followed by a VL=1 vse32.v instead of vmv.x.s + scalar sw.
define void @store_extractelt_v4i32(<4 x i32>* %x, i32* %p) nounwind {
; CHECK-LABEL: store_extractelt_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v25, v25, 2
; CHECK-NEXT: vse32.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%b = extractelement <4 x i32> %a, i32 2
store i32 %b, i32* %p
ret void
}

; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
; i64 case: on RV64 the combine fires (VL=1 vse64.v), but on RV32 the i64
; extract is lowered as two 32-bit vmv.x.s extracts (low word, then a
; vsrl.vx-by-32 for the high word) plus two sw stores, per the FIXME above.
; NOTE(review): the name says v4i64 but the test operates on <2 x i64>;
; looks like a copy-paste from the sibling tests — consider renaming (the
; CHECK-LABEL lines would need to change in lockstep).
define void @store_extractelt_v4i64(<2 x i64>* %x, i64* %p) nounwind {
; RV32-LABEL: store_extractelt_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v25, (a0)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vslidedown.vi v25, v25, 1
; RV32-NEXT: addi a0, zero, 32
; RV32-NEXT: vsrl.vx v26, v25, a0
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: vmv.x.s a2, v25
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: sw a0, 4(a1)
; RV32-NEXT: ret
;
; RV64-LABEL: store_extractelt_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v25, (a0)
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vslidedown.vi v25, v25, 1
; RV64-NEXT: vse64.v v25, (a1)
; RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
%b = extractelement <2 x i64> %a, i64 1
store i64 %b, i64* %p
ret void
}

4 changes: 1 addition & 3 deletions llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
Expand Up @@ -727,10 +727,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV32MV-NEXT: vmsne.vv v0, v26, v30
; RV32MV-NEXT: vmv.v.i v26, 0
; RV32MV-NEXT: vmerge.vim v26, v26, -1, v0
; RV32MV-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; RV32MV-NEXT: vmv.x.s a0, v26
; RV32MV-NEXT: sw a0, 0(s1)
; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, mu
; RV32MV-NEXT: vse32.v v26, (s1)
; RV32MV-NEXT: vslidedown.vi v28, v26, 1
; RV32MV-NEXT: vmv.x.s a0, v28
; RV32MV-NEXT: vslidedown.vi v28, v26, 2
Expand Down

0 comments on commit a2a07e8

Please sign in to comment.