[RISCV] Reorder shuffle operands if one side is an identity (#98534)
Doing so allows one side to fold entirely into the mask applied to the
other recursive call (or a vmerge.vv at worst). This is a generalization
of the existing IsSelect case (both operands are selects), so I removed
that code in the process.

This actually started as an attempt to remove the IsSelect bit, as I'd
thought it was fully redundant with the recursive formulation, but digging
into the test deltas revealed that we depended on it to catch the majority
of the identity cases, and that in turn we were missing some cases where
only the RHS was an identity.
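
For context, a shuffle operand is an "identity" here when every lane it supplies keeps its original position. A minimal standalone sketch of that test in plain C++, mirroring the semantics of ShuffleVectorInst::isIdentityMask (an illustration, not the LLVM implementation):

#include <vector>

// True if every defined index in Mask selects the element already sitting
// at that position; -1 entries (undef lanes) are ignored.
static bool isIdentityMask(const std::vector<int> &Mask) {
  for (int i = 0, e = (int)Mask.size(); i != e; ++i)
    if (Mask[i] != -1 && Mask[i] != i)
      return false;
  return true;
}

// Example: splitting the two-source mask {8,9,3,4,5,6,14,15} (NumElts = 8)
// from the merge_slidedown test below gives an RHS half of
// {0,1,-1,-1,-1,-1,6,7}, which is an identity, so that operand can be used
// unshuffled and only the LHS needs a real permute.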
preames committed Jul 11, 2024
1 parent 6f04f46 commit 657dbc3
Showing 5 changed files with 90 additions and 115 deletions.
59 changes: 17 additions & 42 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5321,67 +5321,42 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}

-  // By default we preserve the original operand order, and use a mask to
-  // select LHS as true and RHS as false. However, since RVV vector selects may
-  // feature splats but only on the LHS, we may choose to invert our mask and
-  // instead select between RHS and LHS.
-  bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
-
-  // Detect shuffles which can be re-expressed as vector selects; these are
-  // shuffles in which each element in the destination is taken from an element
-  // at the corresponding index in either source vectors.
-  bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
-    int MaskIndex = MaskIdx.value();
-    return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
-  });
-  if (IsSelect) {
-    // Now construct the mask that will be used by the vselect operation.
-    SmallVector<SDValue> MaskVals;
-    for (int MaskIndex : Mask) {
-      bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps;
-      MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
-    }
-
-    if (SwapOps)
-      std::swap(V1, V2);
-
-    assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
-    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
-    SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
-    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
-  }
-
   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
   // merged with a second vrgather.
   SmallVector<int> ShuffleMaskLHS, ShuffleMaskRHS;
-  SmallVector<SDValue> MaskVals;

-  // Now construct the mask that will be used by the blended vrgather operation.
-  // Cconstruct the appropriate indices into each vector.
+  // Construct the appropriate indices into each vector.
   for (int MaskIndex : Mask) {
-    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;
-    MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
     bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
     ShuffleMaskLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
                                  ? MaskIndex : -1);
     ShuffleMaskRHS.push_back(IsLHSOrUndefIndex ? -1 : (MaskIndex - NumElts));
   }

-  if (SwapOps) {
-    std::swap(V1, V2);
-    std::swap(ShuffleMaskLHS, ShuffleMaskRHS);
-  }
-
-  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
-  MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
-  SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
+  // Try to pick a profitable operand order.
+  bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
+  SwapOps = SwapOps ^ ShuffleVectorInst::isIdentityMask(ShuffleMaskRHS, NumElts);

   // Recursively invoke lowering for each operand if we had two
   // independent single source shuffles, and then combine the result via a
   // vselect. Note that the vselect will likely be folded back into the
   // second permute (vrgather, or other) by the post-isel combine.
   V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), ShuffleMaskLHS);
   V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), ShuffleMaskRHS);
+
+  SmallVector<SDValue> MaskVals;
+  for (int MaskIndex : Mask) {
+    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;
+    MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
+  }
+
+  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+  SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
+
+  if (SwapOps)
+    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
   return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V2, V1);
 }
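
To see the new path end to end, here is a worked example using the mask from the merge_slidedown test below (a standalone C++ sketch under the same conventions as the one above; not LLVM code):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int NumElts = 8;
  // Two-source shuffle mask from merge_slidedown: lanes 0,1,6,7 come from
  // the RHS vector (indices >= NumElts), lanes 2..5 from the LHS vector.
  const std::vector<int> Mask = {8, 9, 3, 4, 5, 6, 14, 15};

  std::vector<int> ShuffleMaskLHS, ShuffleMaskRHS;
  for (int MaskIndex : Mask) {
    bool IsLHS = MaskIndex < NumElts;
    ShuffleMaskLHS.push_back(IsLHS ? MaskIndex : -1);
    ShuffleMaskRHS.push_back(IsLHS ? -1 : MaskIndex - NumElts);
  }
  // ShuffleMaskLHS = {-1,-1,3,4,5,6,-1,-1} -> a single-source slide-down by 1.
  // ShuffleMaskRHS = {0,1,-1,-1,-1,-1,6,7} -> an identity, so SwapOps flips
  // and the RHS ends up as the passthru operand of a masked slide-down.

  // The select mask marks the lanes taken from the slid-down LHS.
  uint32_t SelectBits = 0;
  for (int i = 0; i < NumElts; ++i)
    if (Mask[i] < NumElts)
      SelectBits |= 1u << i;
  // Prints 60 (0b00111100), matching "li a0, 60" in the updated CHECK
  // lines of the merge_slidedown test below.
  printf("select bits = %u\n", SelectBits);
}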

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -663,11 +663,11 @@ define <8 x i8> @merge_start_into_start(<8 x i8> %v, <8 x i8> %w) {
define <8 x i8> @merge_slidedown(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: merge_slidedown:
; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 195
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: li a0, 60
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 8, i32 9, i32 3, i32 4, i32 5, i32 6, i32 14, i32 15>
ret <8 x i8> %res
98 changes: 49 additions & 49 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
@@ -19,11 +19,11 @@ define <8 x i8> @trn1.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
define <8 x i8> @trn2.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
; CHECK-LABEL: trn2.v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 170
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <8 x i8> %v0, <8 x i8> %v1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
ret <8 x i8> %tmp0
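
A note on reading the trn2 deltas that follow: the even result lanes take the odd elements of %v0, and the odd result lanes keep %v1 in place, so %v1 is the identity side. The old sequence slid %v0 down unmasked and then vmerge'd with mask 170 = 0b10101010 (set bits select %v1); the new sequence keeps %v1 as the destination and performs the slide-down under mask 85 = 0b01010101 (set bits are the lanes written from %v0), folding the select into the slide and eliminating the vmerge.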
@@ -46,13 +46,13 @@ define <16 x i8> @trn1.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
; CHECK-LABEL: trn2.v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 11
-; CHECK-NEXT: addi a0, a0, -1366
+; CHECK-NEXT: lui a0, 5
+; CHECK-NEXT: addi a0, a0, 1365
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <16 x i8> %v0, <16 x i8> %v1, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
ret <16 x i8> %tmp0
@@ -72,10 +72,10 @@ define <4 x i16> @trn1.v4i16(<4 x i16> %v0, <4 x i16> %v1) {
define <4 x i16> @trn2.v4i16(<4 x i16> %v0, <4 x i16> %v1) {
; CHECK-LABEL: trn2.v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 5
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i16> %v0, <4 x i16> %v1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i16> %tmp0
@@ -96,11 +96,11 @@ define <8 x i16> @trn1.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
define <8 x i16> @trn2.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
; CHECK-LABEL: trn2.v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 170
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <8 x i16> %v0, <8 x i16> %v1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
ret <8 x i16> %tmp0
@@ -119,10 +119,10 @@ define <2 x i32> @trn1.v2i32(<2 x i32> %v0, <2 x i32> %v1) {
define <2 x i32> @trn2.v2i32(<2 x i32> %v0, <2 x i32> %v1) {
; CHECK-LABEL: trn2.v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v10, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x i32> %v0, <2 x i32> %v1, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %tmp0
@@ -142,10 +142,10 @@ define <4 x i32> @trn1.v4i32(<4 x i32> %v0, <4 x i32> %v1) {
define <4 x i32> @trn2.v4i32(<4 x i32> %v0, <4 x i32> %v1) {
; CHECK-LABEL: trn2.v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 5
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i32> %tmp0
@@ -164,10 +164,10 @@ define <2 x i64> @trn1.v2i64(<2 x i64> %v0, <2 x i64> %v1) {
define <2 x i64> @trn2.v2i64(<2 x i64> %v0, <2 x i64> %v1) {
; CHECK-LABEL: trn2.v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v10, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x i64> %v0, <2 x i64> %v1, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %tmp0
@@ -186,10 +186,10 @@ define <2 x float> @trn1.v2f32(<2 x float> %v0, <2 x float> %v1) {
define <2 x float> @trn2.v2f32(<2 x float> %v0, <2 x float> %v1) {
; CHECK-LABEL: trn2.v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v10, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x float> %v0, <2 x float> %v1, <2 x i32> <i32 1, i32 3>
ret <2 x float> %tmp0
@@ -209,10 +209,10 @@ define <4 x float> @trn1.v4f32(<4 x float> %v0, <4 x float> %v1) {
define <4 x float> @trn2.v4f32(<4 x float> %v0, <4 x float> %v1) {
; CHECK-LABEL: trn2.v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 5
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x float> %v0, <4 x float> %v1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x float> %tmp0
@@ -231,10 +231,10 @@ define <2 x double> @trn1.v2f64(<2 x double> %v0, <2 x double> %v1) {
define <2 x double> @trn2.v2f64(<2 x double> %v0, <2 x double> %v1) {
; CHECK-LABEL: trn2.v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v10, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x double> %v0, <2 x double> %v1, <2 x i32> <i32 1, i32 3>
ret <2 x double> %tmp0
@@ -254,10 +254,10 @@ define <4 x half> @trn1.v4f16(<4 x half> %v0, <4 x half> %v1) {
define <4 x half> @trn2.v4f16(<4 x half> %v0, <4 x half> %v1) {
; CHECK-LABEL: trn2.v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 5
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x half> %v0, <4 x half> %v1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x half> %tmp0
@@ -278,11 +278,11 @@ define <8 x half> @trn1.v8f16(<8 x half> %v0, <8 x half> %v1) {
define <8 x half> @trn2.v8f16(<8 x half> %v0, <8 x half> %v1) {
; CHECK-LABEL: trn2.v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 170
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT: vslidedown.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%tmp0 = shufflevector <8 x half> %v0, <8 x half> %v1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
ret <8 x half> %tmp0
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll
@@ -179,10 +179,10 @@ define void @vnsrl_32_i32(ptr %in, ptr %out) {
; ZVE32F: # %bb.0: # %entry
; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; ZVE32F-NEXT: vle32.v v8, (a0)
-; ZVE32F-NEXT: vmv.v.i v0, 2
+; ZVE32F-NEXT: vmv.v.i v0, 1
; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu
-; ZVE32F-NEXT: vrgather.vi v9, v8, 1
-; ZVE32F-NEXT: vslidedown.vi v9, v8, 2, v0.t
+; ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; ZVE32F-NEXT: vrgather.vi v9, v8, 1, v0.t
; ZVE32F-NEXT: vse32.v v9, (a1)
; ZVE32F-NEXT: ret
entry:
@@ -233,10 +233,10 @@ define void @vnsrl_32_float(ptr %in, ptr %out) {
; ZVE32F: # %bb.0: # %entry
; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; ZVE32F-NEXT: vle32.v v8, (a0)
-; ZVE32F-NEXT: vmv.v.i v0, 2
+; ZVE32F-NEXT: vmv.v.i v0, 1
; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu
-; ZVE32F-NEXT: vrgather.vi v9, v8, 1
-; ZVE32F-NEXT: vslidedown.vi v9, v8, 2, v0.t
+; ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; ZVE32F-NEXT: vrgather.vi v9, v8, 1, v0.t
; ZVE32F-NEXT: vse32.v v9, (a1)
; ZVE32F-NEXT: ret
entry:
@@ -276,10 +276,10 @@ define void @vnsrl_64_i64(ptr %in, ptr %out) {
; V: # %bb.0: # %entry
; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma
; V-NEXT: vle64.v v8, (a0)
-; V-NEXT: vmv.v.i v0, 2
+; V-NEXT: vmv.v.i v0, 1
; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; V-NEXT: vrgather.vi v9, v8, 1
-; V-NEXT: vslidedown.vi v9, v8, 2, v0.t
+; V-NEXT: vslidedown.vi v9, v8, 2
+; V-NEXT: vrgather.vi v9, v8, 1, v0.t
; V-NEXT: vse64.v v9, (a1)
; V-NEXT: ret
;
@@ -327,10 +327,10 @@ define void @vnsrl_64_double(ptr %in, ptr %out) {
; V: # %bb.0: # %entry
; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma
; V-NEXT: vle64.v v8, (a0)
-; V-NEXT: vmv.v.i v0, 2
+; V-NEXT: vmv.v.i v0, 1
; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; V-NEXT: vrgather.vi v9, v8, 1
-; V-NEXT: vslidedown.vi v9, v8, 2, v0.t
+; V-NEXT: vslidedown.vi v9, v8, 2
+; V-NEXT: vrgather.vi v9, v8, 1, v0.t
; V-NEXT: vse64.v v9, (a1)
; V-NEXT: ret
;
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -80,10 +80,10 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v9, v8, 1
-; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vmv1r.v v9, v10
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%retval = call {<2 x i64>, <2 x i64>} @llvm.vector.deinterleave2.v4i64(<4 x i64> %vec)
@@ -166,10 +166,10 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: vrgather.vi v9, v8, 1
-; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 1
+; CHECK-NEXT: vmv1r.v v9, v10
+; CHECK-NEXT: vrgather.vi v9, v8, 1, v0.t
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%retval = call {<2 x double>, <2 x double>} @llvm.vector.deinterleave2.v4f64(<4 x double> %vec)
