[SVE] Prefer zero-extending loads when lowering ISD::EXTLOAD.
The decision is perhaps arbitrary, but I figure zeroing has no dependency on the value being loaded.

Differential Revision: https://reviews.llvm.org/D119327
paulwalker-arm committed Feb 10, 2022
1 parent a57a7f3 commit c58be85
Showing 12 changed files with 86 additions and 87 deletions.
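
As a minimal scalar sketch of the rationale above (plain C++, not LLVM code): zero extension produces upper bits that are constant regardless of the loaded value, while sign extension makes them depend on the loaded sign bit. When the consumer accepts either form (ISD::EXTLOAD leaves the upper bits unspecified), preferring the zeroing load removes a dependency on the loaded data.

#include <cstdint>

// Zero extension: the upper 24 bits are always 0, independent of the data.
uint32_t load_zext(const uint8_t *p) { return *p; }

// Sign extension: the upper 24 bits replicate bit 7 of the loaded byte,
// so they carry a dependency on the loaded value.
int32_t load_sext(const int8_t *p) { return *p; }
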
3 changes: 1 addition & 2 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4604,7 +4604,6 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
bool IdxNeedsExtend =
getGatherScatterIndexIsExtended(Index) ||
Index.getSimpleValueType().getVectorElementType() == MVT::i32;
-  bool ResNeedsSignExtend = ExtTy == ISD::EXTLOAD || ExtTy == ISD::SEXTLOAD;

EVT VT = PassThru.getSimpleValueType();
EVT IndexVT = Index.getSimpleValueType();
@@ -4652,7 +4651,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
/*isGather=*/true, DAG);

-  if (ResNeedsSignExtend)
+  if (ExtTy == ISD::SEXTLOAD)
Opcode = getSignExtendedGatherOpcode(Opcode);

if (IsFixedLength) {
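
A minimal, self-contained sketch of the control-flow change above, using simplified names rather than LLVM's actual types: only ISD::SEXTLOAD now selects the sign-extended gather opcode, so ISD::EXTLOAD keeps the default zero-extending one.

enum class LoadExtType { NON_EXTLOAD, EXTLOAD, SEXTLOAD, ZEXTLOAD };

unsigned selectGatherOpcode(unsigned ZeroExtOpc, unsigned SignExtOpc,
                            LoadExtType ExtTy) {
  // Before this patch: ExtTy == EXTLOAD || ExtTy == SEXTLOAD took SignExtOpc.
  if (ExtTy == LoadExtType::SEXTLOAD)
    return SignExtOpc;
  return ZeroExtOpc; // EXTLOAD and ZEXTLOAD both use the zeroing form.
}
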
36 changes: 18 additions & 18 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -350,49 +350,49 @@ def nonext_masked_load :
cast<MaskedLoadSDNode>(N)->isUnindexed() &&
!cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
-// sign extending masked load fragments.
-def asext_masked_load :
+// Any/Zero extending masked load fragments.
+def azext_masked_load :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
(masked_ld node:$ptr, undef, node:$pred, node:$def),[{
return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
-          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
+          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD) &&
cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
-def asext_masked_load_i8 :
+def azext_masked_load_i8 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
-def asext_masked_load_i16 :
+def azext_masked_load_i16 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
-def asext_masked_load_i32 :
+def azext_masked_load_i32 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
-// zero extending masked load fragments.
-def zext_masked_load :
+// Sign extending masked load fragments.
+def sext_masked_load :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
(masked_ld node:$ptr, undef, node:$pred, node:$def), [{
-  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
+  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD &&
cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
-def zext_masked_load_i8 :
+def sext_masked_load_i8 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
-def zext_masked_load_i16 :
+def sext_masked_load_i16 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
-def zext_masked_load_i32 :
+def sext_masked_load_i32 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
-  (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+  (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

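
Restated in C++ against LLVM's MaskedLoadSDNode API (a paraphrase of the PatFrag predicate above, not code from this patch), the new azext_masked_load fragment matches:

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Unindexed masked loads whose extension is "any" (EXTLOAD) or zero
// (ZEXTLOAD); sign-extending loads are matched by sext_masked_load instead.
static bool isAZExtMaskedLoad(const SDNode *N) {
  const auto *MLD = cast<MaskedLoadSDNode>(N);
  return (MLD->getExtensionType() == ISD::EXTLOAD ||
          MLD->getExtensionType() == ISD::ZEXTLOAD) &&
         MLD->isUnindexed();
}
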
24 changes: 12 additions & 12 deletions llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2150,31 +2150,31 @@ let Predicates = [HasSVEorStreamingSVE] in {
}

// 2-element contiguous loads
-defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i8, LD1B_D, LD1B_D_IMM, am_sve_regreg_lsl0>;
-defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i8, LD1SB_D, LD1SB_D_IMM, am_sve_regreg_lsl0>;
-defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i16, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
-defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i16, LD1SH_D, LD1SH_D_IMM, am_sve_regreg_lsl1>;
-defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i32, LD1W_D, LD1W_D_IMM, am_sve_regreg_lsl2>;
-defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i32, LD1SW_D, LD1SW_D_IMM, am_sve_regreg_lsl2>;
+defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i8, LD1B_D, LD1B_D_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i8, LD1SB_D, LD1SB_D_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i16, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
+defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i16, LD1SH_D, LD1SH_D_IMM, am_sve_regreg_lsl1>;
+defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i32, LD1W_D, LD1W_D_IMM, am_sve_regreg_lsl2>;
+defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i32, LD1SW_D, LD1SW_D_IMM, am_sve_regreg_lsl2>;
defm : pred_load<nxv2i64, nxv2i1, nonext_masked_load, LD1D, LD1D_IMM, am_sve_regreg_lsl3>;
defm : pred_load<nxv2f16, nxv2i1, nonext_masked_load, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv2bf16, nxv2i1, nonext_masked_load, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv2f32, nxv2i1, nonext_masked_load, LD1W_D, LD1W_D_IMM, am_sve_regreg_lsl2>;
defm : pred_load<nxv2f64, nxv2i1, nonext_masked_load, LD1D, LD1D_IMM, am_sve_regreg_lsl3>;

// 4-element contiguous loads
-defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i8, LD1B_S, LD1B_S_IMM, am_sve_regreg_lsl0>;
-defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i8, LD1SB_S, LD1SB_S_IMM, am_sve_regreg_lsl0>;
-defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i16, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
-defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i16, LD1SH_S, LD1SH_S_IMM, am_sve_regreg_lsl1>;
+defm : pred_load<nxv4i32, nxv4i1, azext_masked_load_i8, LD1B_S, LD1B_S_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv4i32, nxv4i1, sext_masked_load_i8, LD1SB_S, LD1SB_S_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv4i32, nxv4i1, azext_masked_load_i16, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
+defm : pred_load<nxv4i32, nxv4i1, sext_masked_load_i16, LD1SH_S, LD1SH_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4i32, nxv4i1, nonext_masked_load, LD1W, LD1W_IMM, am_sve_regreg_lsl2>;
defm : pred_load<nxv4f16, nxv4i1, nonext_masked_load, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4bf16, nxv4i1, nonext_masked_load, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4f32, nxv4i1, nonext_masked_load, LD1W, LD1W_IMM, am_sve_regreg_lsl2>;

// 8-element contiguous loads
-defm : pred_load<nxv8i16, nxv8i1, zext_masked_load_i8, LD1B_H, LD1B_H_IMM, am_sve_regreg_lsl0>;
-defm : pred_load<nxv8i16, nxv8i1, asext_masked_load_i8, LD1SB_H, LD1SB_H_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv8i16, nxv8i1, azext_masked_load_i8, LD1B_H, LD1B_H_IMM, am_sve_regreg_lsl0>;
+defm : pred_load<nxv8i16, nxv8i1, sext_masked_load_i8, LD1SB_H, LD1SB_H_IMM, am_sve_regreg_lsl0>;
defm : pred_load<nxv8i16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv8f16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv8bf16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
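
To summarize the net effect of these entries, a hypothetical helper (illustration only; the mnemonics come from the i8 -> 16-bit-lane case above): any-extend now shares the zeroing ld1b with zero-extend, while sign-extend keeps ld1sb.

enum class Ext { Any, Zero, Sign };

// Illustrative mapping from masked-load extension kind to the SVE contiguous
// load now selected for i8 elements widened into 16-bit lanes.
const char *ld1ForI8ToH(Ext E) {
  switch (E) {
  case Ext::Any:  // ISD::EXTLOAD -- redirected by this patch
  case Ext::Zero: // ISD::ZEXTLOAD
    return "ld1b"; // LD1B_H, zeroing form
  case Ext::Sign: // ISD::SEXTLOAD
    return "ld1sb"; // LD1SB_H, sign-extending form
  }
  return nullptr;
}
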
36 changes: 18 additions & 18 deletions llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -56,7 +56,7 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
; CHECK-LABEL: fcvt_v8f16_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s, vl8
-; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.s, p0/m, z0.h
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
@@ -72,8 +72,8 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #8
; VBITS_EQ_256-NEXT: ptrue p0.s, vl8
-; VBITS_EQ_256-NEXT: ld1sh { z0.s }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1h { z0.s }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z1.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_EQ_256-NEXT: fcvt z1.s, p0/m, z1.h
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
@@ -83,7 +83,7 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v16f16_v16f32:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.s, vl16
-; VBITS_GE_512-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -98,7 +98,7 @@ define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v32f16_v32f32:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.s, vl32
-; VBITS_GE_1024-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_1024-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -112,7 +112,7 @@ define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v64f16_v64f32:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.s, vl64
-; VBITS_GE_2048-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_2048-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
@@ -161,7 +161,7 @@ define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
; CHECK-LABEL: fcvt_v4f16_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.h
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
@@ -176,8 +176,8 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #4
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
-; VBITS_EQ_256-NEXT: ld1sh { z0.d }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1sh { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z1.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_EQ_256-NEXT: fcvt z1.d, p0/m, z1.h
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
@@ -187,7 +187,7 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v8f16_v8f64:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.d, vl8
-; VBITS_GE_512-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -202,7 +202,7 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v16f16_v16f64:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.d, vl16
-; VBITS_GE_1024-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -216,7 +216,7 @@ define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v32f16_v32f64:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.d, vl32
-; VBITS_GE_2048-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
@@ -262,7 +262,7 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
; CHECK-LABEL: fcvt_v4f32_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.s
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
@@ -278,8 +278,8 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #4
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
-; VBITS_EQ_256-NEXT: ld1sw { z0.d }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: ld1sw { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT: ld1w { z1.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_EQ_256-NEXT: fcvt z1.d, p0/m, z1.s
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
@@ -289,7 +289,7 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v8f32_v8f64:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.d, vl8
-; VBITS_GE_512-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -303,7 +303,7 @@ define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v16f32_v16f64:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.d, vl16
-; VBITS_GE_1024-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -317,7 +317,7 @@ define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v32f32_v32f64:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
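
The ld1sh -> ld1h (and ld1sw -> ld1w) updates above are sound because the widening fcvt reads only the low half of each lane, so the extension performed by the load is a don't-care. A small C++ check of that truncation argument:

#include <cstdint>

// The low 16 bits of a 32-bit lane are the same whether the upper bits were
// zeroed or sign-extended, so either load feeds fcvt identically.
static_assert(static_cast<uint16_t>(0x0000ABCDu) ==
                  static_cast<uint16_t>(0xFFFFABCDu),
              "zero- and sign-extended upper bits truncate identically");
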
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
@@ -25,7 +25,7 @@ define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1b { z0.s }, p0, [x0]
@@ -41,7 +41,7 @@ define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1]
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.h, vl4
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -60,8 +60,8 @@ define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x1, x8, lsl #3]
; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
-; VBITS_EQ_256-NEXT: ld1sb { z1.d }, p0/z, [z1.d]
+; VBITS_EQ_256-NEXT: ld1b { z0.d }, p0/z, [z0.d]
+; VBITS_EQ_256-NEXT: ld1b { z1.d }, p0/z, [z1.d]
; VBITS_EQ_256-NEXT: uzp1 z0.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: uzp1 z1.s, z1.s, z1.s
; VBITS_EQ_256-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -134,7 +134,7 @@ define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1h { z0.s }, p0, [x0]
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -36,7 +36,7 @@ define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
; CHECK-NEXT: cmeq v0.2s, v0.2s, #0
; CHECK-NEXT: sshll v0.2d, v0.2s, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1b { z0.s }, p0, [x0]
@@ -60,7 +60,7 @@ define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
; CHECK-NEXT: sunpklo z0.s, z0.h
; CHECK-NEXT: sunpklo z0.d, z0.s
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.h, vl4
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -96,8 +96,8 @@ define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
; VBITS_EQ_256-NEXT: sunpklo z0.d, z0.s
; VBITS_EQ_256-NEXT: cmpne p1.d, p0/z, z1.d, #0
; VBITS_EQ_256-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; VBITS_EQ_256-NEXT: ld1sb { z0.d }, p1/z, [z2.d]
-; VBITS_EQ_256-NEXT: ld1sb { z1.d }, p0/z, [z3.d]
+; VBITS_EQ_256-NEXT: ld1b { z0.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT: ld1b { z1.d }, p0/z, [z3.d]
; VBITS_EQ_256-NEXT: uzp1 z0.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: uzp1 z1.s, z1.s, z1.s
; VBITS_EQ_256-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -196,7 +196,7 @@ define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
; CHECK-NEXT: cmeq v0.2s, v0.2s, #0
; CHECK-NEXT: sshll v0.2d, v0.2s, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1h { z0.s }, p0, [x0]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -50,7 +50,7 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(i8* %base, i64 %offset, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: index z0.s, #0, #1
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x8, z0.s, sxtw]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
%splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
%splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -209,7 +209,7 @@ define <vscale x 4 x i8> @gather_8i8_index_offset_8([8 x i8]* %base, i64 %offset
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: index z0.s, #0, #8
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x8, z0.s, sxtw]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
%t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
%t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
