llvm/lib/Target/AArch64/AArch64ISelLowering.cpp: 88 changes (47 additions, 41 deletions)
@@ -5544,9 +5544,10 @@ SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
   SDLoc DL(Op);
 
   SDValue Chain = Op.getOperand(0);
-  SDValue FPCR_64 = DAG.getNode(
-      ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other},
-      {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)});
+  SDValue FPCR_64 =
+      DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other},
+                  {Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL,
+                                                MVT::i64)});
   Chain = FPCR_64.getValue(1);
   SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPCR_64);
   SDValue FltRounds = DAG.getNode(ISD::ADD, DL, MVT::i32, FPCR_32,
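This hunk shows the pattern repeated throughout the file: the intrinsic-ID operand of an INTRINSIC_W_CHAIN, INTRINSIC_WO_CHAIN, or INTRINSIC_VOID node is an immediate that instruction selection matches directly, so it belongs in an ISD::TargetConstant, which DAG combining and legalization leave untouched, rather than an ISD::Constant. A minimal sketch of the corrected form, assuming only the standard SelectionDAG headers; emitReadFPCR is an illustrative name, not a helper from this file:

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/IntrinsicsAArch64.h"
using namespace llvm;

// Build a chained read of FPCR. The intrinsic ID is a TargetConstant so it
// reaches instruction selection as an opaque immediate operand.
static SDValue emitReadFPCR(SelectionDAG &DAG, const SDLoc &DL,
                            SDValue Chain) {
  return DAG.getNode(
      ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other},
      {Chain,
       DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)});
}
```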
@@ -5632,7 +5633,8 @@ SDValue AArch64TargetLowering::LowerSET_FPMODE(SDValue Op,
 
   // Set new value of FPCR.
   SDValue Ops2[] = {
-      Chain, DAG.getConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64), FPCR};
+      Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
+      FPCR};
   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
 }
 
@@ -5655,9 +5657,9 @@ SDValue AArch64TargetLowering::LowerRESET_FPMODE(SDValue Op,
       DAG.getConstant(AArch64::ReservedFPControlBits, DL, MVT::i64));
 
   // Set new value of FPCR.
-  SDValue Ops2[] = {Chain,
-                    DAG.getConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
-                    FPSCRMasked};
+  SDValue Ops2[] = {
+      Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
+      FPSCRMasked};
   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
 }
 
@@ -7289,17 +7291,19 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
 
   SDValue Compressed = DAG.getNode(
       ISD::INTRINSIC_WO_CHAIN, DL, Vec.getValueType(),
-      DAG.getConstant(Intrinsic::aarch64_sve_compact, DL, MVT::i64), Mask, Vec);
+      DAG.getTargetConstant(Intrinsic::aarch64_sve_compact, DL, MVT::i64), Mask,
+      Vec);
 
   // compact fills with 0s, so if our passthru is all 0s, do nothing here.
   if (HasPassthru && !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
     SDValue Offset = DAG.getNode(
         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
-        DAG.getConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask, Mask);
+        DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
+        Mask);
 
     SDValue IndexMask = DAG.getNode(
         ISD::INTRINSIC_WO_CHAIN, DL, MaskVT,
-        DAG.getConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
+        DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
         DAG.getConstant(0, DL, MVT::i64), Offset);
 
     Compressed =
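In the LowerVECTOR_COMPRESS hunk above, cntp counts the active predicate lanes and whilelo(0, count) builds a predicate over exactly that many leading lanes; the line cut off at the end of the hunk then presumably merges the passthru under that predicate. A hedged sketch of the whole merge, where the final VSELECT is an assumption rather than quoted code:

```cpp
// Merge a non-zero passthru into the compacted vector: compact wrote the
// surviving elements to the low lanes, so lanes [0, count) come from
// Compressed and the remaining lanes from Passthru.
static SDValue mergePassthru(SelectionDAG &DAG, const SDLoc &DL, EVT VecVT,
                             EVT MaskVT, SDValue Compressed, SDValue Passthru,
                             SDValue Mask) {
  SDValue Count = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
      DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
      Mask);
  SDValue FirstN = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, DL, MaskVT,
      DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
      DAG.getConstant(0, DL, MVT::i64), Count);
  return DAG.getNode(ISD::VSELECT, DL, VecVT, FirstN, Compressed, Passthru);
}
```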
@@ -7428,10 +7432,10 @@ static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) {
                              DAG.getUNDEF(ExpVT), Exp, Zero);
   SDValue VPg = getPTrue(DAG, DL, XVT.changeVectorElementType(MVT::i1),
                          AArch64SVEPredPattern::all);
-  SDValue FScale =
-      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XVT,
-                  DAG.getConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64),
-                  VPg, VX, VExp);
+  SDValue FScale = DAG.getNode(
+      ISD::INTRINSIC_WO_CHAIN, DL, XVT,
+      DAG.getTargetConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64), VPg,
+      VX, VExp);
   SDValue Final =
       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, X.getValueType(), FScale, Zero);
   if (X.getValueType() != XScalarTy)
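SVE FSCALE computes X * 2^Exp per lane under a predicate, which is exactly ldexp; there is no scalar form, so LowerFLDEXP widens the scalar operands into vectors, applies the intrinsic under an all-true predicate, and extracts lane 0. Condensed from the hunk above, with getPTrue and the VX/VExp/Zero names taken from the surrounding code:

```cpp
// VX and VExp hold the scalar value and exponent inserted into lane 0.
SDValue Pg = getPTrue(DAG, DL, XVT.changeVectorElementType(MVT::i1),
                      AArch64SVEPredPattern::all);
SDValue FScale = DAG.getNode(
    ISD::INTRINSIC_WO_CHAIN, DL, XVT,
    DAG.getTargetConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64), Pg,
    VX, VExp);
// Only lane 0 is meaningful; pull the scalar result back out.
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, X.getValueType(),
                          FScale, Zero);
```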
@@ -8094,7 +8098,7 @@ static SDValue emitRestoreZALazySave(SDValue Chain, SDLoc DL,
       TLI.getLibcallName(LC), TLI.getPointerTy(DAG.getDataLayout()));
   SDValue TPIDR2_EL0 = DAG.getNode(
       ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Chain,
-      DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32));
+      DAG.getTargetConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32));
   // Copy the address of the TPIDR2 block into X0 before 'calling' the
   // RESTORE_ZA pseudo.
   SDValue Glue;
@@ -8109,7 +8113,7 @@
   // Finally reset the TPIDR2_EL0 register to 0.
   Chain = DAG.getNode(
       ISD::INTRINSIC_VOID, DL, MVT::Other, Chain,
-      DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
+      DAG.getTargetConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
       DAG.getConstant(0, DL, MVT::i64));
   TPIDR2.Uses++;
   return Chain;
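Both emitRestoreZALazySave hunks belong to the SME lazy-save protocol: while a call that may clobber ZA is outstanding, TPIDR2_EL0 points at the lazy-save buffer; on return the buffer address is read, the RESTORE_ZA pseudo reloads ZA from it, and the register is cleared so the pending save is not restored a second time (as the in-file comments indicate). The two intrinsic nodes, condensed with the call lowering omitted:

```cpp
// Read the lazy-save buffer pointer; chained so it stays ordered relative
// to the surrounding calls.
SDValue TPIDR2_EL0 = DAG.getNode(
    ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Chain,
    DAG.getTargetConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32));
// ... the RESTORE_ZA pseudo consumes the buffer address via X0 ...
// Clear TPIDR2_EL0: a non-zero value would mark the lazy save as pending.
Chain = DAG.getNode(
    ISD::INTRINSIC_VOID, DL, MVT::Other, Chain,
    DAG.getTargetConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
    DAG.getConstant(0, DL, MVT::i64));
```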
@@ -8704,7 +8708,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
     if (Attrs.isNewZT0())
       Chain = DAG.getNode(
           ISD::INTRINSIC_VOID, DL, MVT::Other, Chain,
-          DAG.getConstant(Intrinsic::aarch64_sme_zero_zt, DL, MVT::i32),
+          DAG.getTargetConstant(Intrinsic::aarch64_sme_zero_zt, DL, MVT::i32),
           DAG.getTargetConstant(0, DL, MVT::i32));
   }
 
@@ -9517,7 +9521,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
         DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
     Chain = DAG.getNode(
         ISD::INTRINSIC_VOID, DL, MVT::Other, Chain,
-        DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
+        DAG.getTargetConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
         TPIDR2ObjAddr);
     OptimizationRemarkEmitter ORE(&MF.getFunction());
     ORE.emit([&]() {
@@ -13408,8 +13412,8 @@ SDValue ReconstructShuffleWithRuntimeMask(SDValue Op, SelectionDAG &DAG) {
 
   return DAG.getNode(
       ISD::INTRINSIC_WO_CHAIN, DL, VT,
-      DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), SourceVec,
-      MaskSourceVec);
+      DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32),
+      SourceVec, MaskSourceVec);
 }
 
 // Gather data to see if the operation can be modelled as a
@@ -14265,14 +14269,16 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
     V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
     Shuffle = DAG.getNode(
         ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
-        DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
+        DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32),
+        V1Cst,
         DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
   } else {
     if (IndexLen == 8) {
       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
       Shuffle = DAG.getNode(
           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
-          DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
+          DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32),
+          V1Cst,
           DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
     } else {
       // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
@@ -14283,8 +14289,8 @@
       // IndexLen));
       Shuffle = DAG.getNode(
           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
-          DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
-          V2Cst,
+          DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32),
+          V1Cst, V2Cst,
           DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
     }
   }
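Across the two GenerateTBL hunks, only the full 128-bit two-source shuffle needs two table registers: a single 64-bit source is concatenated with itself, and two 64-bit sources (IndexLen == 8) are concatenated with each other, so both fit one 16-byte table and tbl1 suffices. A hedged restatement of that dispatch; IsSingleOp and the variable names here are illustrative, not this function's exact locals:

```cpp
// Pick the TBL flavor: after concatenation, everything except the
// 128-bit two-source case fits in a single 16-byte table register.
unsigned IID;
if (IsSingleOp || IndexLen == 8)
  IID = Intrinsic::aarch64_neon_tbl1; // one table register
else
  IID = Intrinsic::aarch64_neon_tbl2; // two table registers
SDValue IIDVal = DAG.getTargetConstant(IID, DL, MVT::i32);
```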
@@ -16437,10 +16443,10 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
     if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
       return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
                          DAG.getTargetConstant(Cnt, DL, MVT::i32));
-    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                       DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
-                                       MVT::i32),
-                       Op.getOperand(0), Op.getOperand(1));
+    return DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, VT,
+        DAG.getTargetConstant(Intrinsic::aarch64_neon_ushl, DL, MVT::i32),
+        Op.getOperand(0), Op.getOperand(1));
   case ISD::SRA:
   case ISD::SRL:
     if (VT.isScalableVector() &&
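The SHL case above has two exits because NEON's only variable vector shift is the by-register USHL: an in-range constant splat amount becomes the immediate AArch64ISD::VSHL node, and everything else routes through the aarch64_neon_ushl intrinsic node, which shifts each lane left by the (possibly per-lane) amount in its second operand. A trimmed restatement of the decision, with isVShiftLImm taken from this file:

```cpp
// Immediate form when the splat constant fits the element width...
uint64_t Cnt;
if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
  return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
                     DAG.getTargetConstant(Cnt, DL, MVT::i32));
// ...otherwise the register form, reached via the intrinsic node.
return DAG.getNode(
    ISD::INTRINSIC_WO_CHAIN, DL, VT,
    DAG.getTargetConstant(Intrinsic::aarch64_neon_ushl, DL, MVT::i32),
    Op.getOperand(0), Op.getOperand(1));
```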
@@ -20160,7 +20166,7 @@ static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
                                      : Intrinsic::aarch64_neon_vcvtfp2fxu;
   SDValue FixConv =
       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
-                  DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
+                  DAG.getTargetConstant(IntrinsicOpcode, DL, MVT::i32),
                   Op->getOperand(0), DAG.getTargetConstant(C, DL, MVT::i32));
   // We can handle smaller integers by generating an extra trunc.
   if (IntBits < FloatBits)
@@ -27416,8 +27422,8 @@ static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
   // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
   // `aarch64_sve_prfb_gather_uxtw_index`.
   SDLoc DL(N);
-  Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
-                           MVT::i64);
+  Ops[1] = DAG.getTargetConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index,
+                                 DL, MVT::i64);
 
   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
 }
@@ -31251,10 +31257,10 @@ static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
 
   SDValue Shuffle;
   if (IsSingleOp)
-    Shuffle =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT,
-                    DAG.getConstant(Intrinsic::aarch64_sve_tbl, DL, MVT::i32),
-                    Op1, SVEMask);
+    Shuffle = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT,
+        DAG.getTargetConstant(Intrinsic::aarch64_sve_tbl, DL, MVT::i32), Op1,
+        SVEMask);
   else if (Subtarget.hasSVE2()) {
     if (!MinMaxEqual) {
       unsigned MinNumElts = AArch64::SVEBitsPerBlock / BitsPerElt;
@@ -31273,10 +31279,10 @@
       SVEMask = convertToScalableVector(
           DAG, getContainerForFixedLengthVector(DAG, MaskType), UpdatedVecMask);
     }
-    Shuffle =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT,
-                    DAG.getConstant(Intrinsic::aarch64_sve_tbl2, DL, MVT::i32),
-                    Op1, Op2, SVEMask);
+    Shuffle = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT,
+        DAG.getTargetConstant(Intrinsic::aarch64_sve_tbl2, DL, MVT::i32), Op1,
+        Op2, SVEMask);
   }
   Shuffle = convertFromScalableVector(DAG, VT, Shuffle);
   return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
@@ -31436,8 +31442,8 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
     unsigned SegmentElts = VT.getVectorNumElements() / Segments;
     if (std::optional<unsigned> Lane =
             isDUPQMask(ShuffleMask, Segments, SegmentElts)) {
-      SDValue IID =
-          DAG.getConstant(Intrinsic::aarch64_sve_dup_laneq, DL, MVT::i64);
+      SDValue IID = DAG.getTargetConstant(Intrinsic::aarch64_sve_dup_laneq,
+                                          DL, MVT::i64);
       return convertFromScalableVector(
           DAG, VT,
           DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT,
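isDUPQMask recognizes a shuffle that broadcasts one 128-bit segment of the source across every segment of the result, which maps onto the SVE dup_laneq intrinsic with the segment index as its lane operand. A hedged sketch of the complete node, extrapolating the operands that the truncated hunk tail passes:

```cpp
// Lane comes from isDUPQMask: which 128-bit quadword to replicate.
SDValue IID = DAG.getTargetConstant(Intrinsic::aarch64_sve_dup_laneq,
                                    DL, MVT::i64);
SDValue Dup = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ContainerVT, IID,
                          Op1, DAG.getConstant(*Lane, DL, MVT::i64));
```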