[AArch64] Combine vector FNEG+FMA into FNML[A|S]
#167900
base: main
Conversation
@llvm/pr-subscribers-backend-aarch64

Author: Damian Heaton (dheaton-arm)

Changes

This allows for FNEG + FMA sequences to be combined into a single operation, with `FNML[A|S]`, `FNMAD`, or `FNMSB` selected depending on the operand order.

Full diff: https://github.com/llvm/llvm-project/pull/167900.diff

3 Files Affected:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c8a038fa99b30..d104e2e956a40 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1176,6 +1176,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::VECTOR_DEINTERLEAVE);
setTargetDAGCombine(ISD::CTPOP);
+ setTargetDAGCombine(ISD::FMA);
+
// In case of strict alignment, avoid an excessive number of byte wide stores.
MaxStoresPerMemsetOptSize = 8;
MaxStoresPerMemset =
@@ -20435,6 +20437,52 @@ static SDValue performFADDCombine(SDNode *N,
return SDValue();
}
+static SDValue performFMACombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue Op1 = N->getOperand(0);
+ SDValue Op2 = N->getOperand(1);
+ SDValue Op3 = N->getOperand(2);
+ EVT VT = N->getValueType(0);
+ SDLoc DL(N);
+
+ // fma(a, b, neg(c)) -> fnmls(a, b, c)
+ // fma(neg(a), b, neg(c)) -> fnmla(a, b, c)
+ // fma(a, neg(b), neg(c)) -> fnmla(a, b, c)
+ if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+ (Subtarget->hasSVE() || Subtarget->hasSME())) {
+ if (Op3.getOpcode() == ISD::FNEG) {
+ unsigned int Opcode;
+ if (Op1.getOpcode() == ISD::FNEG) {
+ Op1 = Op1.getOperand(0);
+ Opcode = AArch64ISD::FNMLA_PRED;
+ } else if (Op2.getOpcode() == ISD::FNEG) {
+ Op2 = Op2.getOperand(0);
+ Opcode = AArch64ISD::FNMLA_PRED;
+ } else {
+ Opcode = AArch64ISD::FNMLS_PRED;
+ }
+ Op3 = Op3.getOperand(0);
+ auto Pg = getPredicateForVector(DAG, DL, VT);
+ if (VT.isFixedLengthVector()) {
+ assert(DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+ "Expected only legal fixed-width types");
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
+ Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
+ Op3 = convertToScalableVector(DAG, ContainerVT, Op3);
+ auto ScalableRes =
+ DAG.getNode(Opcode, DL, ContainerVT, Pg, Op1, Op2, Op3);
+ return convertFromScalableVector(DAG, VT, ScalableRes);
+ }
+ return DAG.getNode(Opcode, DL, VT, Pg, Op1, Op2, Op3);
+ }
+ }
+
+ return SDValue();
+}
+
static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
switch (Opcode) {
case ISD::STRICT_FADD:
@@ -27958,6 +28006,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performANDCombine(N, DCI);
case ISD::FADD:
return performFADDCombine(N, DCI);
+ case ISD::FMA:
+ return performFMACombine(N, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performIntrinsicCombine(N, DCI, Subtarget);
case ISD::ANY_EXTEND:
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index e1f43867bbe5b..2f1e860cb8916 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -240,6 +240,8 @@ def AArch64udiv_p : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
def AArch64umax_p : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;
def AArch64umin_p : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
def AArch64umulh_p : SDNode<"AArch64ISD::MULHU_PRED", SDT_AArch64Arith>;
+def AArch64fnmla_p_node : SDNode<"AArch64ISD::FNMLA_PRED", SDT_AArch64FMA>;
+def AArch64fnmls_p_node : SDNode<"AArch64ISD::FNMLS_PRED", SDT_AArch64FMA>;
def AArch64fadd_p_contract : PatFrag<(ops node:$op1, node:$op2, node:$op3),
(AArch64fadd_p node:$op1, node:$op2, node:$op3), [{
@@ -460,12 +462,14 @@ def AArch64fmlsidx : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
def AArch64fnmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
- [(int_aarch64_sve_fnmla_u node:$pg, node:$za, node:$zn, node:$zm),
+ [(AArch64fnmla_p_node node:$pg, node:$zn, node:$zm, node:$za),
+ (int_aarch64_sve_fnmla_u node:$pg, node:$za, node:$zn, node:$zm),
(AArch64fma_p node:$pg, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$zm, (AArch64fneg_mt node:$pg, node:$za, (undef))),
(AArch64fneg_mt_nsz node:$pg, (AArch64fma_p node:$pg, node:$zn, node:$zm, node:$za), (undef))]>;
def AArch64fnmls_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
- [(int_aarch64_sve_fnmls_u node:$pg, node:$za, node:$zn, node:$zm),
+ [(AArch64fnmls_p_node node:$pg, node:$zn, node:$zm, node:$za),
+ (int_aarch64_sve_fnmls_u node:$pg, node:$za, node:$zn, node:$zm),
(AArch64fma_p node:$pg, node:$zn, node:$zm, (AArch64fneg_mt node:$pg, node:$za, (undef)))]>;
def AArch64fsubr_p : PatFrag<(ops node:$pg, node:$op1, node:$op2),
diff --git a/llvm/test/CodeGen/AArch64/sve-fmsub.ll b/llvm/test/CodeGen/AArch64/sve-fmsub.ll
new file mode 100644
index 0000000000000..721066038769c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fmsub.ll
@@ -0,0 +1,276 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=aarch64 -mattr=+v9a,+sve2,+crypto,+bf16,+sm4,+i8mm,+sve2-bitperm,+sve2-sha3,+sve2-aes,+sve2-sm4 %s -o - | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 2 x double> @fmsub_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fmsub_nxv2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fnmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 2 x double> %c
+ %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %neg)
+ ret <vscale x 2 x double> %0
+}
+
+define <vscale x 4 x float> @fmsub_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fmsub_nxv4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 4 x float> %c
+ %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %neg)
+ ret <vscale x 4 x float> %0
+}
+
+define <vscale x 8 x half> @fmsub_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fmsub_nxv8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 8 x half> %c
+ %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %neg)
+ ret <vscale x 8 x half> %0
+}
+
+define <2 x double> @fmsub_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: fmsub_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fmsub_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: fmsub_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fmsub_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: fmsub_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %neg)
+ ret <8 x half> %0
+}
+
+
+define <2 x double> @fmsub_flipped_v2f64(<2 x double> %c, <2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: fmsub_flipped_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmls z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fmsub_flipped_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: fmsub_flipped_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmls z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fmsub_flipped_v8f16(<8 x half> %c, <8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: fmsub_flipped_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmls z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %neg)
+ ret <8 x half> %0
+}
+
+define <vscale x 2 x double> @fnmsub_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fnmsub_nxv2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 2 x double> %a
+ %neg1 = fneg <vscale x 2 x double> %c
+ %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %neg, <vscale x 2 x double> %b, <vscale x 2 x double> %neg1)
+ ret <vscale x 2 x double> %0
+}
+
+define <vscale x 4 x float> @fnmsub_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fnmsub_nxv4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 4 x float> %a
+ %neg1 = fneg <vscale x 4 x float> %c
+ %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %neg, <vscale x 4 x float> %b, <vscale x 4 x float> %neg1)
+ ret <vscale x 4 x float> %0
+}
+
+define <vscale x 8 x half> @fnmsub_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fnmsub_nxv8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 8 x half> %a
+ %neg1 = fneg <vscale x 8 x half> %c
+ %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %neg, <vscale x 8 x half> %b, <vscale x 8 x half> %neg1)
+ ret <vscale x 8 x half> %0
+}
+
+define <2 x double> @fnmsub_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: fnmsub_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %a
+ %neg1 = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %neg, <2 x double> %b, <2 x double> %neg1)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fnmsub_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: fnmsub_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %a
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fnmsub_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: fnmsub_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %a
+ %neg1 = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %neg, <8 x half> %b, <8 x half> %neg1)
+ ret <8 x half> %0
+}
+
+define <2 x double> @fnmsub_flipped_v2f64(<2 x double> %c, <2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: fnmsub_flipped_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmla z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %a
+ %neg1 = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %neg, <2 x double> %b, <2 x double> %neg1)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fnmsub_flipped_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: fnmsub_flipped_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %a
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fnmsub_flipped_v8f16(<8 x half> %c, <8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: fnmsub_flipped_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmla z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %a
+ %neg1 = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %neg, <8 x half> %b, <8 x half> %neg1)
+ ret <8 x half> %0
+}
nit: I think the PR title should be prefixed with `[AArch64]`.
gbossu left a comment:
Looks good, although I haven't fully checked the correctness of the tests.
Have you checked if we have ISel patterns in .td that could be removed after introducing these DAG combines? I seem to remember we had a lot of complex patterns to select those instructions. Out of curiosity, have you tried to add more patterns instead of creating an SDAG-specific combiner?
The advantage of doing that would be that GlobalISel might be able to re-use them. The downside is, well, tablegen is hard to read...
On llvm/test/CodeGen/AArch64/sve-fmsub.ll:

  %neg1 = fneg <8 x half> %c
  %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %neg, <8 x half> %b, <8 x half> %neg1)
  ret <8 x half> %0
}
Questions:

- Do these combines also work if `llvm.fmuladd` was replaced with fmul and fadd intrinsics?
- Do these combines also work if `llvm.fmuladd` was replaced with the equivalent NEON/SVE intrinsic?
> Do these combines also work if `llvm.fmuladd` was replaced with fmul and fadd intrinsics?

Generally, no; in that case, the fadd gets switched elsewhere for an fsub (eliminating the fneg on c). There might be a case where they get fused into an fma, in which case these should apply.

> Do these combines also work if `llvm.fmuladd` was replaced with the equivalent NEON/SVE intrinsic?

I don't think so; the combines themselves will only identify an fma DAG node with fnegs feeding into it. Unless a more specific intrinsic was switched to that, I don't think I'd expect to see this apply.
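A minimal sketch of the first point, with hypothetical function names (this example is not from the patch): with an unfused fmul + fadd, the fneg is folded away before any FMA node exists, so the combine never sees the pattern it matches.

```llvm
; Hypothetical illustration: per the reply above, the fadd of an fneg is
; switched for an fsub before ISel, so no fma(a, b, fneg(c)) node is ever
; formed for this combine to match.
define <vscale x 2 x double> @mul_then_add(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
  %mul = fmul <vscale x 2 x double> %a, %b
  %neg = fneg <vscale x 2 x double> %c
  ; Effectively becomes: %r = fsub <vscale x 2 x double> %mul, %c
  %r = fadd <vscale x 2 x double> %mul, %neg
  ret <vscale x 2 x double> %r
}
```

Only if fast-math or contract flags allow fusing the fmul and fadd into an fma would the new combine apply, as noted in the reply above.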
This allows for FNEG + FMA sequences to be combined into a single operation, with `FNML[A|S]`, `FNMAD`, or `FNMSB` selected depending on the operand order.
Force-pushed from ba0e9ae to d4e4360.
Ah, I had seen some but forgot to remove them. Done :)
Yes, though the main problem is converting the NEON vectors to scalable; I might just be unaware of a way to do that in TableGen.
On llvm/lib/Target/AArch64/AArch64ISelLowering.cpp:

    if (OpA.getOpcode() == ISD::FNEG) {
      OpA = OpA.getOperand(0);
      Opcode = AArch64ISD::FNMLA_PRED;
    } else if (OpB.getOpcode() == ISD::FNEG) {
      OpB = OpB.getOperand(0);
      Opcode = AArch64ISD::FNMLA_PRED;
    } else {
      Opcode = AArch64ISD::FNMLS_PRED;
    }
Do we need these new ISD nodes? These ISEL patterns exist for scalable vectors (as shown by: https://godbolt.org/z/1Pn78GKox). So maybe you can promote the FNEG and FMA to scalable vectors, and rely on the existing patterns to lower them?
I think it can be done, but it's a little tricky. I think an example is something like llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td, lines 2117 to 2131 (at 87a1fd1).
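If I read the godbolt link correctly, the reviewer's point can be sketched as follows (an assumed reconstruction, not taken from the patch): for scalable types, the pre-existing `AArch64fnmls_p` PatFrag alternative (the unchanged last entry in the .td diff above) already matches an fma with a negated accumulator, without the new `FNML[A|S]_PRED` nodes.

```llvm
; Assumed sketch: this already selects fnmsb via the existing SVE patterns.
; The new ISD nodes mainly buy the fixed-length case, which needs the
; convert-to-scalable wrapping done in performFMACombine.
define <vscale x 2 x double> @scalable_already_handled(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
  %neg = fneg <vscale x 2 x double> %c
  %r = call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %neg)
  ret <vscale x 2 x double> %r
}
```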
This allows for FNEG + FMA sequences to be combined into a single operation, with `FNML[A|S]`, `FNMAD`, or `FNMSB` selected depending on the operand order, similarly to (but more specific than) how `performSVEMulAddSubCombine` enables generating `ML[A|S]` instructions by converting the ADD/SUB intrinsics to scalable vectors.
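To make the operand-order point concrete, here is a pair condensed from the tests in this patch (function names are mine): the same fneg + `llvm.fmuladd` pattern selects either destructive form, depending on which input arrives in the result register.

```llvm
; Condensed from sve-fmsub.ll. %a arrives in q0/z0, so the
; multiplicand-destructive FNMSB is selected: z0 = z0*z1 - z2.
define <2 x double> @mulend_in_z0(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
  %neg = fneg <2 x double> %c
  %r = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
  ret <2 x double> %r
}

; Here %c arrives in q0/z0 instead, so the accumulator-destructive FNMLS
; is selected: z0 = z1*z2 - z0.
define <2 x double> @acc_in_z0(<2 x double> %c, <2 x double> %a, <2 x double> %b) {
  %neg = fneg <2 x double> %c
  %r = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
  ret <2 x double> %r
}
```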
FNML[A|S],FNMAD, orFNMSBselected depending on the operand order, similarly to (but more specific than) howperformSVEMulAddSubCombineenables generatingML[A|S]instructions by converting the ADD/SUB intrinsics to scalable vectors.