[llvm] Support fixed point multiplication on AArch64 (#84237)
Prior to this, fixed point multiplication would lead to this assertion
error on AArch64, armv8, and armv7.

```
 _Accum f(_Accum x, _Accum y) { return x * y; }

// ./bin/clang++ -ffixed-point /tmp/test2.cc -c -S -o - -target aarch64 -O3
clang++: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp:10245: void llvm::TargetLowering::forceExpandWideMUL(SelectionDAG &, const SDLoc &, bool, EVT, const SDValue, const SDValue, const SDValue, const SDValue, SDValue &, SDValue &) const: Assertion `Ret.getOpcode() == ISD::MERGE_VALUES && "Ret value is a collection of constituent nodes holding result."' failed.
```

This path into forceExpandWideMUL should only be taken if we don't
support [US]MUL_LOHI or MULH[US] for the operand size (32 in this case).
But we should also check whether we can just leverage regular wide
multiplication: extend the operands from 32 to 64 bits, do a regular
64-bit mul, then truncate and shift. These ops are certainly available on
AArch64, just for the wider type.
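
For illustration, here is a minimal host-side C++ sketch (plain C++, not the SelectionDAG API; the function name and test values are hypothetical) of the semantics this fallback provides for a signed 32-bit fixed-point multiply: sign-extend both operands to 64 bits, multiply once in the wide type, arithmetic-shift right by the scale, and truncate back down.

```cpp
// Illustrative sketch only; smul_fix_i32 is a made-up name, not an LLVM API.
#include <cassert>
#include <cstdint>

// Signed 32-bit fixed-point multiply with Scale fractional bits, done through
// a single 64-bit multiply: sign-extend -> wide mul -> shift right by Scale ->
// truncate. This mirrors what the wide-multiplication fallback emits for i32.
int32_t smul_fix_i32(int32_t X, int32_t Y, unsigned Scale) {
  int64_t Wide = static_cast<int64_t>(X) * static_cast<int64_t>(Y); // SIGN_EXTEND + wide MUL
  return static_cast<int32_t>(Wide >> Scale);                       // SRA by Scale, then TRUNCATE
}

int main() {
  // With 2 fractional bits, 6 encodes 1.5 and 10 encodes 2.5;
  // 1.5 * 2.5 = 3.75, which is 15 in the same encoding.
  assert(smul_fix_i32(6, 10, 2) == 15);
  return 0;
}
```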
PiJoules committed May 14, 2024
1 parent f89b1b8 commit 19008d3
Showing 5 changed files with 775 additions and 0 deletions.
12 changes: 12 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10500,13 +10500,25 @@ TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
   SDValue Lo, Hi;
   unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
   unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
+  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VTSize * 2);
   if (isOperationLegalOrCustom(LoHiOp, VT)) {
     SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
     Lo = Result.getValue(0);
     Hi = Result.getValue(1);
   } else if (isOperationLegalOrCustom(HiOp, VT)) {
     Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
     Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
+  } else if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
+    // Try for a multiplication using a wider type.
+    unsigned Ext = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+    SDValue LHSExt = DAG.getNode(Ext, dl, WideVT, LHS);
+    SDValue RHSExt = DAG.getNode(Ext, dl, WideVT, RHS);
+    SDValue Res = DAG.getNode(ISD::MUL, dl, WideVT, LHSExt, RHSExt);
+    Lo = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+    SDValue Shifted =
+        DAG.getNode(ISD::SRA, dl, WideVT, Res,
+                    DAG.getShiftAmountConstant(VTSize, WideVT, dl));
+    Hi = DAG.getNode(ISD::TRUNCATE, dl, VT, Shifted);
   } else if (VT.isVector()) {
     return SDValue();
   } else {
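As a rough sanity check of what the new branch computes, here is a hypothetical host-side mirror for VT = i32 (plain C++, not the SelectionDAG API): Lo is the low half of the double-width product, and Hi is the high half obtained by shifting right by VTSize, which the rest of expandFixedPointMul then recombines according to the scale.

```cpp
// Sketch only; wide_mul_lo_hi is a made-up helper, not part of LLVM.
// Assumes arithmetic right shift of negative values (guaranteed since C++20).
#include <cassert>
#include <cstdint>
#include <utility>

// Returns {Lo, Hi}: the low and high 32-bit halves of the signed 64-bit
// product, matching Lo = trunc(Res) and Hi = trunc(Res >> VTSize) above.
std::pair<uint32_t, uint32_t> wide_mul_lo_hi(int32_t LHS, int32_t RHS) {
  int64_t Res = static_cast<int64_t>(LHS) * static_cast<int64_t>(RHS); // sext + wide MUL
  uint32_t Lo = static_cast<uint32_t>(Res);                            // TRUNCATE
  uint32_t Hi = static_cast<uint32_t>(Res >> 32);                      // SRA by 32, then TRUNCATE
  return {Lo, Hi};
}

int main() {
  auto [Lo, Hi] = wide_mul_lo_hi(-2, 3); // product is -6 = 0xFFFFFFFFFFFFFFFA
  assert(Lo == 0xFFFFFFFAu && Hi == 0xFFFFFFFFu);
  return 0;
}
```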
139 changes: 139 additions & 0 deletions llvm/test/CodeGen/AArch64/smul_fix.ll
@@ -0,0 +1,139 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

define i32 @func(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func:
; CHECK: // %bb.0:
; CHECK-NEXT: smull x8, w0, w1
; CHECK-NEXT: lsr x9, x8, #32
; CHECK-NEXT: extr w0, w9, w8, #2
; CHECK-NEXT: ret
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
ret i32 %tmp
}

define i64 @func2(i64 %x, i64 %y) {
; CHECK-LABEL: func2:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #2
; CHECK-NEXT: ret
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
ret i64 %tmp
}

define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-LABEL: func3:
; CHECK: // %bb.0:
; CHECK-NEXT: sbfx w8, w1, #0, #4
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: smull x8, w9, w8
; CHECK-NEXT: lsr x9, x8, #32
; CHECK-NEXT: extr w0, w9, w8, #2
; CHECK-NEXT: ret
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
ret i4 %tmp
}

;; These result in regular integer multiplication
define i32 @func4(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func4:
; CHECK: // %bb.0:
; CHECK-NEXT: mul w0, w0, w1
; CHECK-NEXT: ret
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
ret i32 %tmp
}

define i64 @func5(i64 %x, i64 %y) {
; CHECK-LABEL: func5:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x0, x0, x1
; CHECK-NEXT: ret
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
ret i64 %tmp
}

define i4 @func6(i4 %x, i4 %y) nounwind {
; CHECK-LABEL: func6:
; CHECK: // %bb.0:
; CHECK-NEXT: sbfx w8, w1, #0, #4
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: mul w0, w9, w8
; CHECK-NEXT: ret
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0)
ret i4 %tmp
}

define i64 @func7(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: func7:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #32
; CHECK-NEXT: ret
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32)
ret i64 %tmp
}

define i64 @func8(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: func8:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #63
; CHECK-NEXT: ret
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63)
ret i64 %tmp
}

define <2 x i32> @vec(<2 x i32> %x, <2 x i32> %y) nounwind {
; CHECK-LABEL: vec:
; CHECK: // %bb.0:
; CHECK-NEXT: mul v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp = call <2 x i32> @llvm.smul.fix.v2i32(<2 x i32> %x, <2 x i32> %y, i32 0)
ret <2 x i32> %tmp
}

define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: vec2:
; CHECK: // %bb.0:
; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
ret <4 x i32> %tmp
}

define <4 x i64> @vec3(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: vec3:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, v2.d[1]
; CHECK-NEXT: mov x9, v0.d[1]
; CHECK-NEXT: fmov x10, d2
; CHECK-NEXT: fmov x11, d0
; CHECK-NEXT: mov x14, v3.d[1]
; CHECK-NEXT: mov x15, v1.d[1]
; CHECK-NEXT: mul x12, x11, x10
; CHECK-NEXT: mul x13, x9, x8
; CHECK-NEXT: smulh x8, x9, x8
; CHECK-NEXT: smulh x9, x11, x10
; CHECK-NEXT: fmov x10, d3
; CHECK-NEXT: fmov x11, d1
; CHECK-NEXT: mul x16, x11, x10
; CHECK-NEXT: extr x8, x8, x13, #32
; CHECK-NEXT: smulh x10, x11, x10
; CHECK-NEXT: extr x9, x9, x12, #32
; CHECK-NEXT: mul x11, x15, x14
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: smulh x14, x15, x14
; CHECK-NEXT: extr x10, x10, x16, #32
; CHECK-NEXT: mov v0.d[1], x8
; CHECK-NEXT: fmov d1, x10
; CHECK-NEXT: extr x11, x14, x11, #32
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
%tmp = call <4 x i64> @llvm.smul.fix.v4i64(<4 x i64> %x, <4 x i64> %y, i32 32)
ret <4 x i64> %tmp
}