[AArch32] Armv8.6-a Matrix Mult Assembly + Intrinsics
This patch upstreams support for the Armv8.6-a Matrix Multiplication
Extension. A summary of the features can be found here:

https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/arm-architecture-developments-armv8-6-a

This patch includes:

- Assembly support for AArch32
- Intrinsics support for the AArch32 Neon matrix multiplication
  instructions

Note: these extensions are optional in architectures before Armv8.6-A;
for Armv8.6-A they are enabled by default.

No additional IR types or C types are needed for this extension.
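
As a rough illustration of the user-facing surface, here is a minimal
usage sketch (the function names are made up; the intrinsic signatures
match the CodeGen test added in this patch):

#include <arm_neon.h>

// 2x8 * 8x2 signed 8-bit matrix multiply, accumulated into a 2x2 block
// of 32-bit lanes; lowers to @llvm.arm.neon.smmla.
int32x4_t mm_acc_s8(int32x4_t acc, int8x16_t lhs, int8x16_t rhs) {
  return vmmlaq_s32(acc, lhs, rhs);
}

// Mixed-sign dot product (unsigned * signed 8-bit, 32-bit accumulators);
// lowers to @llvm.arm.neon.usdot.
int32x2_t usdot_acc(int32x2_t acc, uint8x8_t u, int8x8_t s) {
  return vusdot_s32(acc, u, s);
}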

This is part of a patch series that starts with BFloat16 support and
covers the other components of the Armv8.6-A extension (see the earlier
patches linked in Phabricator).

Based on work by:
- Luke Geeson
- Oliver Stannard
- Luke Cheeseman

Reviewers: t.p.northover, miyuki

Reviewed By: miyuki

Subscribers: miyuki, ostannard, kristof.beyls, hiraditya, danielkiss,
cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D77872
lukeg101 committed Apr 24, 2020
1 parent 832cd74 commit 7da1905
Showing 10 changed files with 286 additions and 8 deletions.
6 changes: 6 additions & 0 deletions clang/lib/Basic/Targets/ARM.cpp
@@ -425,6 +425,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
DotProd = 0;
HasMatMul = 0;
HasFloat16 = true;
ARMCDECoprocMask = 0;

@@ -491,6 +492,8 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
FPU |= FPARMV8;
MVE |= MVE_INT | MVE_FP;
HW_FP |= HW_FP_SP | HW_FP_HP;
} else if (Feature == "+i8mm") {
HasMatMul = 1;
} else if (Feature.size() == strlen("+cdecp0") && Feature >= "+cdecp0" &&
Feature <= "+cdecp7") {
unsigned Coproc = Feature.back() - '0';
@@ -820,6 +823,9 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (DotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

if (HasMatMul)
Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

switch (ArchKind) {
default:
break;
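
With the +i8mm handling above, user code can gate use of the new
intrinsics on the ACLE feature macro. A minimal sketch (the fallback path
and identifiers are illustrative; only the macro name comes from this
patch):

#include <arm_neon.h>

uint32x4_t matmul_step(uint32x4_t acc, uint8x16_t a, uint8x16_t b) {
#ifdef __ARM_FEATURE_MATMUL_INT8
  return vmmlaq_u32(acc, a, b);   // single unsigned matrix multiply-accumulate
#else
  return acc;                     // placeholder: a real fallback would emulate the 2x2 matmul
#endif
}
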
1 change: 1 addition & 0 deletions clang/lib/Basic/Targets/ARM.h
@@ -75,6 +75,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned DSP : 1;
unsigned Unaligned : 1;
unsigned DotProd : 1;
unsigned HasMatMul : 1;

enum {
LDREX_B = (1 << 0), /// byte (8-bit)
4 changes: 4 additions & 0 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -4807,6 +4807,7 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
@@ -4914,6 +4915,9 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
NEONMAP1(vusdot_v, arm_neon_usdot, 0),
NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
87 changes: 87 additions & 0 deletions clang/test/CodeGen/arm-v8.6a-neon-intrinsics.c
@@ -0,0 +1,87 @@
// RUN: %clang_cc1 -triple armv8.6a-arm-none-eabi -target-feature +neon -target-feature +fullfp16 -target-feature +i8mm \
// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg -sroa \
// RUN: | FileCheck %s

// REQUIRES: arm-registered-target

#include <arm_neon.h>

// CHECK-LABEL: test_vmmlaq_s32
// CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
// CHECK: ret <4 x i32> [[VAL]]
int32x4_t test_vmmlaq_s32(int32x4_t r, int8x16_t a, int8x16_t b) {
return vmmlaq_s32(r, a, b);
}

// CHECK-LABEL: test_vmmlaq_u32
// CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
// CHECK: ret <4 x i32> [[VAL]]
uint32x4_t test_vmmlaq_u32(uint32x4_t r, uint8x16_t a, uint8x16_t b) {
return vmmlaq_u32(r, a, b);
}

// CHECK-LABEL: test_vusmmlaq_s32
// CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
// CHECK: ret <4 x i32> [[VAL]]
int32x4_t test_vusmmlaq_s32(int32x4_t r, uint8x16_t a, int8x16_t b) {
return vusmmlaq_s32(r, a, b);
}

// CHECK-LABEL: test_vusdot_s32
// CHECK: [[VAL:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b)
// CHECK: ret <2 x i32> [[VAL]]
int32x2_t test_vusdot_s32(int32x2_t r, uint8x8_t a, int8x8_t b) {
return vusdot_s32(r, a, b);
}

// CHECK-LABEL: test_vusdot_lane_s32
// CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <2 x i32> zeroinitializer
// CHECK: [[TMP3:%.*]] = bitcast <2 x i32> [[LANE]] to <8 x i8>
// CHECK: [[TMP4:%.*]] = bitcast <2 x i32> %r to <8 x i8>
// CHECK: [[OP:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> [[TMP3]])
// CHECK: ret <2 x i32> [[OP]]
int32x2_t test_vusdot_lane_s32(int32x2_t r, uint8x8_t a, int8x8_t b) {
return vusdot_lane_s32(r, a, b, 0);
}

// CHECK-LABEL: test_vsudot_lane_s32
// CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <2 x i32> zeroinitializer
// CHECK: [[TMP3:%.*]] = bitcast <2 x i32> [[LANE]] to <8 x i8>
// CHECK: [[TMP4:%.*]] = bitcast <2 x i32> %r to <8 x i8>
// CHECK: [[OP:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> [[TMP3]], <8 x i8> %a)
// CHECK: ret <2 x i32> [[OP]]
int32x2_t test_vsudot_lane_s32(int32x2_t r, int8x8_t a, uint8x8_t b) {
return vsudot_lane_s32(r, a, b, 0);
}

// CHECK-LABEL: test_vusdotq_lane_s32
// CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <4 x i32> zeroinitializer
// CHECK: [[TMP4:%.*]] = bitcast <4 x i32> [[LANE]] to <16 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <4 x i32> %r to <16 x i8>
// CHECK: [[OP:%.*]] = call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> [[TMP4]])
// CHECK: ret <4 x i32> [[OP]]
int32x4_t test_vusdotq_lane_s32(int32x4_t r, uint8x16_t a, int8x8_t b) {
return vusdotq_lane_s32(r, a, b, 0);
}

// CHECK-LABEL: test_vsudotq_lane_s32
// CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <4 x i32> zeroinitializer
// CHECK: [[TMP4:%.*]] = bitcast <4 x i32> %r to <16 x i8>
// CHECK: [[OP:%.*]] = call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %3, <16 x i8> %a)
// CHECK: ret <4 x i32> [[OP]]
int32x4_t test_vsudotq_lane_s32(int32x4_t r, int8x16_t a, uint8x8_t b) {
return vsudotq_lane_s32(r, a, b, 0);
}
13 changes: 13 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsARM.td
@@ -773,6 +773,19 @@ class Neon_Dot_Intrinsic
def int_arm_neon_udot : Neon_Dot_Intrinsic;
def int_arm_neon_sdot : Neon_Dot_Intrinsic;

// v8.6-A Matrix Multiply Intrinsics
class Neon_MatMul_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
LLVMMatchType<1>],
[IntrNoMem]>;
def int_arm_neon_ummla : Neon_MatMul_Intrinsic;
def int_arm_neon_smmla : Neon_MatMul_Intrinsic;
def int_arm_neon_usmmla : Neon_MatMul_Intrinsic;
def int_arm_neon_usdot : Neon_Dot_Intrinsic;

// v8.6-A Bfloat Intrinsics

def int_arm_cls: Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_cls64: Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;

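
Because these classes use llvm_anyvector_ty, each intrinsic is overloaded
on its vector types, so one definition serves both the 64-bit and 128-bit
forms. A sketch in the style of the CodeGen test above (the IR signatures
are the ones its CHECK lines expect):

#include <arm_neon.h>

// Selects @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
int32x2_t usdot_d(int32x2_t r, uint8x8_t a, int8x8_t b) {
  return vusdot_s32(r, a, b);
}

// Selects @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>)
int32x4_t usdot_q(int32x4_t r, uint8x16_t a, int8x16_t b) {
  return vusdotq_s32(r, a, b);
}
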
6 changes: 5 additions & 1 deletion llvm/lib/Target/ARM/ARM.td
@@ -428,6 +428,9 @@ def FeatureSB : SubtargetFeature<"sb", "HasSB", "true",
def FeatureBF16 : SubtargetFeature<"bf16", "HasBF16", "true",
"Enable support for BFloat16 instructions", [FeatureNEON]>;

def FeatureMatMulInt8 : SubtargetFeature<"i8mm", "HasMatMulInt8",
"true", "Enable Matrix Multiply Int8 Extension", [FeatureNEON]>;

// Armv8.1-M extensions

def FeatureLOB : SubtargetFeature<"lob", "HasLOB", "true",
@@ -529,7 +532,8 @@ def HasV8_5aOps : SubtargetFeature<"v8.5a", "HasV8_5aOps", "true",

def HasV8_6aOps : SubtargetFeature<"v8.6a", "HasV8_6aOps", "true",
"Support ARM v8.6a instructions",
[HasV8_5aOps, FeatureBF16]>;
[HasV8_5aOps, FeatureBF16,
FeatureMatMulInt8]>;

def HasV8_1MMainlineOps : SubtargetFeature<
"v8.1m.main", "HasV8_1MMainlineOps", "true",
87 changes: 80 additions & 7 deletions llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -4823,10 +4823,10 @@ def : Pat<(v4f32 (fma (fneg QPR:$Vn), QPR:$Vm, QPR:$src1)),
// We put them in the VFPV8 decoder namespace because the ARM and Thumb
// encodings are the same and thus no further bit twiddling is necessary
// in the disassembler.
class VDOT<bit op6, bit op4, RegisterClass RegTy, string Asm, string AsmTy,
ValueType AccumTy, ValueType InputTy,
class VDOT<bit op6, bit op4, bit op23, RegisterClass RegTy, string Asm,
string AsmTy, ValueType AccumTy, ValueType InputTy,
SDPatternOperator OpNode> :
N3Vnp<0b11000, 0b10, 0b1101, op6, op4, (outs RegTy:$dst),
N3Vnp<{0b1100, op23}, 0b10, 0b1101, op6, op4, (outs RegTy:$dst),
(ins RegTy:$Vd, RegTy:$Vn, RegTy:$Vm), N3RegFrm, IIC_VDOTPROD,
Asm, AsmTy,
[(set (AccumTy RegTy:$dst),
@@ -4838,10 +4838,19 @@ class VDOT<bit op6, bit op4, RegisterClass RegTy, string Asm, string AsmTy,
let Constraints = "$dst = $Vd";
}

def VUDOTD : VDOT<0, 1, DPR, "vudot", "u8", v2i32, v8i8, int_arm_neon_udot>;
def VSDOTD : VDOT<0, 0, DPR, "vsdot", "s8", v2i32, v8i8, int_arm_neon_sdot>;
def VUDOTQ : VDOT<1, 1, QPR, "vudot", "u8", v4i32, v16i8, int_arm_neon_udot>;
def VSDOTQ : VDOT<1, 0, QPR, "vsdot", "s8", v4i32, v16i8, int_arm_neon_sdot>;

class VUSDOT<bit op6, bit op4, bit op23, RegisterClass RegTy, string Asm,
string AsmTy, ValueType AccumTy, ValueType InputTy,
SDPatternOperator OpNode> :
VDOT<op6, op4, op23, RegTy, Asm, AsmTy, AccumTy, InputTy, OpNode> {
let hasNoSchedulingInfo = 1;

}

def VUDOTD : VDOT<0, 1, 0, DPR, "vudot", "u8", v2i32, v8i8, int_arm_neon_udot>;
def VSDOTD : VDOT<0, 0, 0, DPR, "vsdot", "s8", v2i32, v8i8, int_arm_neon_sdot>;
def VUDOTQ : VDOT<1, 1, 0, QPR, "vudot", "u8", v4i32, v16i8, int_arm_neon_udot>;
def VSDOTQ : VDOT<1, 0, 0, QPR, "vsdot", "s8", v4i32, v16i8, int_arm_neon_sdot>;

// Indexed dot product instructions:
multiclass DOTI<string opc, string dt, bit Q, bit U, RegisterClass Ty,
@@ -4876,6 +4885,70 @@ defm VUDOTQI : DOTI<"vudot", "u8", 0b1, 0b1, QPR, v4i32, v16i8,
defm VSDOTQI : DOTI<"vsdot", "s8", 0b1, 0b0, QPR, v4i32, v16i8,
int_arm_neon_sdot, (EXTRACT_SUBREG QPR:$Vm, dsub_0)>;

// v8.6A matrix multiplication extension
let Predicates = [HasMatMulInt8] in {
class N3VMatMul<bit B, bit U, string Asm, string AsmTy,
SDPatternOperator OpNode>
: N3Vnp<{0b1100, B}, 0b10, 0b1100, 1, U, (outs QPR:$dst),
(ins QPR:$Vd, QPR:$Vn, QPR:$Vm), N3RegFrm, NoItinerary,
Asm, AsmTy,
[(set (v4i32 QPR:$dst), (OpNode (v4i32 QPR:$Vd),
(v16i8 QPR:$Vn),
(v16i8 QPR:$Vm)))]> {
let DecoderNamespace = "VFPV8";
let Constraints = "$dst = $Vd";
let hasNoSchedulingInfo = 1;
}

multiclass N3VMixedDotLane<bit Q, bit U, string Asm, string AsmTy, RegisterClass RegTy,
ValueType AccumTy, ValueType InputTy, SDPatternOperator OpNode,
dag RHS> {

def "" : N3Vnp<0b11101, 0b00, 0b1101, Q, U, (outs RegTy:$dst),
(ins RegTy:$Vd, RegTy:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane), N3RegFrm,
NoItinerary, Asm, AsmTy, []> {
bit lane;
let hasNoSchedulingInfo = 1;
let Inst{5} = lane;
let AsmString = !strconcat(Asm, ".", AsmTy, "\t$Vd, $Vn, $Vm$lane");
let DecoderNamespace = "VFPV8";
let Constraints = "$dst = $Vd";
}

def : Pat<
(AccumTy (OpNode (AccumTy RegTy:$Vd),
(InputTy RegTy:$Vn),
(InputTy (bitconvert (AccumTy
(ARMvduplane (AccumTy RegTy:$Vm),
VectorIndex32:$lane)))))),
(!cast<Instruction>(NAME) RegTy:$Vd, RegTy:$Vn, RHS, VectorIndex32:$lane)>;

}

multiclass SUDOTLane<bit Q, RegisterClass RegTy, ValueType AccumTy, ValueType InputTy, dag RHS>
: N3VMixedDotLane<Q, 1, "vsudot", "u8", RegTy, AccumTy, InputTy, null_frag, null_frag> {
def : Pat<
(AccumTy (int_arm_neon_usdot (AccumTy RegTy:$Vd),
(InputTy (bitconvert (AccumTy
(ARMvduplane (AccumTy RegTy:$Vm),
VectorIndex32:$lane)))),
(InputTy RegTy:$Vn))),
(!cast<Instruction>(NAME) RegTy:$Vd, RegTy:$Vn, RHS, VectorIndex32:$lane)>;
}

def VSMMLA : N3VMatMul<0, 0, "vsmmla", "s8", int_arm_neon_smmla>;
def VUMMLA : N3VMatMul<0, 1, "vummla", "u8", int_arm_neon_ummla>;
def VUSMMLA : N3VMatMul<1, 0, "vusmmla", "s8", int_arm_neon_usmmla>;
def VUSDOTD : VUSDOT<0, 0, 1, DPR, "vusdot", "s8", v2i32, v8i8, int_arm_neon_usdot>;
def VUSDOTQ : VUSDOT<1, 0, 1, QPR, "vusdot", "s8", v4i32, v16i8, int_arm_neon_usdot>;

defm VUSDOTDI : N3VMixedDotLane<0, 0, "vusdot", "s8", DPR, v2i32, v8i8,
int_arm_neon_usdot, (v2i32 DPR_VFP2:$Vm)>;
defm VUSDOTQI : N3VMixedDotLane<1, 0, "vusdot", "s8", QPR, v4i32, v16i8,
int_arm_neon_usdot, (EXTRACT_SUBREG QPR:$Vm, dsub_0)>;
defm VSUDOTDI : SUDOTLane<0, DPR, v2i32, v8i8, (v2i32 DPR_VFP2:$Vm)>;
defm VSUDOTQI : SUDOTLane<1, QPR, v4i32, v16i8, (EXTRACT_SUBREG QPR:$Vm, dsub_0)>;
}

// ARMv8.3 complex operations
class BaseN3VCP8ComplexTied<bit op21, bit op4, bit s, bit q,
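
For reference, a hedged sketch of the expected instruction selection from
the definitions above (mnemonic and type suffix follow the Asm/AsmTy
strings; register numbers are purely illustrative):

#include <arm_neon.h>

int32x4_t mmla_example(int32x4_t r, int8x16_t a, int8x16_t b) {
  return vmmlaq_s32(r, a, b);          // expected: vsmmla.s8 q0, q1, q2
}

int32x2_t usdot_lane_example(int32x2_t r, uint8x8_t a, int8x8_t b) {
  return vusdot_lane_s32(r, a, b, 0);  // expected: vusdot.s8 d0, d1, d2[0]
}
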
2 changes: 2 additions & 0 deletions llvm/lib/Target/ARM/ARMPredicates.td
@@ -110,6 +110,8 @@ def HasFP16FML : Predicate<"Subtarget->hasFP16FML()">,
AssemblerPredicate<(all_of FeatureFP16FML),"full half-float fml">;
def HasBF16 : Predicate<"Subtarget->hasBF16()">,
AssemblerPredicate<(all_of FeatureBF16),"BFloat16 floating point extension">;
def HasMatMulInt8 : Predicate<"Subtarget->hasMatMulInt8()">,
AssemblerPredicate<(all_of FeatureMatMulInt8),"8-bit integer matrix multiply">;
def HasDivideInThumb : Predicate<"Subtarget->hasDivideInThumbMode()">,
AssemblerPredicate<(all_of FeatureHWDivThumb), "divide in THUMB">;
def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
5 changes: 5 additions & 0 deletions llvm/lib/Target/ARM/ARMSubtarget.h
@@ -260,6 +260,9 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
/// HasBF16 - True if subtarget supports BFloat16 floating point operations
bool HasBF16 = false;

/// HasMatMulInt8 - True if subtarget supports 8-bit integer matrix multiply
bool HasMatMulInt8 = false;

/// HasD32 - True if subtarget has the full 32 double precision
/// FP registers for VFPv3.
bool HasD32 = false;
@@ -704,6 +707,8 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
/// Return true if the CPU supports any kind of instruction fusion.
bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }

bool hasMatMulInt8() const { return HasMatMulInt8; }

const Triple &getTargetTriple() const { return TargetTriple; }

bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }