target/arm: Implement integer matrix multiply accumulate
This is {S,U,US}MMLA for both AArch64 AdvSIMD and SVE,
and V{S,U,US}MMLA.S8 for AArch32 NEON.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-91-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
rth7680 authored and pm215 committed May 25, 2021
1 parent 51879c6 · commit 2323c5f
Showing 7 changed files with 169 additions and 0 deletions.
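For context on the operation itself: each of these instructions works on independent 128-bit segments, treating the first source as a 2x8 matrix of bytes (two rows of 8), the second source as an 8x2 matrix of bytes stored column by column (two columns of 8), and the destination/addend as a 2x2 matrix of 32-bit accumulators. Below is a minimal plain-C reference sketch of one signed segment; the name smmla_segment_ref is illustrative only and does not appear in this commit, and the sketch ignores QEMU's host-endianness H macros.

/*
 * Reference sketch (illustrative, not part of the commit): one 128-bit
 * segment of SMMLA.
 *   n: 2x8 matrix of signed bytes, rows of 8
 *   m: 8x2 matrix of signed bytes, stored as two columns of 8
 *   a: 2x2 addend of 32-bit values, row-major; d: 2x2 result
 */
#include <stdint.h>

static void smmla_segment_ref(int32_t d[4], const int8_t n[16],
                              const int8_t m[16], const int32_t a[4])
{
    for (int i = 0; i < 2; ++i) {          /* row of n */
        for (int j = 0; j < 2; ++j) {      /* column of m */
            int32_t sum = a[2 * i + j];
            for (int k = 0; k < 8; ++k) {
                sum += n[8 * i + k] * m[8 * j + k];
            }
            d[2 * i + j] = sum;
        }
    }
}

The UMMLA and USMMLA variants differ only in whether the source bytes are read as uint8_t or int8_t, and the vec_helper.c change below repeats this computation for every 16-byte segment of the vector, so longer SVE vectors are handled as independent segments.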
7 changes: 7 additions & 0 deletions target/arm/helper.h
@@ -993,6 +993,13 @@ DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,

DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)

#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
7 changes: 7 additions & 0 deletions target/arm/neon-shared.decode
@@ -59,6 +59,13 @@ VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1

VSMMLA 1111 1100 0.10 .... .... 1100 .1.0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp
VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp
VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp

VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
vn=%vn_dp vd=%vd_dp size=1
VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
6 changes: 6 additions & 0 deletions target/arm/sve.decode
@@ -1413,6 +1413,12 @@ USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm
EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm

## SVE integer matrix multiply accumulate

SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0

## SVE2 bitwise permute

BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm
18 changes: 18 additions & 0 deletions target/arm/translate-a64.c
@@ -12182,6 +12182,15 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
}
feature = dc_isar_feature(aa64_i8mm, s);
break;
case 0x04: /* SMMLA */
case 0x14: /* UMMLA */
case 0x05: /* USMMLA */
if (!is_q || size != MO_32) {
unallocated_encoding(s);
return;
}
feature = dc_isar_feature(aa64_i8mm, s);
break;
case 0x18: /* FCMLA, #0 */
case 0x19: /* FCMLA, #90 */
case 0x1a: /* FCMLA, #180 */
@@ -12226,6 +12235,15 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
return;

case 0x04: /* SMMLA, UMMLA */
gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
u ? gen_helper_gvec_ummla_b
: gen_helper_gvec_smmla_b);
return;
case 0x05: /* USMMLA */
gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
return;

case 0x8: /* FCMLA, #0 */
case 0x9: /* FCMLA, #90 */
case 0xa: /* FCMLA, #180 */
27 changes: 27 additions & 0 deletions target/arm/translate-neon.c
@@ -4036,3 +4036,30 @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a)
tcg_temp_free_i32(tmp2);
return true;
}

static bool trans_VSMMLA(DisasContext *s, arg_VSMMLA *a)
{
if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
gen_helper_gvec_smmla_b);
}

static bool trans_VUMMLA(DisasContext *s, arg_VUMMLA *a)
{
if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
gen_helper_gvec_ummla_b);
}

static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
{
if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
gen_helper_gvec_usmmla_b);
}
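As a quick sanity check of the mapping above (a NEON Q register viewed as 16 bytes in and 4 words out), here is a self-contained worked example; it is illustrative only and mirrors the per-segment computation by hand rather than calling any QEMU code. With the first operand holding rows of all 1s and all 2s, the second holding columns of all 1s and all -1s, and a zero accumulator, the signed 2x2 result is {8, -8, 16, -16}.

/* Illustrative worked example (not part of the commit). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int8_t n[16], m[16];
    int32_t d[4];
    const int32_t a[4] = { 0, 0, 0, 0 };

    for (int k = 0; k < 8; ++k) {
        n[k] = 1;        /* row 0 of the 2x8 matrix */
        n[8 + k] = 2;    /* row 1 */
        m[k] = 1;        /* column 0 of the 8x2 matrix */
        m[8 + k] = -1;   /* column 1 */
    }
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 2; ++j) {
            int32_t sum = a[2 * i + j];
            for (int k = 0; k < 8; ++k) {
                sum += n[8 * i + k] * m[8 * j + k];
            }
            d[2 * i + j] = sum;
        }
    }
    assert(d[0] == 8 && d[1] == -8 && d[2] == 16 && d[3] == -16);
    printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);
    return 0;
}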
27 changes: 27 additions & 0 deletions target/arm/translate-sve.c
@@ -8610,3 +8610,30 @@ static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
return do_FMLAL_zzxw(s, a, true, true);
}

static bool do_i8mm_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
gen_helper_gvec_4 *fn, int data)
{
if (!dc_isar_feature(aa64_sve_i8mm, s)) {
return false;
}
if (sve_access_check(s)) {
gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}
return true;
}

static bool trans_SMMLA(DisasContext *s, arg_rrrr_esz *a)
{
return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_smmla_b, 0);
}

static bool trans_USMMLA(DisasContext *s, arg_rrrr_esz *a)
{
return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_usmmla_b, 0);
}

static bool trans_UMMLA(DisasContext *s, arg_rrrr_esz *a)
{
return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_ummla_b, 0);
}
77 changes: 77 additions & 0 deletions target/arm/vec_helper.c
@@ -2335,3 +2335,80 @@ void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc)
}
clear_tail(d, opr_sz * 8, simd_maxsz(desc));
}

/*
* Integer matrix-multiply accumulate
*/

static uint32_t do_smmla_b(uint32_t sum, void *vn, void *vm)
{
int8_t *n = vn, *m = vm;

for (intptr_t k = 0; k < 8; ++k) {
sum += n[H1(k)] * m[H1(k)];
}
return sum;
}

static uint32_t do_ummla_b(uint32_t sum, void *vn, void *vm)
{
uint8_t *n = vn, *m = vm;

for (intptr_t k = 0; k < 8; ++k) {
sum += n[H1(k)] * m[H1(k)];
}
return sum;
}

static uint32_t do_usmmla_b(uint32_t sum, void *vn, void *vm)
{
uint8_t *n = vn;
int8_t *m = vm;

for (intptr_t k = 0; k < 8; ++k) {
sum += n[H1(k)] * m[H1(k)];
}
return sum;
}

static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
uint32_t (*inner_loop)(uint32_t, void *, void *))
{
intptr_t seg, opr_sz = simd_oprsz(desc);

for (seg = 0; seg < opr_sz; seg += 16) {
uint32_t *d = vd + seg;
uint32_t *a = va + seg;
uint32_t sum0, sum1, sum2, sum3;

/*
* Process the entire segment at once, writing back the
* results only after we've consumed all of the inputs.
*
* Key to indices by column:
*               i   j                  i             j
*/
sum0 = a[H4(0 + 0)];
sum0 = inner_loop(sum0, vn + seg + 0, vm + seg + 0);
sum1 = a[H4(0 + 1)];
sum1 = inner_loop(sum1, vn + seg + 0, vm + seg + 8);
sum2 = a[H4(2 + 0)];
sum2 = inner_loop(sum2, vn + seg + 8, vm + seg + 0);
sum3 = a[H4(2 + 1)];
sum3 = inner_loop(sum3, vn + seg + 8, vm + seg + 8);

d[H4(0)] = sum0;
d[H4(1)] = sum1;
d[H4(2)] = sum2;
d[H4(3)] = sum3;
}
clear_tail(vd, opr_sz, simd_maxsz(desc));
}

#define DO_MMLA_B(NAME, INNER) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
{ do_mmla_b(vd, vn, vm, va, desc, INNER); }

DO_MMLA_B(gvec_smmla_b, do_smmla_b)
DO_MMLA_B(gvec_ummla_b, do_ummla_b)
DO_MMLA_B(gvec_usmmla_b, do_usmmla_b)
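The only difference between the three inner loops above is the signedness applied to each source operand: both signed (SMMLA), both unsigned (UMMLA), or unsigned first operand with signed second (USMMLA). The sketch below, which is illustrative and shares no code with QEMU, shows how the same byte pattern 0xff yields three different dot products depending on that choice.

/*
 * Illustrative sketch (not part of the commit): 0xff contributes -1 when
 * read as int8_t and 255 when read as uint8_t, so the three 8-element
 * dot-product flavours diverge on identical inputs.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t dot8(const uint8_t *n, const uint8_t *m,
                    int n_signed, int m_signed)
{
    int32_t sum = 0;

    for (int k = 0; k < 8; ++k) {
        int32_t ne = n_signed ? (int8_t)n[k] : n[k];
        int32_t me = m_signed ? (int8_t)m[k] : m[k];
        sum += ne * me;
    }
    return sum;
}

int main(void)
{
    uint8_t n[8], m[8];

    for (int k = 0; k < 8; ++k) {
        n[k] = 0xff;   /* -1 signed, 255 unsigned */
        m[k] = 0xff;
    }
    printf("smmla-style:  %d\n", dot8(n, m, 1, 1));  /* 8 * (-1 * -1)   =      8 */
    printf("ummla-style:  %d\n", dot8(n, m, 0, 0));  /* 8 * (255 * 255) = 520200 */
    printf("usmmla-style: %d\n", dot8(n, m, 0, 1));  /* 8 * (255 * -1)  =  -2040 */
    return 0;
}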
