neon mlal qs8 rsum accumulating microkernels
PiperOrigin-RevId: 631778204
alankelly authored and xnnpack-bot committed May 16, 2024
1 parent 7fabcac commit 21fa2d9
Showing 32 changed files with 2,305 additions and 9 deletions.
6 changes: 6 additions & 0 deletions CMakeLists.txt
@@ -2991,6 +2991,12 @@ IF(XNNPACK_BUILD_TESTS)
TARGET_LINK_LIBRARIES(qs8-requantization-test PRIVATE hardware-config logging microkernels-all)
ADD_TEST(NAME qs8-requantization-test COMMAND qs8-requantization-test)

ADD_EXECUTABLE(qs8-rsum-minmax-fp32-test test/qs8-rsum-minmax-fp32.cc)
TARGET_INCLUDE_DIRECTORIES(qs8-rsum-minmax-fp32-test PRIVATE include src test)
TARGET_LINK_LIBRARIES(qs8-rsum-minmax-fp32-test PRIVATE fp16 pthreadpool GTest::gtest GTest::gtest_main microparams-init)
TARGET_LINK_LIBRARIES(qs8-rsum-minmax-fp32-test PRIVATE hardware-config logging microkernels-all)
ADD_TEST(NAME qs8-rsum-minmax-fp32-test COMMAND qs8-rsum-minmax-fp32-test)

ADD_EXECUTABLE(qs8-vadd-minmax-test test/qs8-vadd-minmax.cc)
SET_TARGET_PROPERTIES(qs8-vadd-minmax-test PROPERTIES CXX_EXTENSIONS YES)
TARGET_INCLUDE_DIRECTORIES(qs8-vadd-minmax-test PRIVATE include src test)
8 changes: 8 additions & 0 deletions cmake/gen/neon_microkernels.cmake
@@ -684,6 +684,14 @@ SET(ALL_NEON_MICROKERNEL_SRCS
src/qs8-requantization/qs8-requantization-rndna-neon.c
src/qs8-requantization/qs8-requantization-rndnu-neon-mull.c
src/qs8-requantization/qs8-requantization-rndnu-neon-qdmulh.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16-acc2.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc2.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc4.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc2.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc4.c
src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64.c
src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u8.c
src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u16.c
src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u24.c
3 changes: 3 additions & 0 deletions cmake/gen/scalar_microkernels.cmake
@@ -827,6 +827,9 @@ SET(ALL_SCALAR_MICROKERNEL_SRCS
src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned32.c
src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned64.c
src/qs8-requantization/qs8-requantization-rndnu-scalar.c
src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u1-acc1.c
src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u2-acc1.c
src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u4-acc1.c
src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u1.c
src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u2.c
src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u4.c
8 changes: 8 additions & 0 deletions gen/neon_microkernels.bzl
@@ -680,6 +680,14 @@ ALL_NEON_MICROKERNEL_SRCS = [
"src/qs8-requantization/qs8-requantization-rndna-neon.c",
"src/qs8-requantization/qs8-requantization-rndnu-neon-mull.c",
"src/qs8-requantization/qs8-requantization-rndnu-neon-qdmulh.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16-acc2.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc2.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc4.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc2.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc4.c",
"src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u8.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u16.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-neon-ld64-u24.c",
3 changes: 3 additions & 0 deletions gen/scalar_microkernels.bzl
@@ -823,6 +823,9 @@ ALL_SCALAR_MICROKERNEL_SRCS = [
"src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned32.c",
"src/qs8-requantization/qs8-requantization-rndna-scalar-unsigned64.c",
"src/qs8-requantization/qs8-requantization-rndnu-scalar.c",
"src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u1-acc1.c",
"src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u2-acc1.c",
"src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u4-acc1.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u1.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u2.c",
"src/qs8-vadd/gen/qs8-vadd-minmax-scalar-u4.c",
22 changes: 22 additions & 0 deletions scripts/generate-qs8-rsum.sh
@@ -0,0 +1,22 @@
#!/bin/sh
# Copyright 2024 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

#################################### Scalar ###################################
tools/xngen src/qs8-rsum/scalar.c.in -D CHANNEL_TILE=1 -D ACCUMULATORS=1 -D REQUANTIZATION=FP32 -D VARIANT=IMAGIC -D WASM=0 -o src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u1-acc1.c &
tools/xngen src/qs8-rsum/scalar.c.in -D CHANNEL_TILE=2 -D ACCUMULATORS=1 -D REQUANTIZATION=FP32 -D VARIANT=IMAGIC -D WASM=0 -o src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u2-acc1.c &
tools/xngen src/qs8-rsum/scalar.c.in -D CHANNEL_TILE=4 -D ACCUMULATORS=1 -D REQUANTIZATION=FP32 -D VARIANT=IMAGIC -D WASM=0 -o src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u4-acc1.c &

################################## ARM NEON ###################################
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=1 -D CHANNEL_TILE=16 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16.c &
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=1 -D CHANNEL_TILE=32 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32.c &
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=1 -D CHANNEL_TILE=64 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64.c &

tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=2 -D CHANNEL_TILE=16 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16-acc2.c &
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=2 -D CHANNEL_TILE=32 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc2.c &
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=2 -D CHANNEL_TILE=64 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc2.c &

tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=4 -D CHANNEL_TILE=32 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u32-acc4.c &
tools/xngen src/qs8-rsum/neon-mlal.c.in -D ACCUMULATORS=4 -D CHANNEL_TILE=64 -D REQUANTIZATION=FP32 -D ARMV8=0 -o src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u64-acc4.c &
1 change: 1 addition & 0 deletions scripts/generate-tests.sh
@@ -241,6 +241,7 @@ tools/generate-reduce-test.py --tester ReduceMicrokernelTester --spec test/f32-r
tools/generate-reduce-test.py --tester ReduceMicrokernelTester --spec test/f32-rmin.yaml --output test/f32-rmin.cc &
tools/generate-reduce-test.py --tester ReduceMicrokernelTester --spec test/f32-rminmax.yaml --output test/f32-rminmax.cc &

tools/generate-reduce-test.py --tester RSumMicrokernelTester --spec test/qs8-rsum-minmax-fp32.yaml --output test/qs8-rsum-minmax-fp32.cc &
tools/generate-reduce-test.py --tester RSumMicrokernelTester --spec test/f32-rsum.yaml --output test/f32-rsum.cc &

tools/generate-reduce-test.py --tester ReduceMicrokernelTester --spec test/u8-rmax.yaml --output test/u8-rmax.cc &
6 changes: 6 additions & 0 deletions src/microparams-init.c
@@ -1125,6 +1125,12 @@ size_t xnn_init_qs8_avgpool_minmax_fp32_neon_params(
params->fp32_neon.magic_bias_less_output_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
params->fp32_neon.output_min = output_min;
params->fp32_neon.output_max = output_max;
for (uint32_t i = 0; i < 7; i++) {
params->fp32_neon.mask_table[i] = 1;
}
for (uint32_t i = 7; i < 14; i++) {
params->fp32_neon.mask_table[i] = 0;
}
return sizeof(params->fp32_neon);
}
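
Note: the mask_table filled in above (seven ones followed by seven zeros) is what the new NEON kernels below load from when fewer than eight bytes remain: an eight-lane window starting at index 7 - remainder contains exactly `remainder` ones, so a vmlal_s8 against it ignores lanes past the end of the input. A minimal standalone sketch of that indexing, assuming a local copy of the table and a hypothetical helper name:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Local copy of the table initialized above: indices 0..6 are 1, 7..13 are 0. */
static const int8_t mask_table[14] = {1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0};

/* For remainder in 1..7, the returned vector has `remainder` leading ones
   followed by zeros, matching vld1_s8(&params->fp32_neon.mask_table[7 - batch])
   in the kernels below. */
int8x8_t remainder_mask(size_t remainder) {
  return vld1_s8(&mask_table[7 - remainder]);
}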

47 changes: 47 additions & 0 deletions src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u1-acc1.c
@@ -0,0 +1,47 @@
// Auto-generated file. Do not edit!
// Template: src/qs8-rsum/scalar.c.in
// Generator: tools/xngen
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>


void xnn_qs8_rsum_minmax_fp32_ukernel__scalar_imagic_u1(
size_t batch,
const int8_t* restrict input,
int8_t* restrict output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(input != NULL);
assert(output != NULL);

const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t vacc0 = vinit_bias;
do {
const int32_t vt = (int32_t) *input++;
vacc0 += vt;
batch -= sizeof(int8_t);
} while (batch != 0);

const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

float vfpacc = (float) vacc0 * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;

*output += (int8_t) vout;
}
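
Note: the "imagic" requantization at the end of this kernel relies on the float magic-bias rounding trick. A minimal standalone demo (not part of the commit; the scale, zero point, and accumulator values are illustrative, clamping is omitted, and float_as_int32 stands in for XNNPACK's float_as_uint32 helper):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int32_t float_as_int32(float f) {
  int32_t i;
  memcpy(&i, &f, sizeof i);
  return i;
}

int main(void) {
  const int32_t acc = -1234;             /* illustrative int32 accumulator */
  const float scale = 0.05f;             /* illustrative requantization scale */
  const int32_t zero_point = 3;          /* illustrative output zero point */
  const float magic_bias = 12582912.0f;  /* 0x1.8p+23, bit pattern 0x4B400000 */
  const int32_t magic_bias_less_zero_point = float_as_int32(magic_bias) - zero_point;

  /* Adding the magic bias forces the float into [2^23, 2^24), where adjacent
     representable values are exactly 1 apart, so the hardware rounds
     acc * scale to the nearest integer and leaves it in the low mantissa bits. */
  float fpacc = (float) acc * scale + magic_bias;
  int32_t out = float_as_int32(fpacc) - magic_bias_less_zero_point;

  printf("imagic result: %d, reference: %d\n",
         out, (int32_t) lrintf((float) acc * scale) + zero_point);
  return 0;
}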
55 changes: 55 additions & 0 deletions src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u2-acc1.c
@@ -0,0 +1,55 @@
// Auto-generated file. Do not edit!
// Template: src/qs8-rsum/scalar.c.in
// Generator: tools/xngen
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>


void xnn_qs8_rsum_minmax_fp32_ukernel__scalar_imagic_u2(
size_t batch,
const int8_t* restrict input,
int8_t* restrict output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(input != NULL);
assert(output != NULL);

const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t vacc0 = vinit_bias;
for (; batch >= 2; batch -= 2) {
const int32_t vt0 = (int32_t) input[0];
const int32_t vt1 = (int32_t) input[1];
input += 2;

vacc0 += vt0;
vacc0 += vt1;
}

if XNN_UNLIKELY(batch != 0) {
const int32_t vt = (int32_t) *input;
vacc0 += vt;
}

const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

float vfpacc = (float) vacc0 * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;

*output += (int8_t) vout;
}
62 changes: 62 additions & 0 deletions src/qs8-rsum/gen/qs8-rdsum-minmax-fp32-scalar-imagic-u4-acc1.c
@@ -0,0 +1,62 @@
// Auto-generated file. Do not edit!
// Template: src/qs8-rsum/scalar.c.in
// Generator: tools/xngen
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>


void xnn_qs8_rsum_minmax_fp32_ukernel__scalar_imagic_u4(
size_t batch,
const int8_t* restrict input,
int8_t* restrict output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(input != NULL);
assert(output != NULL);

const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t vacc0 = vinit_bias;
for (; batch >= 4; batch -= 4) {
const int32_t vt0 = (int32_t) input[0];
const int32_t vt1 = (int32_t) input[1];
const int32_t vt2 = (int32_t) input[2];
const int32_t vt3 = (int32_t) input[3];
input += 4;

vacc0 += vt0;
vacc0 += vt1;
vacc0 += vt2;
vacc0 += vt3;
}

if XNN_UNLIKELY(batch != 0) {
do {
const int32_t vt = (int32_t) *input++;
vacc0 += vt;
batch -= sizeof(int8_t);
} while (batch != 0);
}

const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

float vfpacc = (float) vacc0 * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;

*output += (int8_t) vout;
}
85 changes: 85 additions & 0 deletions src/qs8-rsum/gen/qs8-rsum-minmax-fp32-neon-u16-acc2.c
@@ -0,0 +1,85 @@
// Auto-generated file. Do not edit!
// Template: src/qs8-rsum/neon-mlal.c.in
// Generator: tools/xngen
//
// Copyright 2024 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>

void xnn_qs8_rsum_minmax_fp32_ukernel__neon_mlal_u16_acc2(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(input != NULL);
assert(output != NULL);

int8x8_t vone = vdup_n_s8(1);
int num_batches = batch >> 9;
int32x4_t vacc0 = vmovq_n_s32(0);
int32x4_t vacc1 = vmovq_n_s32(0);
for (; num_batches > 0; --num_batches) {
int16x8_t vacc16_0 = vmovq_n_s16(0);
int16x8_t vacc16_1 = vmovq_n_s16(0);
for (size_t current_batch = 512; current_batch > 0; current_batch -= 16) {
const int8x8_t vt0 = vld1_s8(input); input += 8;
const int8x8_t vt1 = vld1_s8(input); input += 8;

vacc16_0 = vmlal_s8(vacc16_0, vt0, vone);
vacc16_1 = vmlal_s8(vacc16_1, vt1, vone);
}
vacc0 = vaddq_s32(vacc0, vaddq_s32(vmovl_s16(vget_low_s16(vacc16_0)), vmovl_s16(vget_high_s16(vacc16_0))));
vacc1 = vaddq_s32(vacc1, vaddq_s32(vmovl_s16(vget_low_s16(vacc16_1)), vmovl_s16(vget_high_s16(vacc16_1))));
batch -= 512;
}
if (XNN_UNLIKELY(batch != 0)) {
int16x8_t vacc16_0 = vmovq_n_s16(0);
int16x8_t vacc16_1 = vmovq_n_s16(0);
for (; batch >= 16; batch -= 16) {
const int8x8_t vt0 = vld1_s8(input); input += 8;
const int8x8_t vt1 = vld1_s8(input); input += 8;
vacc16_0 = vmlal_s8(vacc16_0, vt0, vone);
vacc16_1 = vmlal_s8(vacc16_1, vt1, vone);
}
vacc16_0 = vaddq_s16(vacc16_0, vacc16_1);
for (; batch >= 8; batch -= 8) {
const int8x8_t vt = vld1_s8(input); input += 8;
vacc16_0 = vmlal_s8(vacc16_0, vt, vone);
}
if (XNN_UNLIKELY(batch != 0)) {
int8x8_t vt = vld1_s8(input);
vone = vld1_s8(&params->fp32_neon.mask_table[7 - batch]);
vacc16_0 = vmlal_s8(vacc16_0, vt, vone);
}
vacc0 = vaddq_s32(vacc0, vaddq_s32(vmovl_s16(vget_low_s16(vacc16_0)), vmovl_s16(vget_high_s16(vacc16_0))));
}
vacc0 = vaddq_s32(vacc0, vacc1);
int32x2_t vacc_lo = vadd_s32(vget_low_s32(vacc0), vget_high_s32(vacc0));
vacc_lo = vpadd_s32(vacc_lo, vacc_lo);

const int32_t vinit_bias = params->fp32_neon.init_bias;
const float vscale = params->fp32_neon.scale;
const int32_t output_min = params->fp32_neon.output_min;
const int32_t output_max = params->fp32_neon.output_max;
const float vmagic_bias = params->fp32_neon.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_neon.magic_bias_less_output_zero_point;

float vfpacc = (float) (vget_lane_s32(vacc_lo, 0) + vinit_bias) * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout -= vmagic_bias_less_output_zero_point;
vout = math_max_s32(vout, output_min);
vout = math_min_s32(vout, output_max);
*output += (int8_t) vout;
}
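
Note: a minimal standalone sketch (not part of this commit; qs8_sum_sketch is a hypothetical name) of the widening-accumulate pattern this kernel uses. vmlal_s8 against a vector of ones widens each int8 lane into a 16-bit accumulator, which is spilled into 32-bit lanes before it can overflow; a scalar tail stands in for the mask_table remainder handling above.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

int32_t qs8_sum_sketch(const int8_t* input, size_t n) {
  const int8x8_t vone = vdup_n_s8(1);
  int32x4_t vacc32 = vmovq_n_s32(0);
  while (n >= 8) {
    /* 128 iterations of 8 bytes keep each int16 lane at |value| <= 128 * 128
       = 16384, safely inside int16 range. */
    const size_t block = n < (size_t) 8 * 128 ? (n & ~(size_t) 7) : (size_t) 8 * 128;
    int16x8_t vacc16 = vmovq_n_s16(0);
    for (size_t i = 0; i < block; i += 8) {
      vacc16 = vmlal_s8(vacc16, vld1_s8(input), vone);
      input += 8;
    }
    vacc32 = vaddq_s32(vacc32, vpaddlq_s16(vacc16));  /* spill to 32-bit lanes */
    n -= block;
  }
  /* Horizontal reduction of the four 32-bit lanes, as in the kernel above. */
  int32x2_t vsum = vadd_s32(vget_low_s32(vacc32), vget_high_s32(vacc32));
  vsum = vpadd_s32(vsum, vsum);
  int32_t sum = vget_lane_s32(vsum, 0);
  for (; n != 0; --n) {
    sum += (int32_t) *input++;  /* scalar tail instead of the mask_table trick */
  }
  return sum;
}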
