Skip to content

Commit

Permalink
[qnnpack] Fix unused var warning when building for different archs. (#48730)
Browse files Browse the repository at this point in the history

Summary:
Pull Request resolved: #48730

.

Test Plan: CI

Differential Revision: D25273068

fbshipit-source-id: aa2c0578127d7c67eb338f6d0603119b5124b647
  • Loading branch information
shoumikhin authored and facebook-github-bot committed Dec 3, 2020
1 parent e7038a7 commit 234b3ce
Show file tree
Hide file tree
Showing 8 changed files with 24 additions and 0 deletions.
Expand Up @@ -30,16 +30,19 @@ void pytorch_q8avgpool_ukernel_mp8x9p8q__neon(

const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

do {
{
Expand Down
Expand Up @@ -30,16 +30,19 @@ void pytorch_q8avgpool_ukernel_up8x9__neon(
const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale =
vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

do {
const uint8_t* i0 = input[0];
Expand Down
Expand Up @@ -23,14 +23,17 @@ void pytorch_q8dwconv_ukernel_mp8x25_per_channel__neon(
quantization_params[restrict static 1]) {
const uint8x8_t vinput_zero_point =
vld1_dup_u8((const uint8_t*)&quantization_params->neon.input_zero_point);
#ifdef __aarch64__
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

do {
uint8_t* output_start = output;
Expand Down
Expand Up @@ -27,14 +27,17 @@ void pytorch_q8dwconv_ukernel_mp8x25__neon(
vdup_n_u8(quantization_params->neon.kernel_zero_points[0]);
const float32x4_t requantization_scale_v =
vdupq_n_f32(quantization_params->neon.requantization_scales[0]);
#ifdef __aarch64__
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

do {
uint8_t* output_start = output;
Expand Down
Expand Up @@ -23,16 +23,19 @@ void pytorch_q8dwconv_ukernel_up8x9_per_channel__neon(
quantization_params[restrict static 1]) {
const uint8x8_t va_zero_point =
vld1_dup_u8((const uint8_t*)&quantization_params->neon.input_zero_point);
#ifdef __aarch64__
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

#ifdef __aarch64__
/* Larger number of registers on AArch64 make it possible to process few
Expand Down
Expand Up @@ -27,16 +27,19 @@ void pytorch_q8dwconv_ukernel_up8x9__neon(
vdup_n_u8(quantization_params->neon.kernel_zero_points[0]);
const float32x4_t requantization_scale_v =
vdupq_n_f32(quantization_params->neon.requantization_scales[0]);
#ifdef __aarch64__
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

#ifdef __aarch64__
/* Larger number of registers on AArch64 make it possible to process few
Expand Down
Expand Up @@ -119,16 +119,19 @@ void pytorch_q8gavgpool_ukernel_mp8x7p7q__neon(

const float32x4_t vscale =
vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);
Expand Down
Expand Up @@ -52,16 +52,19 @@ void pytorch_q8gavgpool_ukernel_up8x7__neon(
}
const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
const int16x8_t voutput_zero_point =
vld1q_dup_s16(&quantization_params->neon.output_zero_point);
const uint8x8_t voutput_min =
vld1_dup_u8(&quantization_params->neon.output_min);
const uint8x8_t voutput_max =
vld1_dup_u8(&quantization_params->neon.output_max);
#else
const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif

do {
const uint8x8_t vi0 = vld1_u8(i0);
Expand Down

0 comments on commit 234b3ce

Please sign in to comment.