[qnnpack] Fix unused var warning when building for different archs. #48730

Closed · wants to merge 1 commit
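All eight NEON micro-kernels in this diff receive the same three added lines. Each kernel prepares two alternative sets of requantization constants: the AArch64 path reads voutput_zero_point, voutput_min, and voutput_max, while the 32-bit path reads the float constants vfmin, vfmax, vfmagic, and vimagic. Both sets used to be declared unconditionally, so whichever set a given target did not use triggered an unused-variable warning. Guarding each set with #if defined(__aarch64__) / #else / #endif means only the set the architecture actually uses is ever declared. A minimal standalone sketch of the pattern; the variable names and values below are illustrative, not taken from the patch:

```c
#include <stdio.h>

int main(void) {
#if defined(__aarch64__)
  /* 64-bit ARM build: only the constants this path consumes are declared. */
  const int output_zero_point = 128; /* illustrative value, not from QNNPACK */
  printf("aarch64 path, zero point = %d\n", output_zero_point);
#else
  /* Other builds: the alternative constants live only in this branch,
   * so neither configuration sees an unused variable. */
  const float fmagic = 12582912.0f; /* illustrative value, not from QNNPACK */
  printf("non-aarch64 path, magic = %f\n", fmagic);
#endif
  return 0;
}
```

The dwconv up8x9 kernels already guard their main loops with an `#ifdef __aarch64__` block (visible further down in those hunks); this change extends the same guard to the constant setup.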
@@ -30,16 +30,19 @@ void pytorch_q8avgpool_ukernel_mp8x9p8q__neon(

   const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
   const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
+#if defined(__aarch64__)
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   do {
     {

@@ -30,16 +30,19 @@ void pytorch_q8avgpool_ukernel_up8x9__neon(
   const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
   const float32x4_t vscale =
       vdupq_n_f32(quantization_params->neon.scale);
+#if defined(__aarch64__)
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   do {
     const uint8_t* i0 = input[0];

@@ -23,14 +23,17 @@ void pytorch_q8dwconv_ukernel_mp8x25_per_channel__neon(
         quantization_params[restrict static 1]) {
   const uint8x8_t vinput_zero_point =
       vld1_dup_u8((const uint8_t*)&quantization_params->neon.input_zero_point);
+#ifdef __aarch64__
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min = vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max = vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   do {
     uint8_t* output_start = output;

@@ -27,14 +27,17 @@ void pytorch_q8dwconv_ukernel_mp8x25__neon(
       vdup_n_u8(quantization_params->neon.kernel_zero_points[0]);
   const float32x4_t requantization_scale_v =
       vdupq_n_f32(quantization_params->neon.requantization_scales[0]);
+#ifdef __aarch64__
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min = vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max = vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   do {
     uint8_t* output_start = output;

@@ -23,16 +23,19 @@ void pytorch_q8dwconv_ukernel_up8x9_per_channel__neon(
         quantization_params[restrict static 1]) {
   const uint8x8_t va_zero_point =
       vld1_dup_u8((const uint8_t*)&quantization_params->neon.input_zero_point);
+#ifdef __aarch64__
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

 #ifdef __aarch64__
   /* Larger number of registers on AArch64 make it possible to process few

@@ -27,16 +27,19 @@ void pytorch_q8dwconv_ukernel_up8x9__neon(
       vdup_n_u8(quantization_params->neon.kernel_zero_points[0]);
   const float32x4_t requantization_scale_v =
       vdupq_n_f32(quantization_params->neon.requantization_scales[0]);
+#ifdef __aarch64__
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

 #ifdef __aarch64__
   /* Larger number of registers on AArch64 make it possible to process few

@@ -119,16 +119,19 @@ void pytorch_q8gavgpool_ukernel_mp8x7p7q__neon(

   const float32x4_t vscale =
       vdupq_n_f32(quantization_params->neon.scale);
+#if defined(__aarch64__)
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
   i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);

@@ -52,16 +52,19 @@ void pytorch_q8gavgpool_ukernel_up8x7__neon(
   }
   const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
   const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
+#if defined(__aarch64__)
   const int16x8_t voutput_zero_point =
       vld1q_dup_s16(&quantization_params->neon.output_zero_point);
   const uint8x8_t voutput_min =
       vld1_dup_u8(&quantization_params->neon.output_min);
   const uint8x8_t voutput_max =
       vld1_dup_u8(&quantization_params->neon.output_max);
+#else
   const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
   const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
   const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
   const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
+#endif

   do {
     const uint8x8_t vi0 = vld1_u8(i0);