From 662089fc1c68eebf46e3e06da5be896ea686f1cb Mon Sep 17 00:00:00 2001
From: Matthias Cremon
Date: Thu, 27 Mar 2025 08:58:07 -0700
Subject: [PATCH] Support alpha in scalar add/sub cases (#9703)

Summary:
Support was missing from a previous diff, adding it here.

Remove floor divide optimizations because they're wrong. We can add them
later if we need them.

Differential Revision: D71949902
---
 backends/cadence/hifi/operators/op_add.cpp |  9 +++++----
 backends/cadence/hifi/operators/op_div.cpp | 16 ----------------
 backends/cadence/hifi/operators/op_sub.cpp |  5 +++--
 3 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp
index c8feea37f7d..9823844af7f 100644
--- a/backends/cadence/hifi/operators/op_add.cpp
+++ b/backends/cadence/hifi/operators/op_add.cpp
@@ -143,14 +143,15 @@ Tensor& add_out(
 
   if ((a_dim == 0) && float_types) {
     for (int i = 0; i < b.numel(); i++)
-      out.mutable_data_ptr<float>()[i] =
-          a.const_data_ptr<float>()[0] + b.const_data_ptr<float>()[i];
+      out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[0] +
+          alpha_val * b.const_data_ptr<float>()[i];
     return out;
   }
   if ((b_dim == 0) && float_types) {
+    // Precompute the value of b * alpha since it's a constant.
+    const float val_b = alpha_val * b.const_data_ptr<float>()[0];
     for (int i = 0; i < a.numel(); i++)
-      out.mutable_data_ptr<float>()[i] =
-          a.const_data_ptr<float>()[i] + b.const_data_ptr<float>()[0];
+      out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[i] + val_b;
     return out;
   }
 
diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp
index da3f4ac2d07..ecfd8f884dd 100644
--- a/backends/cadence/hifi/operators/op_div.cpp
+++ b/backends/cadence/hifi/operators/op_div.cpp
@@ -214,22 +214,6 @@ Tensor& div_out_mode(
   if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float))
     optimized = 0;
 
-  bool float_types =
-      (a_type == ScalarType::Float) && (b_type == ScalarType::Float);
-
-  if ((a_dim == 0) && float_types) {
-    for (int i = 0; i < b.numel(); i++)
-      out.mutable_data_ptr<float>()[i] =
-          a.const_data_ptr<float>()[0] / b.const_data_ptr<float>()[i];
-    return out;
-  }
-  if ((b_dim == 0) && float_types) {
-    for (int i = 0; i < a.numel(); i++)
-      out.mutable_data_ptr<float>()[i] =
-          a.const_data_ptr<float>()[i] / b.const_data_ptr<float>()[0];
-    return out;
-  }
-
   if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
     optimized = 0;
   int mode_val = -1;
diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp
index d1035c2fb1d..c62a04b7b28 100644
--- a/backends/cadence/hifi/operators/op_sub.cpp
+++ b/backends/cadence/hifi/operators/op_sub.cpp
@@ -143,9 +143,10 @@ Tensor& sub_out(
     return out;
   }
   if ((b_dim == 0) && float_types) {
+    // Precompute the value of b * alpha since it's a constant.
+    const float val_b = alpha_val * b.const_data_ptr<float>()[0];
     for (int i = 0; i < a.numel(); i++)
-      out.mutable_data_ptr<float>()[i] =
-          a.const_data_ptr<float>()[i] - b.const_data_ptr<float>()[0];
+      out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[i] - val_b;
    return out;
   }
 
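Note on the alpha semantics: `add.out` and `sub.out` take an `alpha`
multiplier on the second operand, i.e. out = a + alpha * b and
out = a - alpha * b, which the scalar fast paths above were silently
dropping. The sketch below is a standalone illustration of the two
fast-path shapes, assuming float inputs; the helper names are
hypothetical and this is not the kernel code itself:

    #include <cstddef>

    // Scalar a (a_dim == 0): out[i] = a + alpha * b[i]. Here alpha
    // multiplies the element that varies per iteration, so there is
    // nothing loop-invariant to hoist.
    void add_scalar_lhs(
        float a, const float* b, float* out, size_t n, float alpha) {
      for (size_t i = 0; i < n; i++)
        out[i] = a + alpha * b[i];
    }

    // Scalar b (b_dim == 0): out[i] = a[i] - alpha * b. The product
    // alpha * b is loop-invariant, so it is computed once up front,
    // mirroring the val_b precomputation in the patch.
    void sub_scalar_rhs(
        const float* a, float b, float* out, size_t n, float alpha) {
      const float val_b = alpha * b;  // hoisted out of the loop
      for (size_t i = 0; i < n; i++)
        out[i] = a[i] - val_b;
    }

The div fast paths are removed rather than fixed because `div_out_mode`
must honor its rounding mode: the deleted loops returned the plain `/`
result before `mode_val` was ever consulted, which is wrong whenever a
floor (or trunc) mode is requested.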