From d5e4bce350e9f9f6896b5c12caa4caca6717292e Mon Sep 17 00:00:00 2001
From: Github Executorch
Date: Fri, 10 Jan 2025 13:31:53 -0800
Subject: [PATCH 1/2] add maybe_unused where needed in size_test.cpp

At least one internal build has -Werror -Wunused-variable and ExecuTorch
logging disabled.

Differential Revision: [D68032964](https://our.internmc.facebook.com/intern/diff/D68032964/)

ghstack-source-id: 260945746
Pull Request resolved: https://github.com/pytorch/executorch/pull/7602
---
 test/size_test.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/size_test.cpp b/test/size_test.cpp
index 88b605c3bf2..1fab1e914e0 100644
--- a/test/size_test.cpp
+++ b/test/size_test.cpp
@@ -94,7 +94,7 @@ int main(int argc, char** argv) {
   // It assumes the outputs are all tensors.
   for (size_t i = 0; i < method->outputs_size(); i++) {
     auto output_tensor = output_list[i].toTensor();
-    auto data_output = output_tensor.const_data_ptr<float>();
+    [[maybe_unused]] auto data_output = output_tensor.const_data_ptr<float>();
     for (size_t j = 0; j < output_list[i].toTensor().numel(); ++j) {
       ET_LOG(Info, "%f", data_output[j]);
     }

From 21aa1a7e26fc56205bf0e96aac5884a63e2491cc Mon Sep 17 00:00:00 2001
From: Github Executorch
Date: Fri, 10 Jan 2025 13:31:56 -0800
Subject: [PATCH 2/2] Reduce size of Method::parse_values

It looks like this function was big enough that inlining gave up, since I
was seeing non-inlined calls to val_as_Foo() that should have been easily
optimized away. Do said optimization manually.
Differential Revision: [D68037113](https://our.internmc.facebook.com/intern/diff/D68037113/)

ghstack-source-id: 260959235
Pull Request resolved: https://github.com/pytorch/executorch/pull/7603
---
 runtime/executor/method.cpp | 35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/runtime/executor/method.cpp b/runtime/executor/method.cpp
index 90d4c71953b..49caf7dc4af 100644
--- a/runtime/executor/method.cpp
+++ b/runtime/executor/method.cpp
@@ -313,6 +313,8 @@ Error Method::parse_values() {
         "Null value at index %zu",
         i);
 
+    const auto val = serialization_value->val();
+
     switch (serialization_value->val_type()) {
       case executorch_flatbuffer::KernelTypes::Null: {
         // Placement new as the list elements are not initialized, so calling
@@ -321,18 +323,21 @@
         new (&values_[i]) EValue();
       } break;
       case executorch_flatbuffer::KernelTypes::Int: {
-        new (&values_[i]) EValue(serialization_value->val_as_Int()->int_val());
+        new (&values_[i]) EValue(
+            static_cast<const executorch_flatbuffer::Int*>(val)->int_val());
       } break;
       case executorch_flatbuffer::KernelTypes::Double: {
         new (&values_[i])
-            EValue(serialization_value->val_as_Double()->double_val());
+            EValue(static_cast<const executorch_flatbuffer::Double*>(val)
+                       ->double_val());
       } break;
       case executorch_flatbuffer::KernelTypes::Bool: {
-        new (&values_[i])
-            EValue(serialization_value->val_as_Bool()->bool_val());
+        new (&values_[i]) EValue(
+            static_cast<const executorch_flatbuffer::Bool*>(val)->bool_val());
       } break;
       case executorch_flatbuffer::KernelTypes::IntList: {
-        const auto items = serialization_value->val_as_IntList()->items();
+        const auto items =
+            static_cast<const executorch_flatbuffer::IntList*>(val)->items();
         ET_CHECK_OR_RETURN_ERROR(
             items != nullptr, InvalidProgram, "Missing list at index %zu", i);
         // Allocate space for boxed and unboxed list representations using
@@ -352,7 +357,8 @@
             BoxedEvalueList<int64_t>(evalp_list, int_list, items->size()));
       } break;
       case executorch_flatbuffer::KernelTypes::BoolList: {
-        const auto items = serialization_value->val_as_BoolList()->items();
+        const auto items =
+            static_cast<const executorch_flatbuffer::BoolList*>(val)->items();
         ET_CHECK_OR_RETURN_ERROR(
             items != nullptr, InvalidProgram, "Missing list at index %zu", i);
         // NOTE: This is technically not portable. A platform could technically
@@ -366,14 +372,17 @@
             (const bool*)items->data(), items->size()));
       } break;
       case executorch_flatbuffer::KernelTypes::DoubleList: {
-        const auto items = serialization_value->val_as_DoubleList()->items();
+        const auto items =
+            static_cast<const executorch_flatbuffer::DoubleList*>(val)->items();
         ET_CHECK_OR_RETURN_ERROR(
             items != nullptr, InvalidProgram, "Missing list at index %zu", i);
         new (&values_[i])
             EValue(exec_aten::ArrayRef<double>(items->data(), items->size()));
       } break;
       case executorch_flatbuffer::KernelTypes::String: {
-        const auto fb_str = serialization_value->val_as_String()->string_val();
+        const auto fb_str =
+            static_cast<const executorch_flatbuffer::String*>(val)
+                ->string_val();
         ET_CHECK_OR_RETURN_ERROR(
             fb_str != nullptr,
             InvalidProgram,
@@ -383,7 +392,9 @@
       } break;
       case executorch_flatbuffer::KernelTypes::Tensor: {
         auto t = deserialization::parseTensor(
-            program_, memory_manager_, serialization_value->val_as_Tensor());
+            program_,
+            memory_manager_,
+            static_cast<const executorch_flatbuffer::Tensor*>(val));
         if (!t.ok()) {
           ET_LOG(
               Error,
@@ -398,7 +409,7 @@
         // get list of serialization tensors and allocate storage for executor
         // tensors
         auto tensors = deserialization::parseTensorList(
-            serialization_value->val_as_TensorList()->items(),
+            static_cast<const executorch_flatbuffer::TensorList*>(val)->items(),
             values_,
             memory_manager_);
         if (!tensors.ok()) {
@@ -415,7 +426,9 @@
         // Same as TensorList but optional instead of Tensor
         auto tensors = deserialization::parseListOptionalType(
-            serialization_value->val_as_OptionalTensorList()->items(),
+            static_cast<const executorch_flatbuffer::OptionalTensorList*>(
+                val)
+                ->items(),
             values_,
             memory_manager_);
         if (!tensors.ok()) {