diff --git a/backends/cadence/hifi/operators/op_mean.cpp b/backends/cadence/hifi/operators/op_mean.cpp index 514813fbe05..ccd54e80698 100644 --- a/backends/cadence/hifi/operators/op_mean.cpp +++ b/backends/cadence/hifi/operators/op_mean.cpp @@ -17,8 +17,8 @@ using executorch::aten::RuntimeContext; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::runtime::ArrayRef; +using std::optional; using torch::executor::Error; -using torch::executor::optional; namespace impl { namespace HiFi { diff --git a/backends/cadence/hifi/operators/op_quantized_matmul_out.cpp b/backends/cadence/hifi/operators/op_quantized_matmul_out.cpp index 90fe483660b..c52bce26246 100644 --- a/backends/cadence/hifi/operators/op_quantized_matmul_out.cpp +++ b/backends/cadence/hifi/operators/op_quantized_matmul_out.cpp @@ -29,7 +29,7 @@ void inline _typed_quantized_matmul( int64_t X_zero_point, const Tensor& Y, int64_t Y_zero_point, - const exec_aten::optional& bias, + const std::optional& bias, int64_t out_multiplier, int64_t out_shift, int64_t out_zero_point, @@ -182,7 +182,7 @@ void quantized_matmul_out( int64_t X_zero_point, const Tensor& Y, int64_t Y_zero_point, - const exec_aten::optional& bias, + const std::optional& bias, int64_t out_multiplier, int64_t out_shift, int64_t out_zero_point, diff --git a/backends/cadence/hifi/operators/op_slice_copy.cpp b/backends/cadence/hifi/operators/op_slice_copy.cpp index 014eaa6698b..815e0c7dfa3 100644 --- a/backends/cadence/hifi/operators/op_slice_copy.cpp +++ b/backends/cadence/hifi/operators/op_slice_copy.cpp @@ -29,8 +29,8 @@ Tensor& slice_copy_Tensor_out( KernelRuntimeContext& ctx, const Tensor& in, int64_t dim, - exec_aten::optional start_val, - exec_aten::optional end_val, + std::optional start_val, + std::optional end_val, int64_t step, Tensor& out) { (void)ctx; diff --git a/backends/cadence/hifi/operators/operators.h b/backends/cadence/hifi/operators/operators.h index f7f5194d91a..788809efdd8 100644 --- a/backends/cadence/hifi/operators/operators.h +++ b/backends/cadence/hifi/operators/operators.h @@ -67,7 +67,7 @@ void quantized_linear_out( const ::executorch::aten::Tensor& out_multiplier, const ::executorch::aten::Tensor& out_shift, int64_t out_zero_point, - const ::executorch::aten::optional<::executorch::aten::Tensor>& offset, + const ::std::optional<::executorch::aten::Tensor>& offset, ::executorch::aten::Tensor& out); void quantized_linear_per_tensor_out( @@ -80,7 +80,7 @@ void quantized_linear_per_tensor_out( int64_t out_multiplier, int64_t out_shift, int64_t out_zero_point, - const ::executorch::aten::optional<::executorch::aten::Tensor>& offset, + const ::std::optional<::executorch::aten::Tensor>& offset, ::executorch::aten::Tensor& out); void quantized_conv2d_nhwc_out( diff --git a/backends/cortex_m/ops/cmsis_scratch_buffer_context.h b/backends/cortex_m/ops/cmsis_scratch_buffer_context.h index 4b9fdaebdf7..5f470012255 100644 --- a/backends/cortex_m/ops/cmsis_scratch_buffer_context.h +++ b/backends/cortex_m/ops/cmsis_scratch_buffer_context.h @@ -50,7 +50,7 @@ class CMSISScratchBufferContext final { Tensor& scratch_buffer, const Tensor& weights, const Tensor& weight_zero_point, - const torch::executor::optional& bias) + const ::std::optional& bias) : scratch_ptr_(scratch_buffer.mutable_data_ptr()), total_size_(scratch_buffer.size(0)), base_ptr_(reinterpret_cast(scratch_ptr_)), diff --git a/backends/cortex_m/ops/op_quantized_linear.cpp b/backends/cortex_m/ops/op_quantized_linear.cpp index d1ccb6d0d45..2949b85dd6f 100644 --- 
a/backends/cortex_m/ops/op_quantized_linear.cpp +++ b/backends/cortex_m/ops/op_quantized_linear.cpp @@ -27,7 +27,7 @@ Tensor& quantized_linear_out( const Tensor& weight_zero_point, const Tensor& weight_multiplier, const Tensor& weight_shift, - const torch::executor::optional& bias, + const ::std::optional& bias, const Tensor& bias_multiplier, const Tensor& bias_shift, const Tensor& scratch_buffer, @@ -155,7 +155,7 @@ Tensor quantized_linear( const Tensor& weight_zero_point, const Tensor& weight_multiplier, const Tensor& weight_shift, - const torch::executor::optional& bias, + const ::std::optional& bias, const Tensor& bias_multiplier, const Tensor& bias_shift, const Tensor& scratch_buffer, diff --git a/backends/vulkan/test/op_tests/dequantize_test.cpp b/backends/vulkan/test/op_tests/dequantize_test.cpp index 9fca2c632d3..7737b87ff2b 100644 --- a/backends/vulkan/test/op_tests/dequantize_test.cpp +++ b/backends/vulkan/test/op_tests/dequantize_test.cpp @@ -36,7 +36,7 @@ Tensor& dequantize_per_tensor_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out); Tensor& dequantize_per_token_out( @@ -57,7 +57,7 @@ Tensor& dequantize_per_channel_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out); Tensor& dequantize_per_tensor_tensor_args_out( @@ -67,7 +67,7 @@ Tensor& dequantize_per_tensor_tensor_args_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out); // Wrapper function for dequantize_per_tensor_out without context @@ -78,7 +78,7 @@ Tensor& dequantize_per_tensor_out_no_context( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return torch::executor::native::dequantize_per_tensor_out( input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out); @@ -107,7 +107,7 @@ Tensor& dequantize_per_channel_out_no_context( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return torch::executor::native::dequantize_per_channel_out( input, @@ -129,7 +129,7 @@ Tensor& dequantize_per_tensor_tensor_args_out_no_context( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return torch::executor::native::dequantize_per_tensor_tensor_args_out( input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out); @@ -149,7 +149,7 @@ at::Tensor dequantize_per_tensor_aten( ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype); ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype); - executorch::aten::optional opt_et_out_dtype(et_out_dtype); + std::optional opt_et_out_dtype(et_out_dtype); WRAP_TO_ATEN(dequantize_per_tensor_out_no_context, 7) (input, @@ -204,7 +204,7 @@ at::Tensor dequantize_per_channel_aten( ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype); ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype); - executorch::aten::optional opt_et_out_dtype(et_out_dtype); + std::optional opt_et_out_dtype(et_out_dtype); WRAP_TO_ATEN(dequantize_per_channel_out_no_context, 8) (input, @@ -233,7 +233,7 @@ at::Tensor dequantize_per_tensor_tensor_args_aten( ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype); ScalarType et_out_dtype = 
at_scalartype_to_et_scalartype(out_dtype); - executorch::aten::optional opt_et_out_dtype(et_out_dtype); + std::optional opt_et_out_dtype(et_out_dtype); WRAP_TO_ATEN(dequantize_per_tensor_tensor_args_out_no_context, 7) (input, diff --git a/backends/vulkan/test/op_tests/quantize_affine_test.cpp b/backends/vulkan/test/op_tests/quantize_affine_test.cpp index 1c0a6c2e6b9..69469b4fa7b 100644 --- a/backends/vulkan/test/op_tests/quantize_affine_test.cpp +++ b/backends/vulkan/test/op_tests/quantize_affine_test.cpp @@ -35,11 +35,11 @@ at::Tensor quantize_affine_reference_impl( const at::Tensor& input_, const std::vector& block_size, const at::Tensor& scale, - const c10::optional& zero_point_opt, + const std::optional& zero_point_opt, int64_t quant_min, int64_t quant_max, at::ScalarType out_dtype, - c10::optional zero_point_domain_opt = std::string("INT")) { + std::optional zero_point_domain_opt = std::string("INT")) { constexpr float kEps = 1e-7f; const int64_t ndim = input_.dim(); @@ -138,11 +138,11 @@ at::Tensor dequantize_affine_reference_impl( const at::Tensor& input_, const std::vector& block_size, const at::Tensor& scale, - const c10::optional& zero_point_opt, + const std::optional& zero_point_opt, int64_t quant_min, int64_t quant_max, at::ScalarType out_dtype, - c10::optional zero_point_domain_opt = std::string("INT")) { + std::optional zero_point_domain_opt = std::string("INT")) { const int64_t ndim = input_.dim(); _check_dims("input", block_size.size(), ndim); @@ -252,7 +252,7 @@ at::Tensor quantize_affine_reference_impl( input, block_size, scale, - c10::optional(zero_point), + std::optional(zero_point), quant_min, quant_max, dtype, @@ -272,7 +272,7 @@ at::Tensor dequantize_affine_reference_impl( input, block_size, scale, - c10::optional(zero_point), + std::optional(zero_point), quant_min, quant_max, dtype, @@ -1373,4 +1373,4 @@ TEST(VulkanChooseQParamsAffineTest, test_symmetric_no_clipping_narrow_range) { 10, // quant_max (narrow range) 1e-5, // eps at::kFloat); // input dtype -} \ No newline at end of file +} diff --git a/codegen/api/et_cpp.py b/codegen/api/et_cpp.py index 88f1eb83fe0..908f2aa8a70 100644 --- a/codegen/api/et_cpp.py +++ b/codegen/api/et_cpp.py @@ -243,7 +243,7 @@ def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequenc JIT_TO_CPP_DEFAULT = { "False": "false", "True": "true", - "None": "torch::execustd::nullopt", # UGH this one is type directed + "None": "std::nullopt", # UGH this one is type directed "[]": "{}", "contiguous_format": "torch::executorch::MemoryFormat::Contiguous", "long": "torch::executorch::kLong", @@ -278,7 +278,7 @@ def default_expr(d: str, t: Type) -> str: if isinstance(t, OptionalType): if d == "None": - return "torch::executor::nullopt" + return "std::nullopt" return default_expr(d, t.elem) diff --git a/codegen/api/types/types.py b/codegen/api/types/types.py index 712d7e5e341..07c7f7e196a 100644 --- a/codegen/api/types/types.py +++ b/codegen/api/types/types.py @@ -59,7 +59,7 @@ class OptionalCType(CType): def cpp_type(self, *, strip_ref: bool = False) -> str: # Do not pass `strip_ref` recursively. 
- return f"torch::executor::optional<{self.elem.cpp_type()}>" + return f"std::optional<{self.elem.cpp_type()}>" def remove_const_ref(self) -> CType: return OptionalCType(self.elem.remove_const_ref()) diff --git a/extension/aten_util/make_aten_functor_from_et_functor.h b/extension/aten_util/make_aten_functor_from_et_functor.h index 104531f0fbb..699470bf2e4 100644 --- a/extension/aten_util/make_aten_functor_from_et_functor.h +++ b/extension/aten_util/make_aten_functor_from_et_functor.h @@ -67,12 +67,12 @@ struct type_map final { // Optional. template -struct type_map> final { +struct type_map> final { using type = std::optional::type>; }; template -struct type_map&> final { +struct type_map&> final { using type = std::optional::type>&; }; @@ -166,7 +166,7 @@ struct type_convert< typename remove_const_ref::type::value_type>> && std::is_same_v< typename remove_const_ref::type, - torch::executor::optional< + std::optional< typename remove_const_ref::type::value_type>>>> final { public: diff --git a/extension/aten_util/test/make_aten_functor_from_et_functor_test.cpp b/extension/aten_util/test/make_aten_functor_from_et_functor_test.cpp index a5b53096ae2..eb68e161712 100644 --- a/extension/aten_util/test/make_aten_functor_from_et_functor_test.cpp +++ b/extension/aten_util/test/make_aten_functor_from_et_functor_test.cpp @@ -32,8 +32,8 @@ Tensor& add_1_out(const Tensor& a, Tensor& out) { } Tensor& add_optional_scalar_out( - torch::executor::optional s1, - torch::executor::optional s2, + std::optional s1, + std::optional s2, Tensor& out) { if (s1.has_value()) { out.mutable_data_ptr()[0] += s1.value(); @@ -45,8 +45,8 @@ Tensor& add_optional_scalar_out( } Tensor& add_optional_tensor_out( - torch::executor::optional s1, - torch::executor::optional s2, + std::optional s1, + std::optional s2, Tensor& out) { if (s1.has_value()) { out.mutable_data_ptr()[0] += @@ -78,8 +78,7 @@ Tensor& sum_arrayref_tensor_out( } Tensor& sum_arrayref_optional_tensor_out( - torch::executor::ArrayRef< - torch::executor::optional> a, + torch::executor::ArrayRef> a, Tensor& out) { for (int i = 0; i < a.size(); i++) { if (a[i].has_value()) { @@ -137,20 +136,19 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestTypeMap_Tensor) { TEST_F(MakeATenFunctorFromETFunctorTest, TestTypeMap_Optionals) { // Scalar. EXPECT_TRUE((std::is_same< - type_map>::type, + type_map>::type, std::optional>::value)); // Tensor. + EXPECT_TRUE((std::is_same< + type_map>::type, + std::optional>::value)); + // ArrayRef. EXPECT_TRUE( (std::is_same< - type_map>::type, - std::optional>::value)); - // ArrayRef. - EXPECT_TRUE((std::is_same< - type_map>>::type, - std::optional>>::value)); + type_map>>::type, + std::optional>>::value)); EXPECT_TRUE((std::is_same< - type_map>>::type, std::optional>>::value)); } @@ -166,13 +164,13 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestTypeMap_ArrayRef) { type_map>::type, c10::ArrayRef>::value)); // Optionals. + EXPECT_TRUE( + (std::is_same< + type_map>>::type, + c10::ArrayRef>>::value)); EXPECT_TRUE((std::is_same< type_map>>::type, - c10::ArrayRef>>::value)); - EXPECT_TRUE((std::is_same< - type_map>>::type, + std::optional>>::type, c10::ArrayRef>>::value)); } @@ -193,17 +191,16 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalScalar) { // Convert optional at to et. 
auto optional_at_in = std::optional(); auto optional_et = - type_convert, torch::executor::optional>( + type_convert, std::optional>( optional_at_in) .call(); EXPECT_TRUE( - (std::is_same>:: - value)); + (std::is_same>::value)); // Convert optional et to at. - auto optional_et_in = torch::executor::optional(); + auto optional_et_in = std::optional(); auto optional_at_out = - type_convert, std::optional>( + type_convert, std::optional>( optional_et_in) .call(); EXPECT_TRUE( @@ -213,20 +210,19 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalScalar) { TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalTensor) { // Convert optional at to et. auto optional_at_in = std::optional(); - auto optional_et = - type_convert< - std::optional, - torch::executor::optional>(optional_at_in) - .call(); + auto optional_et = type_convert< + std::optional, + std::optional>(optional_at_in) + .call(); EXPECT_TRUE((std::is_same< decltype(optional_et), - torch::executor::optional>::value)); + std::optional>::value)); // Convert optional et to at. torch::executor::testing::TensorFactory tf; - auto et_in = torch::executor::optional(tf.ones({3})); + auto et_in = std::optional(tf.ones({3})); auto optional_at_out = type_convert< - torch::executor::optional, + std::optional, std::optional>(optional_et) .call(); EXPECT_TRUE( @@ -427,9 +423,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { const std::optional const_optional_at_in = std::optional(42); auto const_optional_et = - type_convert< - const std::optional, - torch::executor::optional>(const_optional_at_in) + type_convert, std::optional>( + const_optional_at_in) .call(); EXPECT_TRUE(const_optional_et.has_value()); EXPECT_EQ(const_optional_et.value(), 42); @@ -437,7 +432,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { // Test optional scalar reference conversion std::optional optional_at_ref_in = std::optional(24); auto optional_et_from_ref = - type_convert&, torch::executor::optional>( + type_convert&, std::optional>( optional_at_ref_in) .call(); EXPECT_TRUE(optional_et_from_ref.has_value()); @@ -447,9 +442,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { const std::optional const_optional_at_ref_in = std::optional(84); auto const_optional_et_from_ref = - type_convert< - const std::optional&, - torch::executor::optional>(const_optional_at_ref_in) + type_convert&, std::optional>( + const_optional_at_ref_in) .call(); EXPECT_TRUE(const_optional_et_from_ref.has_value()); EXPECT_EQ(const_optional_et_from_ref.value(), 84); @@ -459,8 +453,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { std::optional(torch::tensor({5})); auto const_optional_tensor_converter = type_convert< const std::optional, - torch::executor::optional>( - const_optional_tensor_at_in); + std::optional>(const_optional_tensor_at_in); auto const_optional_tensor_et = const_optional_tensor_converter.call(); EXPECT_TRUE(const_optional_tensor_et.has_value()); EXPECT_EQ(const_optional_tensor_et.value().const_data_ptr()[0], 5); @@ -470,8 +463,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { std::optional(torch::tensor({7})); auto optional_tensor_converter_from_ref = type_convert< std::optional&, - torch::executor::optional>( - optional_tensor_at_ref_in); + std::optional>(optional_tensor_at_ref_in); auto optional_tensor_et_from_ref = optional_tensor_converter_from_ref.call(); EXPECT_TRUE(optional_tensor_et_from_ref.has_value()); EXPECT_EQ( @@ 
-482,8 +474,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { std::optional(torch::tensor({9})); auto const_optional_tensor_converter_from_ref = type_convert< const std::optional&, - torch::executor::optional>( - const_optional_tensor_at_ref_in); + std::optional>(const_optional_tensor_at_ref_in); auto const_optional_tensor_et_from_ref = const_optional_tensor_converter_from_ref.call(); EXPECT_TRUE(const_optional_tensor_et_from_ref.has_value()); @@ -494,9 +485,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { // Test empty const optional conversions const std::optional empty_const_optional_at_in = std::nullopt; auto empty_const_optional_et = - type_convert< - const std::optional, - torch::executor::optional>(empty_const_optional_at_in) + type_convert, std::optional>( + empty_const_optional_at_in) .call(); EXPECT_FALSE(empty_const_optional_et.has_value()); @@ -505,7 +495,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) { auto empty_const_optional_tensor_et = type_convert< const std::optional, - torch::executor::optional>( + std::optional>( empty_const_optional_tensor_at_in) .call(); EXPECT_FALSE(empty_const_optional_tensor_et.has_value()); diff --git a/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp b/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp index b9176cfc826..a5c6a9bb0a3 100644 --- a/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp +++ b/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp @@ -54,8 +54,8 @@ add_tensor_out(KernelRuntimeContext& ctx, ArrayRef a, Tensor& out) { Tensor& add_optional_scalar_out( KernelRuntimeContext& ctx, - optional s1, - optional s2, + std::optional s1, + std::optional s2, Tensor& out) { (void)ctx; if (s1.has_value()) { @@ -182,7 +182,7 @@ TEST_F(MakeBoxedFromUnboxedFunctorTest, UnboxOptionalArrayRef) { // prepare optional tensors. 
torch::executor::testing::TensorFactory tf; - optional storage[2]; + std::optional storage[2]; EValue evalues[2] = {EValue(tf.ones({5})), EValue()}; EValue* values_p[2] = {&evalues[0], &evalues[1]}; BoxedEvalueList> a_box(values_p, storage, 2); diff --git a/extension/llm/custom_ops/op_sdpa.cpp b/extension/llm/custom_ops/op_sdpa.cpp index c98fa1729fa..682019e5108 100644 --- a/extension/llm/custom_ops/op_sdpa.cpp +++ b/extension/llm/custom_ops/op_sdpa.cpp @@ -33,7 +33,7 @@ bool validate_flash_attention_args( const Tensor& query, const Tensor& key, const Tensor& value, - const optional& attn_mask) { + const std::optional& attn_mask) { ET_CHECK_OR_RETURN_FALSE(query.dim() == 4, "query must be a 4D tensor"); ET_CHECK_OR_RETURN_FALSE(key.dim() == 4, "key must be a 4D tensor"); ET_CHECK_OR_RETURN_FALSE(value.dim() == 4, "value must be a 4D tensor"); @@ -245,11 +245,11 @@ Tensor& flash_attention_kernel_out( const Tensor& query, const Tensor& key, const Tensor& value, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output) { (void)ctx; ET_KERNEL_CHECK( @@ -281,12 +281,12 @@ Tensor& flash_attention_kernel_out( is_causal, attn_mask, scale, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt); + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt); } else if (seq_len >= 192) { sdpa::impl::cpu_flash_attention( output, @@ -297,12 +297,12 @@ Tensor& flash_attention_kernel_out( is_causal, attn_mask, scale, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt); + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt); } else { sdpa::impl::cpu_flash_attention( output, @@ -313,12 +313,12 @@ Tensor& flash_attention_kernel_out( is_causal, attn_mask, scale, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt, - nullopt); + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt); } }); return output; @@ -330,18 +330,18 @@ Tensor& custom_sdpa_out_impl( const Tensor& k, const Tensor& v, const int64_t start_pos, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output, - const optional& q_zero_points = nullopt, - const optional& q_scales = nullopt, - const optional& k_zero_points = nullopt, - const optional& k_scales = nullopt, - const optional& v_zero_points = nullopt, - const optional& v_scales = nullopt, + const std::optional& q_zero_points = std::nullopt, + const std::optional& q_scales = std::nullopt, + const std::optional& k_zero_points = std::nullopt, + const std::optional& k_scales = std::nullopt, + const std::optional& v_zero_points = std::nullopt, + const std::optional& v_scales = std::nullopt, bool is_seq_at_dim_2 = false) { ET_KERNEL_CHECK_MSG( ctx, @@ -484,17 +484,17 @@ Tensor& custom_quantized_sdpa_out( const Tensor& k, const Tensor& v, const int64_t start_pos, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, - const optional& q_zero_points, - const optional& q_scales, - const optional& k_zero_points, - const optional& k_scales, - const optional& 
v_zero_points, - const optional& v_scales, + const std::optional scale, + const std::optional& q_zero_points, + const std::optional& q_scales, + const std::optional& k_zero_points, + const std::optional& k_scales, + const std::optional& v_zero_points, + const std::optional& v_scales, const bool is_seq_at_dim_2, Tensor& output) { return custom_sdpa_out_impl( @@ -538,11 +538,11 @@ Tensor& custom_sdpa_out( const Tensor& k, const Tensor& v, const int64_t start_pos, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output) { return custom_sdpa_out_impl( ctx, q, k, v, start_pos, attn_mask, dropout_p, is_causal, scale, output); @@ -572,11 +572,11 @@ Tensor& sdpa_with_kv_cache_out( Tensor& value_cache, const int64_t start_pos, const int64_t seq_len, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output) { (void)ctx; ET_KERNEL_CHECK( diff --git a/extension/llm/custom_ops/op_sdpa.h b/extension/llm/custom_ops/op_sdpa.h index 9d357eb6ea1..915126359fa 100644 --- a/extension/llm/custom_ops/op_sdpa.h +++ b/extension/llm/custom_ops/op_sdpa.h @@ -24,11 +24,11 @@ Tensor& sdpa_with_kv_cache_out( Tensor& value_cache, const int64_t start_pos, const int64_t seq_len, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output); Tensor& custom_sdpa_out( @@ -37,11 +37,11 @@ Tensor& custom_sdpa_out( const Tensor& k, const Tensor& v, const int64_t start_pos, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output); Tensor& flash_attention_kernel_out( @@ -49,11 +49,11 @@ Tensor& flash_attention_kernel_out( const Tensor& query, const Tensor& key, const Tensor& value, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output); Tensor& custom_quantized_sdpa_out( @@ -62,17 +62,17 @@ Tensor& custom_quantized_sdpa_out( const Tensor& k, const Tensor& v, const int64_t start_pos, - const optional& attn_mask, + const std::optional& attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, - const optional& q_zero_points, - const optional& q_scales, - const optional& k_zero_points, - const optional& k_scales, - const optional& v_zero_points, - const optional& v_scales, + const std::optional scale, + const std::optional& q_zero_points, + const std::optional& q_scales, + const std::optional& k_zero_points, + const std::optional& k_scales, + const std::optional& v_zero_points, + const std::optional& v_scales, const bool is_seq_at_dim_1, Tensor& output); } // namespace native diff --git a/extension/llm/custom_ops/op_sdpa_aot.cpp b/extension/llm/custom_ops/op_sdpa_aot.cpp index 5bbf22d336e..8e3afcfc5a2 100644 --- 
a/extension/llm/custom_ops/op_sdpa_aot.cpp +++ b/extension/llm/custom_ops/op_sdpa_aot.cpp @@ -27,11 +27,11 @@ Tensor& sdpa_with_kv_cache_out_no_context( const int64_t seq_len, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output); at::Tensor sdpa_with_kv_cache_aten( @@ -57,11 +57,11 @@ Tensor& custom_sdpa_out_no_context( const int64_t start_pos, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output); at::Tensor custom_sdpa_aten( @@ -84,17 +84,17 @@ Tensor& custom_quantized_sdpa_out_no_context( const int64_t start_pos, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, - const optional q_zero_points, - const optional q_scales, - const optional k_zero_points, - const optional k_scales, - const optional v_zero_points, - const optional v_scales, + const std::optional scale, + const std::optional q_zero_points, + const std::optional q_scales, + const std::optional k_zero_points, + const std::optional k_scales, + const std::optional v_zero_points, + const std::optional v_scales, const bool is_seq_at_dim_2, Tensor& output); @@ -153,11 +153,11 @@ Tensor& sdpa_with_kv_cache_out_no_context( const int64_t seq_len, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output) { executorch::runtime::KernelRuntimeContext context{}; return torch::executor::native::sdpa_with_kv_cache_out( @@ -215,11 +215,11 @@ Tensor& custom_sdpa_out_no_context( const int64_t start_pos, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, + const std::optional scale, Tensor& output) { executorch::aten::RuntimeContext context{}; return torch::executor::native::custom_sdpa_out( @@ -260,17 +260,17 @@ Tensor& custom_quantized_sdpa_out_no_context( const int64_t start_pos, // @lint-ignore CLANGTIDY facebook-hte-ConstantArgumentPassByValue // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional attn_mask, + const std::optional attn_mask, const double dropout_p, const bool is_causal, // @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy - const optional scale, - const optional q_zero_points, - const optional q_scales, - 
const optional k_zero_points, - const optional k_scales, - const optional v_zero_points, - const optional v_scales, + const std::optional scale, + const std::optional q_zero_points, + const std::optional q_scales, + const std::optional k_zero_points, + const std::optional k_scales, + const std::optional v_zero_points, + const std::optional v_scales, const bool is_seq_at_dim_2, Tensor& output) { executorch::aten::RuntimeContext context{}; diff --git a/extension/llm/custom_ops/op_sdpa_impl.h b/extension/llm/custom_ops/op_sdpa_impl.h index 8b8cc627afc..e0a81c4650c 100644 --- a/extension/llm/custom_ops/op_sdpa_impl.h +++ b/extension/llm/custom_ops/op_sdpa_impl.h @@ -547,14 +547,14 @@ void cpu_flash_attention( const Tensor& value, double dropout_p, bool is_causal, - const optional& attn_mask, - const optional& scale, - const optional& q_zero_points, - const optional& q_scales, - const optional& k_zero_points, - const optional& k_scales, - const optional& v_zero_points, - const optional& v_scales, + const std::optional& attn_mask, + const std::optional& scale, + const std::optional& q_zero_points, + const std::optional& q_scales, + const std::optional& k_zero_points, + const std::optional& k_scales, + const std::optional& v_zero_points, + const std::optional& v_scales, const SeqDim seq_dim = SeqDim::TWO, const int64_t start_pos = 0, const int64_t num_keys_for_causal_attention = -1) { diff --git a/extension/llm/custom_ops/op_update_cache.cpp b/extension/llm/custom_ops/op_update_cache.cpp index 7ab994deb5f..215cb276a28 100644 --- a/extension/llm/custom_ops/op_update_cache.cpp +++ b/extension/llm/custom_ops/op_update_cache.cpp @@ -26,7 +26,7 @@ bool validate_cache_params( const Tensor& quantized_cache, int64_t start_pos, int64_t seq_length, - const optional& indices = nullopt) { + const std::optional& indices = std::nullopt) { ET_CHECK_OR_RETURN_FALSE( quantized_cache.dim() == 4, "quantized cache must be a 4D tensor"); @@ -94,7 +94,7 @@ Tensor& update_cache_impl( Tensor& cache, const int64_t start_pos, Tensor& output, - const optional& indices = nullopt) { + const std::optional& indices = std::nullopt) { (void)ctx; ET_CHECK_MSG( diff --git a/kernels/aten/cpu/op__clone_dim_order.cpp b/kernels/aten/cpu/op__clone_dim_order.cpp index 5e6c35d64f9..ff0d9852cf2 100644 --- a/kernels/aten/cpu/op__clone_dim_order.cpp +++ b/kernels/aten/cpu/op__clone_dim_order.cpp @@ -28,7 +28,7 @@ using Optional = std::optional; namespace { Optional get_memory_format(OptionalArrayRef dim_order) { if (!dim_order.has_value()) { - return executorch::aten::nullopt; + return std::nullopt; } if (is_contiguous_dim_order( dim_order.value().data(), dim_order.value().size())) { diff --git a/kernels/aten/cpu/op__to_dim_order_copy.cpp b/kernels/aten/cpu/op__to_dim_order_copy.cpp index 0ed10f69d5a..92ee48de31a 100644 --- a/kernels/aten/cpu/op__to_dim_order_copy.cpp +++ b/kernels/aten/cpu/op__to_dim_order_copy.cpp @@ -22,13 +22,11 @@ using MemoryFormat = executorch::aten::MemoryFormat; template using OptionalArrayRef = executorch::aten::OptionalArrayRef; -template -using Optional = std::optional; - namespace { -Optional get_memory_format(OptionalArrayRef dim_order) { +std::optional get_memory_format( + OptionalArrayRef dim_order) { if (!dim_order.has_value()) { - return executorch::aten::nullopt; + return std::nullopt; } if (is_contiguous_dim_order( dim_order.value().data(), dim_order.value().size())) { @@ -105,7 +103,7 @@ Tensor& _to_dim_order_copy_out( InvalidArgument, out); - Optional memory_format = get_memory_format(dim_order); + 
std::optional memory_format = get_memory_format(dim_order); at::_to_copy_outf(self, non_blocking, memory_format, out); return out; diff --git a/kernels/optimized/cpu/op_linear.cpp b/kernels/optimized/cpu/op_linear.cpp index b855665475b..8ccdc31f92c 100644 --- a/kernels/optimized/cpu/op_linear.cpp +++ b/kernels/optimized/cpu/op_linear.cpp @@ -74,7 +74,7 @@ Tensor& opt_linear_out( RuntimeContext& ctx, const Tensor& in, const Tensor& mat2, - const optional& bias, + const std::optional& bias, Tensor& out) { ET_KERNEL_CHECK(ctx, check_linear_args(in, mat2, out), InvalidArgument, out); diff --git a/kernels/optimized/cpu/op_native_layer_norm.cpp b/kernels/optimized/cpu/op_native_layer_norm.cpp index 8d5410cb581..f9ab9e2b361 100644 --- a/kernels/optimized/cpu/op_native_layer_norm.cpp +++ b/kernels/optimized/cpu/op_native_layer_norm.cpp @@ -27,8 +27,8 @@ template void layer_norm( const Tensor& input, IntArrayRef normalized_shape, - const optional& weight, - const optional& bias, + const std::optional& weight, + const std::optional& bias, CTYPE eps, Tensor& out, Tensor& mean, diff --git a/kernels/portable/cpu/op_any.cpp b/kernels/portable/cpu/op_any.cpp index 0f3a36b6ba7..1acf484d3f0 100644 --- a/kernels/portable/cpu/op_any.cpp +++ b/kernels/portable/cpu/op_any.cpp @@ -54,7 +54,7 @@ Tensor& any_all_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) { Tensor& any_dims_out( KernelRuntimeContext& ctx, const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool keepdim, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_argmax.cpp b/kernels/portable/cpu/op_argmax.cpp index a48c152133b..a7c09d06e1d 100644 --- a/kernels/portable/cpu/op_argmax.cpp +++ b/kernels/portable/cpu/op_argmax.cpp @@ -25,7 +25,7 @@ using std::optional; Tensor& argmax_out( KernelRuntimeContext& ctx, const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_argmin.cpp b/kernels/portable/cpu/op_argmin.cpp index 55f2f82b04b..3031948d83b 100644 --- a/kernels/portable/cpu/op_argmin.cpp +++ b/kernels/portable/cpu/op_argmin.cpp @@ -25,7 +25,7 @@ using std::optional; Tensor& argmin_out( KernelRuntimeContext& ctx, const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_as_strided_copy.cpp b/kernels/portable/cpu/op_as_strided_copy.cpp index 060e3cf0761..556813b5fef 100644 --- a/kernels/portable/cpu/op_as_strided_copy.cpp +++ b/kernels/portable/cpu/op_as_strided_copy.cpp @@ -21,7 +21,7 @@ Tensor& as_strided_copy_out( const Tensor& in, ArrayRef size, ArrayRef stride, - optional storage_offset, + std::optional storage_offset, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_cdist_forward.cpp b/kernels/portable/cpu/op_cdist_forward.cpp index c4a026f9e29..2e124665ba2 100644 --- a/kernels/portable/cpu/op_cdist_forward.cpp +++ b/kernels/portable/cpu/op_cdist_forward.cpp @@ -120,7 +120,7 @@ Tensor& _cdist_forward_out( const Tensor& x1, const Tensor& x2, double p, - optional compute_mode, + std::optional compute_mode, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_cumsum.cpp b/kernels/portable/cpu/op_cumsum.cpp index 3a518d30715..e6374d7d66d 100644 --- a/kernels/portable/cpu/op_cumsum.cpp +++ b/kernels/portable/cpu/op_cumsum.cpp @@ -90,7 +90,7 @@ Tensor& cumsum_out( KernelRuntimeContext& ctx, const Tensor& self, int64_t dim, - optional enforced_dtype, + std::optional enforced_dtype, Tensor& out) { (void)ctx; diff --git 
a/kernels/portable/cpu/op_full_like.cpp b/kernels/portable/cpu/op_full_like.cpp index 5fefd53c30b..32b4d720d53 100644 --- a/kernels/portable/cpu/op_full_like.cpp +++ b/kernels/portable/cpu/op_full_like.cpp @@ -21,7 +21,7 @@ Tensor& full_like_out( KernelRuntimeContext& ctx, const Tensor& in, const Scalar& fill_value, - optional memory_format, + std::optional memory_format, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_index_put.cpp b/kernels/portable/cpu/op_index_put.cpp index 812d3e8fab3..519842db598 100644 --- a/kernels/portable/cpu/op_index_put.cpp +++ b/kernels/portable/cpu/op_index_put.cpp @@ -19,8 +19,7 @@ namespace executor { namespace native { using Tensor = executorch::aten::Tensor; -using TensorOptList = - executorch::aten::ArrayRef>; +using TensorOptList = executorch::aten::ArrayRef>; Tensor& index_put_out( KernelRuntimeContext& ctx, diff --git a/kernels/portable/cpu/op_linear_scratch_example.cpp b/kernels/portable/cpu/op_linear_scratch_example.cpp index b7a263a199f..ba8d1a8fc1c 100644 --- a/kernels/portable/cpu/op_linear_scratch_example.cpp +++ b/kernels/portable/cpu/op_linear_scratch_example.cpp @@ -37,7 +37,7 @@ namespace { bool check_linear_scratch_example_args( const Tensor& input, const Tensor& weight, - const optional& bias, + const std::optional& bias, Tensor& out, Tensor& scratch) { ET_CHECK_OR_RETURN_FALSE( @@ -75,7 +75,7 @@ bool check_linear_scratch_example_args( Tensor& linear_scratch_example( const Tensor& input, const Tensor& weight, - const optional& bias, + const std::optional& bias, Tensor& out, Tensor& scratch) { size_t M, N, K; @@ -135,7 +135,7 @@ Tensor& linear_scratch_example( KernelRuntimeContext& ctx, const Tensor& input, const Tensor& weight, - const optional& bias, + const std::optional& bias, Tensor& out, Tensor& scratch) { // TODO(larryliu): Add a context arg to the real op function and remove this diff --git a/kernels/portable/cpu/op_mean.cpp b/kernels/portable/cpu/op_mean.cpp index 63c78968751..45f297484a3 100644 --- a/kernels/portable/cpu/op_mean.cpp +++ b/kernels/portable/cpu/op_mean.cpp @@ -22,9 +22,9 @@ using ScalarType = executorch::aten::ScalarType; Tensor& mean_dim_out( KernelRuntimeContext& ctx, const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { (void)ctx; @@ -78,7 +78,7 @@ Tensor& mean_dim_out( Tensor& mean_dtype_out( KernelRuntimeContext& ctx, const Tensor& in, - optional dtype, + std::optional dtype, Tensor& out) { return mean_dim_out(ctx, in, ArrayRef(), false, dtype, out); } diff --git a/kernels/portable/cpu/op_native_dropout.cpp b/kernels/portable/cpu/op_native_dropout.cpp index 8dafd9e0512..6be19a45df6 100644 --- a/kernels/portable/cpu/op_native_dropout.cpp +++ b/kernels/portable/cpu/op_native_dropout.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -17,7 +18,7 @@ std::tuple native_dropout_out( KernelRuntimeContext& ctx, const Tensor& input, double prob, - torch::executor::optional train, + ::std::optional train, Tensor& out, Tensor& mask) { std::tuple ret(out, mask); diff --git a/kernels/portable/cpu/op_native_group_norm.cpp b/kernels/portable/cpu/op_native_group_norm.cpp index 9e300dc7829..195e68173c2 100644 --- a/kernels/portable/cpu/op_native_group_norm.cpp +++ b/kernels/portable/cpu/op_native_group_norm.cpp @@ -24,8 +24,8 @@ namespace { template void group_norm( const Tensor& input, - const optional& weight, - const optional& bias, + const std::optional& weight, + const std::optional& bias, int64_t sN, int64_t 
sC, int64_t sHxW, diff --git a/kernels/portable/cpu/op_native_layer_norm.cpp b/kernels/portable/cpu/op_native_layer_norm.cpp index 12a03a184f6..72a74b2ff8a 100644 --- a/kernels/portable/cpu/op_native_layer_norm.cpp +++ b/kernels/portable/cpu/op_native_layer_norm.cpp @@ -25,8 +25,8 @@ template void layer_norm( const Tensor& input, IntArrayRef normalized_shape, - const optional& weight, - const optional& bias, + const std::optional& weight, + const std::optional& bias, CTYPE eps, Tensor& out, Tensor& mean, diff --git a/kernels/portable/cpu/op_prod.cpp b/kernels/portable/cpu/op_prod.cpp index ba76a1f200c..e195d3c6504 100644 --- a/kernels/portable/cpu/op_prod.cpp +++ b/kernels/portable/cpu/op_prod.cpp @@ -20,7 +20,7 @@ using ScalarType = executorch::aten::ScalarType; Tensor& prod_out( KernelRuntimeContext& ctx, const Tensor& in, - optional dtype, + std::optional dtype, Tensor& out) { (void)ctx; @@ -53,7 +53,7 @@ Tensor& prod_int_out( const Tensor& in, int64_t dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_sum.cpp b/kernels/portable/cpu/op_sum.cpp index dcd81797dcf..b683c290f6d 100644 --- a/kernels/portable/cpu/op_sum.cpp +++ b/kernels/portable/cpu/op_sum.cpp @@ -23,9 +23,9 @@ using ScalarType = executorch::aten::ScalarType; Tensor& sum_dim_out( KernelRuntimeContext& ctx, const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/op_var.cpp b/kernels/portable/cpu/op_var.cpp index 202d7df80bc..3901fdd8376 100644 --- a/kernels/portable/cpu/op_var.cpp +++ b/kernels/portable/cpu/op_var.cpp @@ -24,7 +24,7 @@ void compute_variance( KernelRuntimeContext& ctx, const Tensor& in, Tensor& out, - optional> dim_list, + std::optional> dim_list, const size_t num, const double denominator) { CTYPE_OUT* out_data = out.mutable_data_ptr(); @@ -62,7 +62,7 @@ void compute_variance( Tensor& var_out( KernelRuntimeContext& ctx, const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool unbiased, bool keepdim, Tensor& out) { @@ -105,8 +105,8 @@ Tensor& var_out( Tensor& var_correction_out( KernelRuntimeContext& ctx, const Tensor& in, - optional> dim_list, - const optional& correction, + std::optional> dim_list, + const std::optional& correction, bool keepdim, Tensor& out) { (void)ctx; diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp index 1527e6d9e35..5ec82b3126d 100644 --- a/kernels/portable/cpu/util/copy_ops_util.cpp +++ b/kernels/portable/cpu/util/copy_ops_util.cpp @@ -42,7 +42,7 @@ bool check_as_strided_copy_args( const Tensor& in, ArrayRef size, ArrayRef stride, - optional storage_offset, + std::optional storage_offset, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_CHECK_OR_RETURN_FALSE( diff --git a/kernels/portable/cpu/util/copy_ops_util.h b/kernels/portable/cpu/util/copy_ops_util.h index 15a7916e0e8..bdb9b63f984 100644 --- a/kernels/portable/cpu/util/copy_ops_util.h +++ b/kernels/portable/cpu/util/copy_ops_util.h @@ -58,7 +58,7 @@ bool check_as_strided_copy_args( const Tensor& in, ArrayRef size, ArrayRef stride, - optional storage_offset, + std::optional storage_offset, Tensor& out); template diff --git a/kernels/portable/cpu/util/distance_util.cpp b/kernels/portable/cpu/util/distance_util.cpp index e7f146e2e9d..a3cf4cf318e 100644 --- a/kernels/portable/cpu/util/distance_util.cpp +++ 
b/kernels/portable/cpu/util/distance_util.cpp @@ -32,7 +32,7 @@ bool check_cdist_args( const Tensor& x1, const Tensor& x2, double p, - optional compute_mode, + std::optional compute_mode, const Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(x1, x2)); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(x1, out)); diff --git a/kernels/portable/cpu/util/distance_util.h b/kernels/portable/cpu/util/distance_util.h index 05406e35489..120db7c3464 100644 --- a/kernels/portable/cpu/util/distance_util.h +++ b/kernels/portable/cpu/util/distance_util.h @@ -127,7 +127,7 @@ bool check_cdist_args( const Tensor& x1, const Tensor& x2, double p, - optional compute_mode, + std::optional compute_mode, const Tensor& out); } // namespace executor diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp index daa85f6beec..582a89c5cc9 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.cpp +++ b/kernels/portable/cpu/util/kernel_ops_util.cpp @@ -460,7 +460,7 @@ void get_convolution_out_target_size( bool check_cumsum_args( const Tensor& in, int64_t dim, - optional dtype, + std::optional dtype, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); diff --git a/kernels/portable/cpu/util/kernel_ops_util.h b/kernels/portable/cpu/util/kernel_ops_util.h index e3eaf4d043e..8a846d035e1 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.h +++ b/kernels/portable/cpu/util/kernel_ops_util.h @@ -432,7 +432,7 @@ void get_convolution_out_target_size( bool check_cumsum_args( const Tensor& self, int64_t dim, - optional enforced_dtype, + std::optional enforced_dtype, Tensor& out); bool check_max_pool2d_with_indices_args( diff --git a/kernels/portable/cpu/util/reduce_util.cpp b/kernels/portable/cpu/util/reduce_util.cpp index afeb56f719f..d622e758a4e 100644 --- a/kernels/portable/cpu/util/reduce_util.cpp +++ b/kernels/portable/cpu/util/reduce_util.cpp @@ -319,9 +319,9 @@ Error resize_reduction_out( */ bool check_reduction_args( const Tensor& in, - const optional>& dim_list, + const std::optional>& dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { if (dtype.has_value()) { ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); @@ -339,9 +339,9 @@ bool check_reduction_args( */ bool check_reduction_args_single_dim( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out, bool allow_empty_dim) { if (dtype.has_value()) { @@ -369,9 +369,9 @@ bool check_reduction_args_single_dim( bool check_mean_dim_args( const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE( check_reduction_args(in, dim_list, keepdim, dtype, out)); @@ -402,7 +402,7 @@ bool check_amin_amax_args( bool check_argmin_argmax_args( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE( @@ -432,7 +432,7 @@ bool check_min_max_args( bool check_prod_out_args( const Tensor& in, - optional dtype, + std::optional dtype, Tensor& out) { if (dtype.has_value()) { ET_LOG_AND_RETURN_IF_FALSE(dtype.value() == out.scalar_type()); diff --git a/kernels/portable/cpu/util/reduce_util.h b/kernels/portable/cpu/util/reduce_util.h index 51981328c4f..2e0eaa6e184 100644 --- a/kernels/portable/cpu/util/reduce_util.h +++ b/kernels/portable/cpu/util/reduce_util.h @@ -755,24 +755,24 @@ inline Error resize_reduction_out( #ifndef USE_ATEN_LIB bool 
check_reduction_args( const Tensor& in, - const optional>& dim_list, + const std::optional>& dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out); bool check_reduction_args_single_dim( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out, bool allow_empty_dim = false); bool check_mean_dim_args( const Tensor& in, - optional> dim_list, + std::optional> dim_list, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out); bool check_amin_amax_args( @@ -783,7 +783,7 @@ bool check_amin_amax_args( bool check_argmin_argmax_args( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out); @@ -796,7 +796,7 @@ bool check_min_max_args( bool check_prod_out_args( const Tensor& in, - optional dtype, + std::optional dtype, Tensor& out); #endif diff --git a/kernels/portable/cpu/util/test/reduce_test.cpp b/kernels/portable/cpu/util/test/reduce_test.cpp index 69e1093b183..aa93990b8be 100644 --- a/kernels/portable/cpu/util/test/reduce_test.cpp +++ b/kernels/portable/cpu/util/test/reduce_test.cpp @@ -24,7 +24,7 @@ using torch::executor::apply_over_dim; using torch::executor::apply_over_dim_list; using torch::executor::get_out_numel; -void _apply_over_dim(const Tensor& in, const optional& dim) { +void _apply_over_dim(const Tensor& in, const std::optional& dim) { int64_t* in_data = in.mutable_data_ptr(); for (size_t out_ix = 0; out_ix < get_out_numel(in, dim); ++out_ix) { apply_over_dim( @@ -37,7 +37,7 @@ void _apply_over_dim(const Tensor& in, const optional& dim) { void _apply_over_dim_list( const Tensor& in, - const optional>& dim_list) { + const std::optional>& dim_list) { int64_t* in_data = in.mutable_data_ptr(); for (size_t out_ix = 0; out_ix < get_out_numel(in, dim_list); ++out_ix) { apply_over_dim_list( @@ -50,7 +50,7 @@ void _apply_over_dim_list( TEST(ReduceUtilTest, ApplyOverDim) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 4, 5, 3}); _apply_over_dim(in, 0); @@ -86,7 +86,8 @@ TEST(ReduceUtilTest, ApplyOverDim) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_2[1] = {2}; - dim_list = optional>(ArrayRef{dim_array_2, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_2, 1}); _apply_over_dim(in, 2); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -104,7 +105,8 @@ TEST(ReduceUtilTest, ApplyOverDim) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_3[1] = {3}; - dim_list = optional>(ArrayRef{dim_array_3, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_3, 1}); _apply_over_dim(in, 3); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -123,7 +125,7 @@ TEST(ReduceUtilTest, ApplyOverDim) { TEST(ReduceUtilTest, ApplyOverDimListNull) { TensorFactory tf; - optional> null_dim_list; + std::optional> null_dim_list; Tensor in = tf.ones({2, 4, 5, 3}); _apply_over_dim_list(in, null_dim_list); @@ -132,7 +134,7 @@ TEST(ReduceUtilTest, ApplyOverDimListNull) { TEST(ReduceUtilTest, ApplyOverZeroDimListEmpty) { TensorFactory tf; - optional> null_dim_list; + std::optional> null_dim_list; Tensor in = tf.ones({}); _apply_over_dim_list(in, null_dim_list); @@ -141,9 +143,10 @@ TEST(ReduceUtilTest, ApplyOverZeroDimListEmpty) { TEST(ReduceUtilTest, ApplyOverZeroDim) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; int64_t dim_array_0[1] = {0}; - dim_list = optional>(ArrayRef{dim_array_0, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_0, 1}); Tensor in = tf.ones({}); _apply_over_dim_list(in, 
dim_list); @@ -152,7 +155,7 @@ TEST(ReduceUtilTest, ApplyOverZeroDim) { TEST(ReduceUtilTest, ApplyOverDimListEmpty) { TensorFactory tf; - optional> empty_dim_list{ArrayRef{}}; + std::optional> empty_dim_list{ArrayRef{}}; Tensor in = tf.ones({2, 4, 5, 3}); _apply_over_dim_list(in, empty_dim_list); @@ -161,11 +164,12 @@ TEST(ReduceUtilTest, ApplyOverDimListEmpty) { TEST(ReduceUtilTest, ApplyOverDimListLength1) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_0[1] = {0}; - dim_list = optional>(ArrayRef{dim_array_0, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_0, 1}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -183,7 +187,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength1) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_1[1] = {1}; - dim_list = optional>(ArrayRef{dim_array_1, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_1, 1}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -201,7 +206,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength1) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_2[1] = {2}; - dim_list = optional>(ArrayRef{dim_array_2, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_2, 1}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -219,7 +225,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength1) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_3[1] = {3}; - dim_list = optional>(ArrayRef{dim_array_3, 1}); + dim_list = + std::optional>(ArrayRef{dim_array_3, 1}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -238,11 +245,12 @@ TEST(ReduceUtilTest, ApplyOverDimListLength1) { TEST(ReduceUtilTest, ApplyOverDimListLength2) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_01[2] = {0, 1}; - dim_list = optional>(ArrayRef{dim_array_01, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_01, 2}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -260,7 +268,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_02[2] = {0, 2}; - dim_list = optional>(ArrayRef{dim_array_02, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_02, 2}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -278,7 +287,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_03[2] = {0, 3}; - dim_list = optional>(ArrayRef{dim_array_03, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_03, 2}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -296,7 +306,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_12[2] = {1, 2}; - dim_list = optional>(ArrayRef{dim_array_12, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_12, 2}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -314,7 +325,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_13[2] = {1, 3}; - dim_list = optional>(ArrayRef{dim_array_13, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_13, 2}); _apply_over_dim_list(in, dim_list); // clang-format off 
EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -332,7 +344,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_23[2] = {2, 3}; - dim_list = optional>(ArrayRef{dim_array_23, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_23, 2}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -351,11 +364,12 @@ TEST(ReduceUtilTest, ApplyOverDimListLength2) { TEST(ReduceUtilTest, ApplyOverDimListLength3) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_012[3] = {0, 1, 2}; - dim_list = optional>(ArrayRef{dim_array_012, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_012, 3}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -373,7 +387,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength3) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_013[3] = {0, 1, 3}; - dim_list = optional>(ArrayRef{dim_array_013, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_013, 3}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -391,7 +406,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength3) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_023[3] = {0, 2, 3}; - dim_list = optional>(ArrayRef{dim_array_023, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_023, 3}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -409,7 +425,8 @@ TEST(ReduceUtilTest, ApplyOverDimListLength3) { in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_123[3] = {1, 2, 3}; - dim_list = optional>(ArrayRef{dim_array_123, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_123, 3}); _apply_over_dim_list(in, dim_list); // clang-format off EXPECT_TENSOR_EQ(in, tf.make({2, 4, 5, 3}, { @@ -428,11 +445,12 @@ TEST(ReduceUtilTest, ApplyOverDimListLength3) { TEST(ReduceUtilTest, ApplyOverDimListLength4) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.ones({2, 4, 5, 3}); int64_t dim_array_0123[4] = {0, 1, 2, 3}; - dim_list = optional>(ArrayRef{dim_array_0123, 4}); + dim_list = + std::optional>(ArrayRef{dim_array_0123, 4}); _apply_over_dim_list(in, dim_list); EXPECT_TENSOR_EQ(in, tf.zeros({2, 4, 5, 3})); } @@ -447,7 +465,7 @@ TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDim) { TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListNull) { TensorFactory tf; - optional> null_dim_list; + std::optional> null_dim_list; Tensor in = tf.ones({}); _apply_over_dim_list(in, null_dim_list); @@ -456,7 +474,7 @@ TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListNull) { TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListEmpty) { TensorFactory tf; - optional> empty_dim_list{ArrayRef{}}; + std::optional> empty_dim_list{ArrayRef{}}; Tensor in = tf.ones({}); _apply_over_dim_list(in, empty_dim_list); @@ -466,8 +484,8 @@ TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListEmpty) { TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListNonEmpty) { TensorFactory tf; int64_t dim_array_0[1] = {0}; - optional> dim_list = - optional>(ArrayRef{dim_array_0, 1}); + std::optional> dim_list = + std::optional>(ArrayRef{dim_array_0, 1}); Tensor in = tf.ones({}); _apply_over_dim_list(in, dim_list), ""; @@ -476,7 +494,7 @@ TEST(ReduceUtilTest, ApplyOnZeroDimTensorOverDimListNonEmpty) { TEST(ReduceUtilTest, ApplyOnEmptyTensorOverDim) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = 
tf.zeros({2, 0, 5, 3}); Tensor out = tf.zeros({2, 5, 3}); @@ -513,14 +531,15 @@ TEST(ReduceUtilTest, ApplyOnEmptyTensorOverDim) { TEST(ReduceUtilTest, ApplyOnEmptyTensorOverDimList) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 0, 5, 3}); Tensor out = tf.zeros({5, 3}); // dim list = {0, 1} int64_t dim_array_01[2] = {0, 1}; - dim_list = optional>(ArrayRef{dim_array_01, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_01, 2}); EXPECT_TRUE(in.numel() == 0); EXPECT_TRUE(out.numel() == 15 && out.numel() == get_out_numel(in, dim_list)); @@ -542,7 +561,8 @@ TEST(ReduceUtilTest, ApplyOnEmptyTensorOverDimList) { // dim list = {0, 2} int64_t dim_array_02[2] = {0, 2}; - dim_list = optional>(ArrayRef{dim_array_02, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_02, 2}); EXPECT_TRUE(in.numel() == 0); EXPECT_TRUE(get_out_numel(in, dim_list) == 0); @@ -554,29 +574,33 @@ TEST(ReduceUtilTest, ApplyOnEmptyTensorOverDimList) { TEST(ReduceUtilTest, ApplyOverDimListInvalid) { TensorFactory tf; - optional> dim_list; + std::optional> dim_list; Tensor in = tf.zeros({2, 4, 5, 3}); int64_t dim_array_09[2] = {0, 9}; - dim_list = optional>(ArrayRef{dim_array_09, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_09, 2}); ET_EXPECT_DEATH( apply_over_dim_list([](size_t in_ix) { return; }, in, dim_list, 0), ""); int64_t dim_array_neg[3] = {0, -5, 3}; - dim_list = optional>(ArrayRef{dim_array_neg, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_neg, 3}); ET_EXPECT_DEATH( apply_over_dim_list([](size_t in_ix) { return; }, in, dim_list, 0), ""); int64_t dim_array_011[3] = {0, 1, 1}; - dim_list = optional>(ArrayRef{dim_array_011, 3}); + dim_list = + std::optional>(ArrayRef{dim_array_011, 3}); ET_EXPECT_DEATH( apply_over_dim_list([](size_t in_ix) { return; }, in, dim_list, 0), ""); int64_t dim_array_1_3[2] = {1, -3}; - dim_list = optional>(ArrayRef{dim_array_1_3, 2}); + dim_list = + std::optional>(ArrayRef{dim_array_1_3, 2}); ET_EXPECT_DEATH( apply_over_dim_list([](size_t in_ix) { return; }, in, dim_list, 0), ""); diff --git a/kernels/quantized/test/op_add_test.cpp b/kernels/quantized/test/op_add_test.cpp index fdf38cc8255..97174ff12fa 100644 --- a/kernels/quantized/test/op_add_test.cpp +++ b/kernels/quantized/test/op_add_test.cpp @@ -191,7 +191,7 @@ TEST(OpQuantizeAddTest, ConsitencyWithReferencePattern) { Tensor qinput2 = tfo.zeros({3, 5}); Tensor qoutput = tfo.zeros({3, 5}); - optional out_dtype = optional(); + std::optional out_dtype = std::optional(); KernelRuntimeContext context{}; // q -> qadd -> dq diff --git a/kernels/quantized/test/op_dequantize_test.cpp b/kernels/quantized/test/op_dequantize_test.cpp index 4a0c195e3ab..ed63d8fe87f 100644 --- a/kernels/quantized/test/op_dequantize_test.cpp +++ b/kernels/quantized/test/op_dequantize_test.cpp @@ -51,7 +51,7 @@ void test_dtype() { quant_min, quant_max, DTYPE, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -89,7 +89,7 @@ void test_output_dtype() { quant_min, quant_max, ScalarType::Byte, - optional(OUT_DTYPE), + std::optional(OUT_DTYPE), out); EXPECT_TENSOR_EQ(out, expected); @@ -122,7 +122,7 @@ TEST(OpDequantizeOutTest, HalfOutput) { quant_min, quant_max, ScalarType::Byte, - optional(ScalarType::Half), + std::optional(ScalarType::Half), out); // The expected result should be (10 - 100000) * 0.5 = -49995 @@ -149,7 +149,7 @@ TEST(OpDequantizeOutTest, DoubleOutput) { quant_min, quant_max, ScalarType::Byte, - optional(ScalarType::Double), + std::optional(ScalarType::Double), 
out); // The expected result should be (10 - 100000) * 0.5 = -49995 @@ -178,7 +178,7 @@ TEST(OpDequantizeOutTest, NonWholeNumbers) { quant_min, quant_max, ScalarType::Byte, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -207,7 +207,7 @@ TEST(OpDequantizeOutTest, TensorArgOverload) { quant_min, quant_max, ScalarType::Byte, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -238,7 +238,7 @@ void test_per_channel_dtype() { quant_min, quant_max, DTYPE, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -259,7 +259,7 @@ void test_per_channel_dtype() { quant_min, quant_max, DTYPE, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -281,7 +281,7 @@ void test_per_channel_dtype() { quant_min, quant_max, DTYPE, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); @@ -308,7 +308,7 @@ void test_per_channel_dtype() { quant_min, quant_max, DTYPE, - optional(), + std::optional(), out); EXPECT_TENSOR_EQ(out, expected); diff --git a/kernels/quantized/test/op_embedding_test.cpp b/kernels/quantized/test/op_embedding_test.cpp index 5d5ad45ace8..b7585ebab7f 100644 --- a/kernels/quantized/test/op_embedding_test.cpp +++ b/kernels/quantized/test/op_embedding_test.cpp @@ -150,7 +150,7 @@ TEST(OpQuantizedEmbeddingTest, ConsitencyWithReferencePattern) { quant_min, quant_max, ScalarType::Byte, - optional(), + std::optional(), weight); embedding_out( diff --git a/kernels/quantized/test/op_mixed_linear_test.cpp b/kernels/quantized/test/op_mixed_linear_test.cpp index e659b41151e..99734375d2f 100644 --- a/kernels/quantized/test/op_mixed_linear_test.cpp +++ b/kernels/quantized/test/op_mixed_linear_test.cpp @@ -48,8 +48,8 @@ void test_dtype() { Tensor weight_scales = tf.make( /*sizes=*/{2}, /*data=*/{0.2, 0.4}); - const optional opt_weight_zp{}; - const optional opt_dtype_out{}; + const std::optional opt_weight_zp{}; + const std::optional opt_dtype_out{}; Tensor out = tf_out.zeros({1, 2}); @@ -101,8 +101,8 @@ void test_dtype_partials() { Tensor weight_scales = tf.make( /*sizes=*/{2, 2}, /*data=*/{0.2, 1, 0.4, 0.5}); - const optional opt_weight_zp{}; - const optional opt_dtype_out{}; + const std::optional opt_weight_zp{}; + const std::optional opt_dtype_out{}; Tensor out = tf_out.zeros({1, 2}); diff --git a/kernels/quantized/test/op_mixed_mm_test.cpp b/kernels/quantized/test/op_mixed_mm_test.cpp index 8051f299fbd..f6e34fdced4 100644 --- a/kernels/quantized/test/op_mixed_mm_test.cpp +++ b/kernels/quantized/test/op_mixed_mm_test.cpp @@ -47,7 +47,7 @@ void test_dtype() { Tensor weight_scales = tf.make( /*sizes=*/{3}, /*data=*/{0.2, 0.4, 0.5}); - const optional opt_weight_zp{}; + const std::optional opt_weight_zp{}; Tensor out = tf.zeros({1, 2}); diff --git a/kernels/test/op__clone_dim_order_test.cpp b/kernels/test/op__clone_dim_order_test.cpp index f009ce1b195..96e295bfab0 100644 --- a/kernels/test/op__clone_dim_order_test.cpp +++ b/kernels/test/op__clone_dim_order_test.cpp @@ -354,7 +354,7 @@ TEST_F(OpDimOrderCloneTest, PreserveChannelsLast) { Tensor ret = op__clone_dim_order_out( /*self*/ x, /*non_blocking*/ false, - /*dim_order*/ executorch::aten::nullopt, + /*dim_order*/ std::nullopt, out); EXPECT_TENSOR_EQ(out, expected); diff --git a/kernels/test/op__to_dim_order_copy_test.cpp b/kernels/test/op__to_dim_order_copy_test.cpp index 4a8afe51267..e6aa766edeb 100644 --- a/kernels/test/op__to_dim_order_copy_test.cpp +++ b/kernels/test/op__to_dim_order_copy_test.cpp @@ -202,7 +202,7 @@ class OpToDimOrderCopyTest 
: public OperatorTest { op = "op__to_dim_order_copy_out" opt_setup_params = """ bool non_blocking = false; - optional memory_format; + std::optional memory_format; """ opt_extra_params = "non_blocking, memory_format," out_args = "out_shape, dynamism" @@ -653,7 +653,7 @@ TEST_F(OpToDimOrderCopyTest, PreserveChanneslLast) { Tensor ret = op__to_dim_order_copy_out( /*self*/ x, /*non_blocking*/ false, - /*dim_order*/ executorch::aten::nullopt, + /*dim_order*/ std::nullopt, out); EXPECT_TENSOR_EQ(out, expected); diff --git a/kernels/test/op_any_test.cpp b/kernels/test/op_any_test.cpp index 1853b96ab7d..976be801ba8 100644 --- a/kernels/test/op_any_test.cpp +++ b/kernels/test/op_any_test.cpp @@ -31,7 +31,7 @@ class OpAnyOutTest : public OperatorTest { Tensor& op_any_dims_out( const Tensor& input, - optional> dim, + std::optional> dim, bool keepdim, Tensor& out) { return torch::executor::aten::any_outf(context_, input, dim, keepdim, out); @@ -129,7 +129,7 @@ TEST_F(OpAnyOutTest, SmokeTestDims) { Tensor self = tfBool.make({2, 3, 1}, {true, false, true, true, false, false}); int64_t dims[3] = {0, 2}; - optional> opt_dim_list{ArrayRef{dims, 2}}; + std::optional> opt_dim_list{ArrayRef{dims, 2}}; bool keepdim = true; Tensor out = tfBool.zeros({1, 3, 1}); Tensor out_expected = tfBool.make({1, 3, 1}, {true, false, true}); diff --git a/kernels/test/op_argmax_test.cpp b/kernels/test/op_argmax_test.cpp index 21f7be35e85..a2a35026c4c 100644 --- a/kernels/test/op_argmax_test.cpp +++ b/kernels/test/op_argmax_test.cpp @@ -26,7 +26,7 @@ class OpArgmaxTest : public OperatorTest { protected: Tensor& op_argmax_out( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out) { return torch::executor::aten::argmax_outf(context_, in, dim, keepdim, out); @@ -83,7 +83,7 @@ TEST_F(OpArgmaxTest, SanityCheckNullDim) { Tensor out = tf.zeros({}); Tensor expected = tf.make({}, {0}); - optional dim; + std::optional dim; Tensor ret = op_argmax_out(in, dim, false, out); EXPECT_TENSOR_EQ(out, ret); diff --git a/kernels/test/op_argmin_test.cpp b/kernels/test/op_argmin_test.cpp index 3478c21675b..d51762c0350 100644 --- a/kernels/test/op_argmin_test.cpp +++ b/kernels/test/op_argmin_test.cpp @@ -26,7 +26,7 @@ class OpArgminTest : public OperatorTest { protected: Tensor& op_argmin_out( const Tensor& in, - optional dim, + std::optional dim, bool keepdim, Tensor& out) { return torch::executor::aten::argmin_outf(context_, in, dim, keepdim, out); @@ -83,7 +83,7 @@ TEST_F(OpArgminTest, SanityCheckNullDim) { Tensor out = tf.zeros({}); Tensor expected = tf.make({}, {2}); - optional dim; + std::optional dim; Tensor ret = op_argmin_out(in, dim, false, out); EXPECT_TENSOR_EQ(out, ret); diff --git a/kernels/test/op_as_strided_copy_test.cpp b/kernels/test/op_as_strided_copy_test.cpp index cb0191c69a8..6843763f877 100644 --- a/kernels/test/op_as_strided_copy_test.cpp +++ b/kernels/test/op_as_strided_copy_test.cpp @@ -31,7 +31,7 @@ class OpAsStridedCopyOutTest : public OperatorTest { const Tensor& self, ArrayRef size, ArrayRef stride, - optional storage_offset, + std::optional storage_offset, Tensor& out) { return torch::executor::aten::as_strided_copy_outf( context_, self, size, stride, storage_offset, out); @@ -48,7 +48,7 @@ class OpAsStridedCopyOutTest : public OperatorTest { Tensor out = tf.zeros(out_sizes); // Valid input should give the expected output - optional storage_offset; + std::optional storage_offset; int64_t sizes[3] = {2, 2, 2}; int64_t stride[3] = {1, 2, 3}; op_as_strided_copy_out( @@ -78,7 +78,7 @@ class 
OpAsStridedCopyOutTest : public OperatorTest { Tensor in = tf.ones(in_sizes); Tensor out = tf.zeros(out_sizes); - optional storage_offset; + std::optional storage_offset; int64_t sizes[3] = {2, 2, 2}; int64_t stride[3] = {1, 2, 3}; @@ -149,7 +149,7 @@ void OpAsStridedCopyOutTest::test_detach_copy_out() { Tensor out = tf.zeros(out_sizes); // Valid input should give the expected output - optional storage_offset = 2; + std::optional storage_offset = 2; int64_t sizes[3] = {2, 2, 2}; int64_t stride[3] = {1, 2, 3}; op_as_strided_copy_out( @@ -174,7 +174,7 @@ void OpAsStridedCopyOutTest::test_detach_copy_out() { Tensor out = tf.zeros(out_sizes); // Valid input should give the expected output - optional storage_offset = 2; + std::optional storage_offset = 2; int64_t sizes[3] = {2, 2, 2}; int64_t stride[3] = {1, 2, 3}; op_as_strided_copy_out( @@ -214,7 +214,7 @@ TEST_F(OpAsStridedCopyOutTest, MismatchedInputDtypesDies) { Tensor in = tf_byte.make(in_sizes, {1, 2, 3, 4, 5, 6, 7, 8, 9}); Tensor out = tf_char.zeros(out_sizes); - optional storage_offset; + std::optional storage_offset; int64_t sizes[3] = {2, 2, 2}; int64_t stride[3] = {1, 2, 3}; @@ -237,7 +237,7 @@ op = "op_as_strided_copy_out" opt_setup_params = f""" {declare_array_ref([2, 2, 2], "int64_t", "size")} {declare_array_ref([1, 2, 3], "int64_t", "stride")} - optional storage_offset; + std::optional storage_offset; """ opt_extra_params = "size, stride, storage_offset," dtype = "ScalarType::Float" @@ -276,7 +276,7 @@ TEST_F(OpAsStridedCopyOutTest, DynamicShapeUpperBoundSameAsExpected) { ArrayRef size(sizev.data(), sizev.size()); std::vector stridev = {1, 2, 3}; ArrayRef stride(stridev.data(), stridev.size()); - optional storage_offset; + std::optional storage_offset; Tensor out = tf.zeros({2, 2, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); @@ -317,7 +317,7 @@ TEST_F(OpAsStridedCopyOutTest, DynamicShapeUpperBoundLargerThanExpected) { ArrayRef size(sizev.data(), sizev.size()); std::vector stridev = {1, 2, 3}; ArrayRef stride(stridev.data(), stridev.size()); - optional storage_offset; + std::optional storage_offset; Tensor out = tf.zeros({5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); @@ -361,7 +361,7 @@ TEST_F(OpAsStridedCopyOutTest, DynamicShapeUnbound) { ArrayRef size(sizev.data(), sizev.size()); std::vector stridev = {1, 2, 3}; ArrayRef stride(stridev.data(), stridev.size()); - optional storage_offset; + std::optional storage_offset; Tensor out = tf.zeros( {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND); diff --git a/kernels/test/op_cdist_forward_test.cpp b/kernels/test/op_cdist_forward_test.cpp index 73be637569a..bcc181843a9 100644 --- a/kernels/test/op_cdist_forward_test.cpp +++ b/kernels/test/op_cdist_forward_test.cpp @@ -28,7 +28,7 @@ Tensor& op_cdist_forward_out( const Tensor& x1, const Tensor& x2, double p, - optional compute_mode, + std::optional compute_mode, Tensor& out) { KernelRuntimeContext context{}; return torch::executor::aten::_cdist_forward_outf( @@ -58,7 +58,7 @@ class OpCdistForwardOutTest : public ::testing::Test { Tensor x2 = tf.make( {1, 2, 5, 3}, {0, 1, 2, 3, 5, -3, 7, 1, 6, 2, -1, 5, 1, 1, -2, 4, 3, 2, -1, 5, 1, 1, -2, 1, 5, 4, 3, 2, -1, 5}); - optional compute_mode = optional(); + std::optional compute_mode = std::optional(); Tensor out = tf.zeros({2, 2, 4, 5}); diff --git a/kernels/test/op_clamp_test.cpp b/kernels/test/op_clamp_test.cpp index 81138fc8a55..9bb6cb8a783 100644 --- a/kernels/test/op_clamp_test.cpp +++ b/kernels/test/op_clamp_test.cpp @@ -22,11 +22,11 @@ 
using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::nullopt; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using std::optional; +using ::std::nullopt; +using ::std::optional; using torch::executor::testing::TensorFactory; using OptScalar = std::optional; @@ -44,8 +44,8 @@ class OpClampOutTest : public OperatorTest { protected: Tensor& op_clamp_out( const Tensor& self, - const optional& min, - const optional& max, + const std::optional& min, + const std::optional& max, Tensor& out) { return torch::executor::aten::clamp_outf(context_, self, min, max, out); } @@ -291,8 +291,8 @@ class OpClampTensorOutTest : public OperatorTest { protected: Tensor& op_clamp_tensor_out( const Tensor& self, - const optional& min, - const optional& max, + const std::optional& min, + const std::optional& max, Tensor& out) { executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::clamp_outf(context, self, min, max, out); diff --git a/kernels/test/op_clone_test.cpp b/kernels/test/op_clone_test.cpp index 43e4576548a..009c3777ef1 100644 --- a/kernels/test/op_clone_test.cpp +++ b/kernels/test/op_clone_test.cpp @@ -26,7 +26,7 @@ class OpCloneTest : public OperatorTest { protected: Tensor& op_clone_out( const Tensor& self, - optional memory_format, + std::optional memory_format, Tensor& out) { return torch::executor::aten::clone_outf( context_, self, memory_format, out); @@ -44,7 +44,7 @@ class OpCloneTest : public OperatorTest { // nullopt or MemoryFormat::Contiguous. Tensor out_nullopt_ret = op_clone_out( /*self=*/input, - /*memory_format=*/executorch::aten::nullopt, + /*memory_format=*/std::nullopt, /*out=*/out_nullopt); Tensor out_contiguous_ret = op_clone_out( /*self=*/input, @@ -65,7 +65,7 @@ class OpCloneTest : public OperatorTest { TensorFactory tf; Tensor input = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{}); Tensor out = tf.zeros({3, 0, 1, 2}); - op_clone_out(input, /*memory_format=*/executorch::aten::nullopt, out); + op_clone_out(input, /*memory_format=*/std::nullopt, out); // check a and out share same value, but are different object EXPECT_TENSOR_EQ(input, out); } @@ -95,8 +95,7 @@ TEST_F(OpCloneTest, MismatchedSizesDie) { Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6}); Tensor out = tf.zeros({3, 2, 1, 1}); ET_EXPECT_KERNEL_FAILURE( - context_, - op_clone_out(input, /*memory_format=*/executorch::aten::nullopt, out)); + context_, op_clone_out(input, /*memory_format=*/std::nullopt, out)); } TEST_F(OpCloneTest, MismatchedTypesDie) { @@ -106,8 +105,7 @@ TEST_F(OpCloneTest, MismatchedTypesDie) { tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6}); Tensor out = tf_out.zeros({3, 1, 1, 2}); ET_EXPECT_KERNEL_FAILURE( - context_, - op_clone_out(input, /*memory_format=*/executorch::aten::nullopt, out)); + context_, op_clone_out(input, /*memory_format=*/std::nullopt, out)); } // Only contiguous memory is supported, the memory type other than nullopt or diff --git a/kernels/test/op_convolution_test.cpp b/kernels/test/op_convolution_test.cpp index 1e0e406af44..1b3562442e7 100644 --- a/kernels/test/op_convolution_test.cpp +++ b/kernels/test/op_convolution_test.cpp @@ -27,7 +27,7 @@ class OpConvOutTest : public OperatorTest { Tensor& op_convolution_out( const Tensor& input, const Tensor& weight, - const optional& bias, + const std::optional& bias, ArrayRef stride, ArrayRef padding, ArrayRef dilation, @@ -113,7 +113,7 @@ class OpConvOutTest : public 
OperatorTest { tf.make({4, 2, 3}, {8.1, 6.6, 1.6, 4.9, 3.8, 6.6, 4.6, 2.8, 2.4, 1.3, 3.6, 3.9, 8.1, 8.4, 5.4, 5.1, 8.9, 9.9, 7.9, 1.0, 1.1, 8.2, 6.3, 7.0}); - optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); + std::optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); Tensor expected = tf.make( {1, 4, 2}, {172.11, 237.72, 102.24, 132.28, 248.51, 320.18, 189.38, 236.07}); @@ -205,7 +205,7 @@ TEST_F(OpConvCorrectnessTest, NonZeroPadding) { Tensor weight = tf.make( {4, 2, 3}, {8.1, 6.6, 1.6, 4.9, 3.8, 6.6, 4.6, 2.8, 2.4, 1.3, 3.6, 3.9, 8.1, 8.4, 5.4, 5.1, 8.9, 9.9, 7.9, 1.0, 1.1, 8.2, 6.3, 7.0}); - optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); + std::optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); Tensor expected = tf.make( {1, 4, 4}, {61.78, @@ -278,7 +278,7 @@ TEST_F(OpConvCorrectnessTest, MultipleInputBatches) { Tensor weight = tf.make( {4, 2, 3}, {1.1, 8.2, 6.3, 7.0, 6.5, 2.5, 9.2, 9.9, 8.1, 9.8, 4.8, 1.3, 2.6, 8.9, 1.1, 8.7, 2.3, 3.5, 4.2, 7.1, 5.0, 3.9, 3.3, 4.1}); - optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); + std::optional bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0})); Tensor expected = tf.make( {3, 4, 4}, {54.77, 168.21, 208.92, 57.93, 55.01, 241.19, 312.18, 121.3, 34.59, 143.87, 201.88, 78.29, 60.39, 154.12, 194.07, 51.73, @@ -362,7 +362,7 @@ TEST_F(OpConvCorrectnessTest, 2DSanityCheck) { 4.0, 8.3, 5.2, 4.0, 4.8, 7.6, 7.1, 5.9, 9.1, 9.6, 3.9, 6.8, 7.6, 2.5, 8.1, 7.3, 7.5, 7.5, 9.3, 5.6, 5.2, 4.7, 4.5, 8.7, 8.7, 1.3, 4.1, 4.5, 4.9, 6.5, 7.9, 4.6, 7.0, 8.0, 1.6, 3.5}); - optional bias(tf.make({2}, {1.0, 1.0})); + std::optional bias(tf.make({2}, {1.0, 1.0})); Tensor expected = tf.make( {1, 2, 4, 4}, {642.33, 714.6, 687.96, 717.12, 859.79, 939.27, 996.79, 1189.59, @@ -426,7 +426,7 @@ TEST_F(OpConvCorrectnessTest, 2DSanityCheckChannelsLast) { 4.0, 8.3, 5.2, 4.0, 4.8, 7.6, 7.1, 5.9, 9.1, 9.6, 3.9, 6.8, 7.6, 2.5, 8.1, 7.3, 7.5, 7.5, 9.3, 5.6, 5.2, 4.7, 4.5, 8.7, 8.7, 1.3, 4.1, 4.5, 4.9, 6.5, 7.9, 4.6, 7.0, 8.0, 1.6, 3.5}); - optional bias(tf.make({2}, {1.0, 1.0})); + std::optional bias(tf.make({2}, {1.0, 1.0})); Tensor expected = tf.make_channels_last( {1, 2, 4, 4}, {624.92, 656.07, 710.91, 800.45, 622.48, 596.14, 831.26, 882.43, @@ -478,7 +478,7 @@ TEST_F(OpConvCorrectnessTest, InvalidInputShape) { Tensor input = tf.ones({2, 4, 4, 5}); Tensor weight = tf.ones({8, 3, 2, 2}); - optional bias; + std::optional bias; Tensor out = tf.zeros({2, 8, 3, 4}); int64_t stride[1] = {1}; @@ -521,7 +521,7 @@ TEST_F(OpConvCorrectnessTest, TransposedDefaultParams) { Tensor input = tf.full({2, 4, 3, 2}, 2.0); Tensor weight = tf.full({4, 1, 2, 2}, 0.5); - optional bias; + std::optional bias; Tensor out = tf.full({2, 2, 4, 3}, 0.7); Tensor expected = tf.make({2, 2, 4, 3}, {2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4, @@ -619,7 +619,7 @@ TEST_F(OpConvCorrectnessTest, TransposedDefaultParamsChannelsLast) { Tensor input = tf.full_channels_last({2, 4, 3, 2}, 2.0); Tensor weight = tf.full_channels_last({4, 1, 2, 2}, 0.5); - optional bias; + std::optional bias; Tensor out = tf.full_channels_last({2, 2, 4, 3}, 0.7); Tensor expected = tf.make({2, 2, 4, 3}, {2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4, diff --git a/kernels/test/op_cumsum_test.cpp b/kernels/test/op_cumsum_test.cpp index 3e0ec164d04..665ec9098b6 100644 --- a/kernels/test/op_cumsum_test.cpp +++ b/kernels/test/op_cumsum_test.cpp @@ -28,7 +28,7 @@ class OpCumSumOutTest : public OperatorTest { Tensor& op_cumsum_out( const Tensor& self, int64_t dim, - optional enforced_dtype, + std::optional enforced_dtype, Tensor& out) { return 
torch::executor::aten::cumsum_outf( context_, self, dim, enforced_dtype, out); @@ -48,7 +48,7 @@ class OpCumSumOutTest : public OperatorTest { // clang-format on Tensor out = tf_out.zeros({2, 4}); - optional enforced_dtype = OUT_DTYPE; + std::optional enforced_dtype = OUT_DTYPE; op_cumsum_out(in, /*dim=*/1, enforced_dtype, out); // clang-format off @@ -85,7 +85,7 @@ class OpCumSumOutTest : public OperatorTest { Tensor in = tf_float.make({1, 2}, {1, INFINITY}); Tensor out = tf_out.zeros({1, 2}); - optional enforced_dtype = OUT_DTYPE; + std::optional enforced_dtype = OUT_DTYPE; op_cumsum_out(in, /*dim=*/1, enforced_dtype, out); EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 2}, {1, INFINITY})); @@ -115,7 +115,7 @@ TEST_F(OpCumSumOutTest, MismatchedDimensionsDies) { Tensor out = tff.zeros({1, 3}); // Dim out of bounds - optional enforced_dtype; + std::optional enforced_dtype; ET_EXPECT_KERNEL_FAILURE( context_, op_cumsum_out(in, /*dim=*/3, enforced_dtype, out)); @@ -149,7 +149,7 @@ TEST_F(OpCumSumOutTest, TypeCastCornerCases) { // Cast floating point to int Tensor in = tf_float.make({1, 2}, {1.1, 2.2}); Tensor out = tf_int.zeros({1, 2}); - optional enforced_dtype = ScalarType::Int; + std::optional enforced_dtype = ScalarType::Int; op_cumsum_out(in, /*dim=*/1, enforced_dtype, out); EXPECT_TENSOR_CLOSE(out, tf_int.make({1, 2}, {1, 3})); diff --git a/kernels/test/op_empty_test.cpp b/kernels/test/op_empty_test.cpp index 23173b1feae..74709931e80 100644 --- a/kernels/test/op_empty_test.cpp +++ b/kernels/test/op_empty_test.cpp @@ -28,7 +28,7 @@ class OpEmptyOutTest : public OperatorTest { protected: Tensor& op_empty_out( IntArrayRef size, - optional memory_format, + std::optional memory_format, Tensor& out) { return torch::executor::aten::empty_outf( context_, size, memory_format, out); @@ -39,7 +39,7 @@ class OpEmptyOutTest : public OperatorTest { TensorFactory tf; std::vector sizes(size_int32_t.begin(), size_int32_t.end()); auto aref = executorch::aten::ArrayRef(sizes.data(), sizes.size()); - optional memory_format; + std::optional memory_format; Tensor out = tf.ones(size_int32_t); op_empty_out(aref, memory_format, out); @@ -60,7 +60,7 @@ TEST_F(OpEmptyOutTest, DynamicShapeUpperBoundSameAsExpected) { int64_t sizes[2] = {3, 2}; auto sizes_aref = executorch::aten::ArrayRef(sizes); - optional memory_format; + std::optional memory_format; Tensor out = tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); op_empty_out(sizes_aref, memory_format, out); @@ -71,7 +71,7 @@ TEST_F(OpEmptyOutTest, DynamicShapeUpperBoundLargerThanExpected) { int64_t sizes[2] = {3, 2}; auto sizes_aref = executorch::aten::ArrayRef(sizes); - optional memory_format; + std::optional memory_format; Tensor out = tf.ones({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND); op_empty_out(sizes_aref, memory_format, out); @@ -85,7 +85,7 @@ TEST_F(OpEmptyOutTest, DynamicShapeUnbound) { int64_t sizes[2] = {3, 2}; auto sizes_aref = executorch::aten::ArrayRef(sizes); - optional memory_format; + std::optional memory_format; Tensor out = tf.ones({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND); op_empty_out(sizes_aref, memory_format, out); diff --git a/kernels/test/op_full_like_test.cpp b/kernels/test/op_full_like_test.cpp index 6e7692f5347..2aa7b2d1558 100644 --- a/kernels/test/op_full_like_test.cpp +++ b/kernels/test/op_full_like_test.cpp @@ -30,7 +30,7 @@ class OpFullLikeTest : public OperatorTest { Tensor& op_full_like_out( const Tensor& self, const Scalar& fill_value, - optional memory_format, + 
std::optional memory_format, Tensor& out) { return torch::executor::aten::full_like_outf( context_, self, fill_value, memory_format, out); @@ -73,7 +73,7 @@ class OpFullLikeTest : public OperatorTest { const std::vector sizes = {2, 2}; Tensor in = tf.zeros(sizes); Tensor out = tf.zeros(sizes); - optional memory_format; + std::optional memory_format; ET_EXPECT_KERNEL_FAILURE( context_, op_full_like_out(in, bad_value, memory_format, out)); @@ -203,7 +203,7 @@ TEST_F(OpFullLikeTest, DynamicShapeUnbound) { TEST_F(OpFullLikeTest, HalfSupport) { TensorFactory tf; - optional memory_format; + std::optional memory_format; Tensor in = tf.ones({2, 3}); Tensor out = tf.zeros({2, 3}); diff --git a/kernels/test/op_index_put_test.cpp b/kernels/test/op_index_put_test.cpp index f1021d9ad61..b006eec712e 100644 --- a/kernels/test/op_index_put_test.cpp +++ b/kernels/test/op_index_put_test.cpp @@ -23,7 +23,7 @@ using executorch::aten::Tensor; using std::optional; using torch::executor::testing::TensorFactory; -using OptTensorArrayRef = ArrayRef>; +using OptTensorArrayRef = ArrayRef>; class OpIndexPutOutTest : public OperatorTest { protected: @@ -72,14 +72,14 @@ class OpIndexPutOutTest : public OperatorTest { // First, index_put to make everything equal to 1 // indices [0, 1, :], [1, 1, :], [2, 1, :] - optional indices[] = { - optional(tfl.make({1, 3}, {0, 1, 2})), - optional(tfl.make({1, 3}, {1, 1, 1})), + std::optional indices[] = { + std::optional(tfl.make({1, 3}, {0, 1, 2})), + std::optional(tfl.make({1, 3}, {1, 1, 1})), }; // bool representation of the same index list - optional indices_bool[] = { - optional(tfb.make({3}, {true, true, true})), - optional(tfb.make({2}, {false, true})), + std::optional indices_bool[] = { + std::optional(tfb.make({3}, {true, true, true})), + std::optional(tfb.make({2}, {false, true})), }; Tensor values = tf.ones({3, 4}); @@ -104,14 +104,14 @@ class OpIndexPutOutTest : public OperatorTest { // Then, index_put to make everything equal to 0 // indices [0, 1, :], [1, 0, :], [2, 0, :] - optional indices_alt[] = { - optional(tfl.make({1, 3}, {0, 1, 2})), - optional(tfl.make({1, 3}, {0, 0, 0})), + std::optional indices_alt[] = { + std::optional(tfl.make({1, 3}, {0, 1, 2})), + std::optional(tfl.make({1, 3}, {0, 0, 0})), }; // bool representation of the same index list - optional indices_alt_bool[] = { - optional(tfb.make({3}, {true, true, true})), - optional(tfb.make({2}, {true, false})), + std::optional indices_alt_bool[] = { + std::optional(tfb.make({3}, {true, true, true})), + std::optional(tfb.make({2}, {true, false})), }; Tensor values_alt = tf.zeros({3, 4}); @@ -177,10 +177,10 @@ class OpIndexPutOutTest : public OperatorTest { 0.518521785736084, 0.6976675987243652, 0.800011396408081, 0.16102945804595947, 0.28226858377456665, 0.6816085577011108, 0.9151939749717712, 0.39709991216659546, 0.8741558790206909}); - optional indices[] = { - optional(tf_indices.make({1}, {1})), - optional(tf_indices.make({1}, {0})), - optional(tf_indices.make({2}, {1, 2}))}; + std::optional indices[] = { + std::optional(tf_indices.make({1}, {1})), + std::optional(tf_indices.make({1}, {0})), + std::optional(tf_indices.make({2}, {1, 2}))}; Tensor values = tf.make({2}, {0.41940832138061523, 0.5529070496559143}); Tensor expected = tf.make( {2, 3, 4}, @@ -412,30 +412,30 @@ TEST_F(OpIndexPutOutTest, PutFrontDimAllIndexes) { // [1, 0, 1], [1, 0, 2]. This is expressed in various ways to test different // indexing expressions. 
- optional indices_long[] = { - optional(tfl.make({1}, {1})), - optional(tfl.make({1}, {0})), - optional(tfl.make({2}, {1, 2}))}; + std::optional indices_long[] = { + std::optional(tfl.make({1}, {1})), + std::optional(tfl.make({1}, {0})), + std::optional(tfl.make({2}, {1, 2}))}; - optional indices_int[] = { - optional(tfi.make({1}, {1})), - optional(tfi.make({1}, {0})), - optional(tfi.make({2}, {1, 2}))}; + std::optional indices_int[] = { + std::optional(tfi.make({1}, {1})), + std::optional(tfi.make({1}, {0})), + std::optional(tfi.make({2}, {1, 2}))}; - optional indices_negative[] = { - optional(tfl.make({1}, {-1})), - optional(tfl.make({1}, {0})), - optional(tfl.make({2}, {-3, -2}))}; + std::optional indices_negative[] = { + std::optional(tfl.make({1}, {-1})), + std::optional(tfl.make({1}, {0})), + std::optional(tfl.make({2}, {-3, -2}))}; - optional indices_bool[] = { - optional(tfb.make({2}, {false, true})), - optional(tfb.make({3}, {true, false, false})), - optional(tfl.make({2}, {-3, -2}))}; + std::optional indices_bool[] = { + std::optional(tfb.make({2}, {false, true})), + std::optional(tfb.make({3}, {true, false, false})), + std::optional(tfl.make({2}, {-3, -2}))}; - optional indices_mixed[] = { - optional(tfb.make({2}, {false, true})), - optional(tfl.make({1}, {0})), - optional(tfl.make({2}, {-3, -2}))}; + std::optional indices_mixed[] = { + std::optional(tfb.make({2}, {false, true})), + std::optional(tfl.make({1}, {0})), + std::optional(tfl.make({2}, {-3, -2}))}; // clang-format off Tensor values = tf.make( @@ -503,10 +503,10 @@ TEST_F(OpIndexPutOutTest, PutTwoValuesAtSameIndex) { // clang-format on // Try to select the value at the same index - optional indices[] = { - optional(tfl.make({1, 2}, {0, 0})), - optional(tfl.make({1, 2}, {1, 1})), - optional(tfl.make({1, 2}, {2, 2}))}; + std::optional indices[] = { + std::optional(tfl.make({1, 2}, {0, 0})), + std::optional(tfl.make({1, 2}, {1, 1})), + std::optional(tfl.make({1, 2}, {2, 2}))}; // clang-format off Tensor values = tf.make( @@ -575,13 +575,13 @@ TEST_F(OpIndexPutOutTest, IndicesFewerThanInputDimSupported) { // [1, 0, :], [1, 1, :]. This is expressed in various ways to test different // indexing expressions. 
- optional indices_long[] = { - optional(tfl.make({1}, {1})), - optional(tfl.make({2}, {0, 1}))}; + std::optional indices_long[] = { + std::optional(tfl.make({1}, {1})), + std::optional(tfl.make({2}, {0, 1}))}; - optional indices_mixed[] = { - optional(tfi.make({1}, {-1})), - optional(tfb.make({3}, {true, true, false}))}; + std::optional indices_mixed[] = { + std::optional(tfi.make({1}, {-1})), + std::optional(tfb.make({3}, {true, true, false}))}; // clang-format off Tensor values = tf.make( @@ -650,9 +650,9 @@ TEST_F(OpIndexPutOutTest, IndicesFewerThanInputDimSupportedSameValue) { // Try to select the input value at indices // [1, 0, :], [1, 1, :] - optional indices[] = { - optional(tfl.make({1}, {1})), - optional(tfl.make({2}, {0, 1}))}; + std::optional indices[] = { + std::optional(tfl.make({1}, {1})), + std::optional(tfl.make({2}, {0, 1}))}; // clang-format off Tensor values = tf.make( @@ -912,9 +912,9 @@ TEST_F(OpIndexPutOutTest, InvalidIndicesDtypeDies) { Tensor x = tf.zeros({2, 4, 7, 5}); // clang-format off - optional indices[] = { - optional(tff.make({3}, {1, 1, 1,})), - optional(tff.make({2}, {1, 2}))}; + std::optional indices[] = { + std::optional(tff.make({3}, {1, 1, 1,})), + std::optional(tff.make({2}, {1, 2}))}; // clang-format on Tensor out = tf.ones({2, 4, 7, 5}); @@ -938,9 +938,9 @@ TEST_F(OpIndexPutOutTest, InvalidIndicesShapesDies) { Tensor x = tf.zeros({2, 4, 7, 5}); // clang-format off - optional indices[] = { - optional(tfl.make({3}, {1, 1, 1,})), - optional(tfl.make({2}, {1, 2}))}; + std::optional indices[] = { + std::optional(tfl.make({3}, {1, 1, 1,})), + std::optional(tfl.make({2}, {1, 2}))}; Tensor out = tf.ones({2, 4, 7, 5}); // clang-format on @@ -964,9 +964,9 @@ TEST_F(OpIndexPutOutTest, NonLinearIndices) { Tensor x = tf.zeros({4, 4}); // clang-format off - optional indices[] = { - optional(tfl.make({2, 2}, {1, 1, 1, 1,})), - optional(tfl.make({1, 2}, {3, 0,}))}; + std::optional indices[] = { + std::optional(tfl.make({2, 2}, {1, 1, 1, 1,})), + std::optional(tfl.make({1, 2}, {3, 0,}))}; Tensor out = tf.ones({4, 4}); // clang-format on @@ -1052,9 +1052,9 @@ class OpIndexPutInplaceTest : public OperatorTest { }); // clang-format on - optional indices[] = { - optional(), - optional(tfl.make({2}, {0, 2})), + std::optional indices[] = { + std::optional(), + std::optional(tfl.make({2}, {0, 2})), }; // clang-format off diff --git a/kernels/test/op_index_test.cpp b/kernels/test/op_index_test.cpp index 787eb4612d8..49ae3d9a883 100644 --- a/kernels/test/op_index_test.cpp +++ b/kernels/test/op_index_test.cpp @@ -25,7 +25,7 @@ using executorch::aten::Tensor; using std::optional; using torch::executor::testing::TensorFactory; -using OptTensorArrayRef = ArrayRef>; +using OptTensorArrayRef = ArrayRef>; class OpIndexTensorOutTest : public OperatorTest { protected: diff --git a/kernels/test/op_mean_test.cpp b/kernels/test/op_mean_test.cpp index 65d21b45518..eef2942f143 100644 --- a/kernels/test/op_mean_test.cpp +++ b/kernels/test/op_mean_test.cpp @@ -29,9 +29,9 @@ class OpMeanOutTest : public OperatorTest { protected: Tensor& op_mean_out( const Tensor& self, - optional> dim, + std::optional> dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { return torch::executor::aten::mean_outf( context_, self, dim, keepdim, dtype, out); @@ -39,7 +39,7 @@ class OpMeanOutTest : public OperatorTest { Tensor& op_mean_dtype_out( const Tensor& self, - optional dtype, + std::optional dtype, Tensor& out) { return torch::executor::aten::mean_outf(context_, self, dtype, out); } 
@@ -63,11 +63,12 @@ class OpMeanOutTest : public OperatorTest { }); // clang-format on Tensor out = tf_out.zeros({2, 3, 1}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; // out-of-bound dim in dim list int64_t dims_1[1] = {3}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out)); @@ -101,9 +102,10 @@ class OpMeanOutTest : public OperatorTest { // dimension size mismatch when keepdim is true Tensor out = tf_out.zeros({2, 4}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; int64_t dims_1[1] = {1}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out)); @@ -136,8 +138,9 @@ class OpMeanOutTest : public OperatorTest { // keepdim=true should work Tensor out = tf_out.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype = OUT_DTYPE; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype = OUT_DTYPE; op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out); // clang-format off EXPECT_TENSOR_CLOSE(out, tf_out.make( @@ -193,11 +196,11 @@ class OpMeanOutTest : public OperatorTest { // empty/null dim list should work out = tf_out.zeros({1, 1, 1}); - optional> null_dim_list; + std::optional> null_dim_list; op_mean_out(self, null_dim_list, /*keepdim=*/true, dtype, out); EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {11.5})); - optional> empty_dim_list{ArrayRef{}}; + std::optional> empty_dim_list{ArrayRef{}}; op_mean_out(self, empty_dim_list, /*keepdim=*/true, dtype, out); EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {11.5})); @@ -229,8 +232,9 @@ class OpMeanOutTest : public OperatorTest { Tensor out = tf_float.zeros({1, 1, 4}); int64_t dims[2] = {0, 1}; - optional> optional_dim_list{ArrayRef{dims, 2}}; - optional dtype = OUT_DTYPE; + std::optional> optional_dim_list{ + ArrayRef{dims, 2}}; + std::optional dtype = OUT_DTYPE; op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out); EXPECT_TENSOR_CLOSE( out, @@ -322,8 +326,9 @@ TEST_F(OpMeanOutTest, MismatchedDTypesDies) { // keepdim=true should work Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype; // self tensor must have a floating point dtype when dtype is not specified ET_EXPECT_KERNEL_FAILURE( @@ -383,8 +388,9 @@ TEST_F(OpMeanOutTest, InfinityAndNANTest) { Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims[1] = {-1}; - optional> optional_dim_list{ArrayRef{dims, 1}}; - optional dtype; + std::optional> optional_dim_list{ + ArrayRef{dims, 1}}; + std::optional dtype; op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out); // clang-format off EXPECT_TENSOR_CLOSE(out, tf_float.make( diff --git a/kernels/test/op_native_dropout_test.cpp b/kernels/test/op_native_dropout_test.cpp index 931205f54a5..2558d0d918a 100644 --- a/kernels/test/op_native_dropout_test.cpp +++ b/kernels/test/op_native_dropout_test.cpp @@ -25,7 +25,7 @@ class OpNativeDropoutTest : public OperatorTest { void op_native_dropout_out( const Tensor& self, double prob, - executorch::aten::optional train, + ::std::optional train, Tensor& out, Tensor& 
mask) { torch::executor::aten::native_dropout_outf( diff --git a/kernels/test/op_native_layer_norm_test.cpp b/kernels/test/op_native_layer_norm_test.cpp index d8cc2d3b2e4..08299abe4dc 100644 --- a/kernels/test/op_native_layer_norm_test.cpp +++ b/kernels/test/op_native_layer_norm_test.cpp @@ -23,10 +23,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; using executorch::aten::IntArrayRef; -using executorch::aten::nullopt; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::nullopt; using std::optional; using torch::executor::testing::TensorFactory; @@ -37,8 +37,8 @@ class OpNativeLayerNormTest : public OperatorTest { ::std::tuple op_native_layer_norm_out( const Tensor& input, IntArrayRef normalized_shape, - const optional& weight, - const optional& bias, + const std::optional& weight, + const std::optional& bias, double eps, Tensor& out0, Tensor& out1, @@ -363,8 +363,8 @@ class OpNativeLayerNormTest : public OperatorTest { 0.13203048706054688, 0.30742281675338745, 0.6340786814689636}); - optional weight(tf.make({3}, {1.0, 1.0, 1.0})); - optional bias(tf.make({3}, {0.0, 0.0, 0.0})); + std::optional weight(tf.make({3}, {1.0, 1.0, 1.0})); + std::optional bias(tf.make({3}, {0.0, 0.0, 0.0})); Tensor expected = tf.make( {2, 3}, {0.16205203533172607, diff --git a/kernels/test/op_prod_test.cpp b/kernels/test/op_prod_test.cpp index d385fd7cb48..ed80626f3d8 100644 --- a/kernels/test/op_prod_test.cpp +++ b/kernels/test/op_prod_test.cpp @@ -31,7 +31,7 @@ Tensor& op_prod_int_out( const Tensor& self, int64_t dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::prod_outf( @@ -56,7 +56,7 @@ class OpProdOutTest : public ::testing::Test { tf_out; Tensor self = tf.make({2, 3}, {1, 2, 3, 4, 5, 6}); - optional dtype{}; + std::optional dtype{}; Tensor out = tf_out.zeros({}); Tensor out_expected = tf_out.make({}, {DTYPE == ScalarType::Bool ? 
1 : 720}); @@ -80,7 +80,7 @@ class OpProdIntOutTest : public ::testing::Test { Tensor self = tf.make({2, 3}, {1, 2, 3, 4, 5, 6}); int64_t dim = 0; bool keepdim = false; - optional dtype{}; + std::optional dtype{}; Tensor out = tf.zeros({3}); Tensor out_expected = tf.make({3}, {4, 10, 18}); op_prod_int_out(self, dim, keepdim, dtype, out); @@ -106,7 +106,7 @@ TEST_F(OpProdIntOutTest, SmokeTestKeepdim) { Tensor self = tfFloat.make({2, 3}, {1, 2, 3, 4, 5, 6}); int64_t dim = 0; bool keepdim = true; - optional dtype{}; + std::optional dtype{}; Tensor out = tfFloat.zeros({1, 3}); Tensor out_expected = tfFloat.make({1, 3}, {4, 10, 18}); op_prod_int_out(self, dim, keepdim, dtype, out); diff --git a/kernels/test/op_repeat_interleave_test.cpp b/kernels/test/op_repeat_interleave_test.cpp index 71018667ac7..a4f4a2ed8ee 100644 --- a/kernels/test/op_repeat_interleave_test.cpp +++ b/kernels/test/op_repeat_interleave_test.cpp @@ -22,7 +22,7 @@ class OpRepeatInterleaveTensorOutTest : public OperatorTest { protected: Tensor& op_repeat_out( const Tensor& repeats, - optional output_size, + std::optional output_size, Tensor& out) { return torch::executor::aten::repeat_interleave_outf( context_, repeats, output_size, out); diff --git a/kernels/test/op_slice_copy_test.cpp b/kernels/test/op_slice_copy_test.cpp index c7e8a0acf66..90acb4dcfb6 100644 --- a/kernels/test/op_slice_copy_test.cpp +++ b/kernels/test/op_slice_copy_test.cpp @@ -28,8 +28,8 @@ class OpSliceCopyTensorOutTest : public OperatorTest { Tensor& op_slice_copy_tensor_out( const Tensor& self, int64_t dim, - optional start, - optional end, + std::optional start, + std::optional end, int64_t step, Tensor& out) { return torch::executor::aten::slice_copy_outf( @@ -568,7 +568,7 @@ TEST_F(OpSliceCopyTensorOutTest, DefaultStartValSupported) { Tensor ret_default_start = op_slice_copy_tensor_out( input, /*dim=*/0, - /*start=*/executorch::aten::nullopt, + /*start=*/std::nullopt, /*end=*/2, /*step=*/1, out); @@ -588,7 +588,7 @@ TEST_F(OpSliceCopyTensorOutTest, DefaultEndValSupported) { input, /*dim=*/0, /*start=*/0, - /*end=*/executorch::aten::nullopt, + /*end=*/std::nullopt, /*step=*/1, out); EXPECT_TENSOR_EQ(ret_default_end, out); diff --git a/kernels/test/op_slice_scatter_test.cpp b/kernels/test/op_slice_scatter_test.cpp index 14a5bd2679d..48b5963491d 100644 --- a/kernels/test/op_slice_scatter_test.cpp +++ b/kernels/test/op_slice_scatter_test.cpp @@ -29,8 +29,8 @@ class OpSliceScatterTensorOutTest : public OperatorTest { const Tensor& self, const Tensor& src, int64_t dim, - optional start, - optional end, + std::optional start, + std::optional end, int64_t step, Tensor& out) { return torch::executor::aten::slice_scatter_outf( @@ -813,7 +813,7 @@ TEST_F(OpSliceScatterTensorOutTest, DefaultStartValSupported) { input, src, /*dim=*/0, - /*start=*/executorch::aten::nullopt, + /*start=*/std::nullopt, /*end=*/2, /*step=*/1, out); @@ -835,7 +835,7 @@ TEST_F(OpSliceScatterTensorOutTest, DefaultEndValSupported) { src, /*dim=*/0, /*start=*/0, - /*end=*/executorch::aten::nullopt, + /*end=*/std::nullopt, /*step=*/1, out); EXPECT_TENSOR_EQ(ret_default_end, out); @@ -857,7 +857,7 @@ TEST_F(OpSliceScatterTensorOutTest, DynamicShapeTest) { src, /*dim=*/0, /*start=*/0, - /*end=*/executorch::aten::nullopt, + /*end=*/std::nullopt, /*step=*/1, out); EXPECT_TENSOR_EQ(ret_default_end, out); diff --git a/kernels/test/op_sum_test.cpp b/kernels/test/op_sum_test.cpp index 58624c2a110..b16292af050 100644 --- a/kernels/test/op_sum_test.cpp +++ b/kernels/test/op_sum_test.cpp @@ -28,9 +28,9 
@@ class OpSumOutTest : public OperatorTest { protected: Tensor& op_sum_intlist_out( const Tensor& self, - optional> dim, + std::optional> dim, bool keepdim, - optional dtype, + std::optional dtype, Tensor& out) { return torch::executor::aten::sum_outf( context_, self, dim, keepdim, dtype, out); @@ -55,11 +55,12 @@ class OpSumOutTest : public OperatorTest { }); // clang-format on Tensor out = tf_out.zeros({2, 3, 1}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; // out-of-bound dim in dim list int64_t dims_1[1] = {3}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_sum_intlist_out( @@ -95,9 +96,10 @@ class OpSumOutTest : public OperatorTest { // dimension size mismatch when keepdim is true Tensor out = tf_out.zeros({2, 4}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; int64_t dims_1[1] = {1}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_sum_intlist_out( @@ -142,8 +144,8 @@ class OpSumOutTest : public OperatorTest { CTYPE(0, 0), }); int64_t dims_1[1] = {2}; - optional> dim_list1{ArrayRef{dims_1, 1}}; - optional dtype = DTYPE; + std::optional> dim_list1{ArrayRef{dims_1, 1}}; + std::optional dtype = DTYPE; op_sum_intlist_out(self, dim_list1, true, dtype, out1); @@ -168,7 +170,7 @@ class OpSumOutTest : public OperatorTest { CTYPE(0, 0), }); int64_t dims_2[1] = {1}; - optional> dim_list2{ArrayRef{dims_2, 1}}; + std::optional> dim_list2{ArrayRef{dims_2, 1}}; op_sum_intlist_out(self, dim_list2, true, dtype, out2); @@ -181,7 +183,7 @@ class OpSumOutTest : public OperatorTest { { CTYPE(0, 0), }); - optional> null_dim_list; + std::optional> null_dim_list; op_sum_intlist_out(self, null_dim_list, true, dtype, out3); @@ -211,8 +213,9 @@ class OpSumOutTest : public OperatorTest { // keepdim=true should work Tensor out = tf_out.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype = OUT_DTYPE; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype = OUT_DTYPE; op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out); // clang-format off EXPECT_TENSOR_CLOSE(out, tf_out.make( @@ -289,11 +292,11 @@ class OpSumOutTest : public OperatorTest { }); // clang-format on out = tf_out.zeros({1, 1, 1}); - optional> null_dim_list; + std::optional> null_dim_list; op_sum_intlist_out(self, null_dim_list, /*keepdim=*/true, dtype, out); EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {56})); - optional> empty_dim_list{ArrayRef{}}; + std::optional> empty_dim_list{ArrayRef{}}; op_sum_intlist_out(self, empty_dim_list, /*keepdim=*/true, dtype, out); EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {56})); @@ -365,8 +368,9 @@ TEST_F(OpSumOutTest, MismatchedDTypesDies) { Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype = ScalarType::Double; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype = ScalarType::Double; // out tensor should be of the same dtype with dtype when dtype is specified ET_EXPECT_KERNEL_FAILURE( @@ -407,8 +411,9 @@ TEST_F(OpSumOutTest, TypeConversionTest) { // clang-format on int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype; // 
int -> bool conversion should work Tensor out = tf_bool.zeros({2, 3, 1}); @@ -473,8 +478,9 @@ TEST_F(OpSumOutTest, InfinityAndNANTest) { Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims[1] = {-1}; - optional> optional_dim_list{ArrayRef{dims, 1}}; - optional dtype; + std::optional> optional_dim_list{ + ArrayRef{dims, 1}}; + std::optional dtype; op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out); // clang-format off EXPECT_TENSOR_CLOSE(out, tf_float.make( diff --git a/kernels/test/op_to_copy_test.cpp b/kernels/test/op_to_copy_test.cpp index d9798d6d573..a340a440639 100644 --- a/kernels/test/op_to_copy_test.cpp +++ b/kernels/test/op_to_copy_test.cpp @@ -57,7 +57,7 @@ class OpToTest : public OperatorTest { Tensor& op_to_copy_out( const Tensor& self, bool non_blocking, - optional memory_format, + std::optional memory_format, Tensor& out) { return torch::executor::aten::_to_copy_outf( context_, self, non_blocking, memory_format, out); @@ -224,7 +224,7 @@ class OpToTest : public OperatorTest { op = "op_to_copy_out" opt_setup_params = """ bool non_blocking = false; - optional memory_format; + std::optional memory_format; """ opt_extra_params = "non_blocking, memory_format," out_args = "out_shape, dynamism" @@ -257,7 +257,7 @@ class OpToTest : public OperatorTest { 0.6340786814689636}); bool non_blocking = false; - optional memory_format; + std::optional memory_format; Tensor out = tf.zeros(out_shape, dynamism); op_to_copy_out(x, non_blocking, memory_format, out); diff --git a/kernels/test/op_var_test.cpp b/kernels/test/op_var_test.cpp index bfa73bfe15c..5d05da695a8 100644 --- a/kernels/test/op_var_test.cpp +++ b/kernels/test/op_var_test.cpp @@ -42,7 +42,7 @@ class OpVarOutTest : public OperatorTest { protected: Tensor& op_var_out( const Tensor& self, - optional> dim, + std::optional> dim, bool unbiased, bool keepdim, Tensor& out) { @@ -69,11 +69,12 @@ class OpVarOutTest : public OperatorTest { }); // clang-format on Tensor out = tf_out.zeros({2, 3, 1}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; // out-of-bound dim in dim list int64_t dims_1[1] = {3}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_var_out( @@ -117,9 +118,10 @@ class OpVarOutTest : public OperatorTest { // dimension size mismatch when keepdim is true Tensor out = tf_out.zeros({2, 4}); - optional dtype = OUT_DTYPE; + std::optional dtype = OUT_DTYPE; int64_t dims_1[1] = {1}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, op_var_out( @@ -162,8 +164,9 @@ class OpVarOutTest : public OperatorTest { // keepdim=true should work Tensor out = tf_out.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; - optional dtype = OUT_DTYPE; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; + std::optional dtype = OUT_DTYPE; op_var_out( self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out); // clang-format off @@ -238,11 +241,11 @@ class OpVarOutTest : public OperatorTest { // empty/null dim list should work out = tf_out.zeros({1, 1, 1}); - optional> null_dim_list; + std::optional> null_dim_list; op_var_out(self, null_dim_list, /*unbiased=*/true, /*keepdim=*/true, out); expect_tensor_close_with_increased_tol(out, tf_out.make({1, 1, 1}, {50.0})); - optional> empty_dim_list{ArrayRef{}}; + std::optional> empty_dim_list{ArrayRef{}}; 
op_var_out(self, empty_dim_list, /*unbiased=*/false, /*keepdim=*/true, out); expect_tensor_close_with_increased_tol( out, tf_out.make({1, 1, 1}, {47.916668})); @@ -260,8 +263,8 @@ class OpVarCorrectionOutTest : public OperatorTest { protected: Tensor& op_var_correction_out( const Tensor& self, - optional> dim, - optional& correction, + std::optional> dim, + std::optional& correction, bool keepdim, Tensor& out) { return torch::executor::aten::var_outf( @@ -274,7 +277,7 @@ class OpVarCorrectionOutTest : public OperatorTest { Tensor x = tf.make({2, 3}, {4.9, 4.0, 5.6, 3.8, 4.9, 5.6}); Tensor expected = tf.make({2}, {0.72693, 0.93032}); - optional correction(1.23); + std::optional correction(1.23); Tensor out = tf.zeros({2}); op_var_correction_out( @@ -343,7 +346,8 @@ TEST_F(OpVarOutTest, InvalidDTypeDies) { // keepdim=true should work Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims_1[1] = {2}; - optional> optional_dim_list{ArrayRef{dims_1, 1}}; + std::optional> optional_dim_list{ + ArrayRef{dims_1, 1}}; ET_EXPECT_KERNEL_FAILURE( context_, @@ -405,8 +409,9 @@ TEST_F(OpVarOutTest, InfinityAndNANTest) { Tensor out = tf_float.zeros({2, 3, 1}); int64_t dims[1] = {-1}; - optional> optional_dim_list{ArrayRef{dims, 1}}; - optional dtype; + std::optional> optional_dim_list{ + ArrayRef{dims, 1}}; + std::optional dtype; op_var_out(self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out); // clang-format off EXPECT_TENSOR_CLOSE(out, tf_float.make( diff --git a/runtime/core/evalue.cpp b/runtime/core/evalue.cpp index 121a9a29fa2..2f48f9c5a8a 100644 --- a/runtime/core/evalue.cpp +++ b/runtime/core/evalue.cpp @@ -18,7 +18,7 @@ BoxedEvalueList>::get() const { i < wrapped_vals_.size(); i++) { if (wrapped_vals_[i] == nullptr) { - unwrapped_vals_[i] = executorch::aten::nullopt; + unwrapped_vals_[i] = std::nullopt; } else { unwrapped_vals_[i] = wrapped_vals_[i]->to>(); diff --git a/runtime/core/evalue.h b/runtime/core/evalue.h index 0cea86dc30c..1da84d282d6 100644 --- a/runtime/core/evalue.h +++ b/runtime/core/evalue.h @@ -412,7 +412,7 @@ struct EValue { template inline std::optional toOptional() const { if (this->isNone()) { - return executorch::aten::nullopt; + return std::nullopt; } return this->to(); } diff --git a/runtime/core/exec_aten/exec_aten.h b/runtime/core/exec_aten/exec_aten.h index 8c06045927e..beed562b81e 100644 --- a/runtime/core/exec_aten/exec_aten.h +++ b/runtime/core/exec_aten/exec_aten.h @@ -10,6 +10,7 @@ #include // @manual #include +#include #ifdef USE_ATEN_LIB #include // @manual #include @@ -21,7 +22,6 @@ #include // @manual #include // @manual #include // @manual -#include // @manual #include // @manual #include // @manual #include // @manual @@ -37,7 +37,6 @@ #include // @manual #include // @manual #include // @manual -#include // @manual #include // @manual #include // @manual #include // @manual @@ -153,8 +152,7 @@ using quint2x4 = torch::executor::quint2x4; using IntArrayRef = torch::executor::IntArrayRef; template -using OptionalArrayRef = - torch::executor::optional>; +using OptionalArrayRef = std::optional>; using OptionalIntArrayRef = OptionalArrayRef; using torch::executor::compute_numel; diff --git a/runtime/executor/tensor_parser.h b/runtime/executor/tensor_parser.h index fae183ea6e4..d96bf1a2747 100644 --- a/runtime/executor/tensor_parser.h +++ b/runtime/executor/tensor_parser.h @@ -85,8 +85,7 @@ ET_NODISCARD Result>> parseListOptionalType( // Placement new as the list elements are not initialized, so calling // copy assignment is not defined if its non trivial. 
   if (index == -1) {
-    new (&optional_tensor_list[output_idx])
-        std::optional(executorch::aten::nullopt);
+    new (&optional_tensor_list[output_idx]) std::optional(std::nullopt);
     // no value to point to. BoxedEvalueList for optional tensor will convert
     // this to nullopt.
     // TODO(T161156879): do something less hacky here.
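
A note on the final tensor_parser.h hunk, the one behavioral subtlety in an otherwise mechanical patch: the list elements live in uninitialized storage, so the disengaged optional must be created with placement new rather than copy assignment. A minimal self-contained sketch of that idiom, with std::string standing in for the tensor payload (an illustration, not the ExecuTorch implementation):

#include <new>
#include <optional>
#include <string>

int main() {
  // Raw, uninitialized storage for one slot, as in parseListOptionalType:
  // copy-assigning into it would be undefined behavior for a non-trivial T,
  // so the optional is constructed in place instead.
  alignas(std::optional<std::string>) unsigned char
      slot_storage[sizeof(std::optional<std::string>)];

  // The "index == -1" case above: construct a disengaged optional in the slot.
  auto* slot = new (slot_storage) std::optional<std::string>(std::nullopt);

  bool engaged = slot->has_value();  // false: no value to point to
  slot->~optional();                 // lifetime is managed manually here
  return engaged ? 1 : 0;
}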
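
The reduction-kernel tests above (op_mean, op_sum, op_var, and the ReduceUtil suite) also rely on one convention worth spelling out: a disengaged optional dim list and an engaged-but-empty list are both treated as reducing over every dimension, which is why the null_dim_list and empty_dim_list cases expect identical results. A hedged sketch of that dispatch, with std::vector standing in for ArrayRef and a hypothetical reduces_all_dims helper:

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Hypothetical stand-in for the dim-list convention in the tests above:
// nullopt and an engaged-but-empty list both mean "reduce over all dims".
bool reduces_all_dims(const std::optional<std::vector<int64_t>>& dim_list) {
  return !dim_list.has_value() || dim_list->empty();
}

int main() {
  std::optional<std::vector<int64_t>> null_list;                             // like null_dim_list
  std::optional<std::vector<int64_t>> empty_list(std::vector<int64_t>{});    // like empty_dim_list
  std::optional<std::vector<int64_t>> two_dims(std::vector<int64_t>{0, 2});  // a real dim list

  std::cout << reduces_all_dims(null_list) << reduces_all_dims(empty_list)
            << reduces_all_dims(two_dims) << '\n';  // prints 110
  return 0;
}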
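
Beyond that, the patch is a mechanical rename: every spelling of the legacy alias (torch::executor::optional, exec_aten::optional, executorch::aten::optional) becomes std::optional, and executorch::aten::nullopt becomes std::nullopt, with call sites otherwise untouched. A minimal sketch of the resulting signature style, using a hypothetical apply_bias helper rather than any real kernel:

#include <iostream>
#include <optional>
#include <vector>

// Post-migration spelling: the optional parameter is std::optional<T>.
// (The pre-migration signature would have used torch::executor::optional<T>.)
std::vector<float> apply_bias(
    const std::vector<float>& x,
    const std::optional<float>& bias) {  // hypothetical helper
  std::vector<float> out = x;
  if (bias.has_value()) {  // engaged optional: apply the bias
    for (float& v : out) {
      v += *bias;
    }
  }
  return out;
}

int main() {
  std::vector<float> x{1.0f, 2.0f, 3.0f};
  auto biased = apply_bias(x, 0.5f);         // a bare value converts to an engaged optional
  auto plain = apply_bias(x, std::nullopt);  // std::nullopt replaces executorch::aten::nullopt
  std::cout << biased[0] << ' ' << plain[0] << '\n';  // prints 1.5 1
  return 0;
}

Implicit conversion is what keeps most hunks above type-spelling-only: existing call sites passing a value or nullopt continue to compile unchanged against the std::optional signatures.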