Skip to content

Commit e37a585

Browse files
authored
Execute on exec_aten namespace deprecation from #5296 (#7950)
fastmod 'exec_aten::' 'executorch::aten::' Manually fix op_sdpa.cpp to use ::executorch::aten, run lintrunner. (FWIW, I disagree with our use of nested namespaces for the reasons outlined in https://abseil.io/tips/130, but we can always find/replace back and leaving the migration unfinished seems worse.)
1 parent 520e0bc commit e37a585

File tree

435 files changed

+2742
-2582
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

435 files changed

+2742
-2582
lines changed

backends/cadence/hifi/operators/op_add.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616
#include <executorch/runtime/kernel/kernel_includes.h>
1717
#include <executorch/runtime/platform/assert.h>
1818

19-
using exec_aten::Scalar;
20-
using exec_aten::ScalarType;
21-
using exec_aten::Tensor;
19+
using executorch::aten::Scalar;
20+
using executorch::aten::ScalarType;
21+
using executorch::aten::Tensor;
2222
using executorch::runtime::can_cast;
2323
using executorch::runtime::CppTypeToScalarType;
2424
using executorch::runtime::KernelRuntimeContext;

backends/cadence/hifi/operators/op_cat.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ namespace native {
3030

3131
Tensor& cat_out(
3232
RuntimeContext& ctx,
33-
exec_aten::ArrayRef<Tensor> tensors,
33+
executorch::aten::ArrayRef<Tensor> tensors,
3434
int64_t dim,
3535
Tensor& out) {
3636
if (dim < 0) {

backends/cadence/hifi/operators/op_clamp.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ namespace native {
5151
Tensor& clamp_tensor_out(
5252
RuntimeContext& ctx,
5353
const Tensor& in,
54-
const exec_aten::optional<Tensor>& min_opt,
55-
const exec_aten::optional<Tensor>& max_opt,
54+
const executorch::aten::optional<Tensor>& min_opt,
55+
const executorch::aten::optional<Tensor>& max_opt,
5656
Tensor& out) {
5757
(void)ctx;
5858

backends/cadence/hifi/operators/op_div.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,10 @@
1717
#include <executorch/runtime/platform/assert.h>
1818
#include <cmath>
1919

20-
using exec_aten::Scalar;
21-
using exec_aten::ScalarType;
22-
using exec_aten::Tensor;
2320
using executorch::aten::RuntimeContext;
21+
using executorch::aten::Scalar;
22+
using executorch::aten::ScalarType;
23+
using executorch::aten::Tensor;
2424
using torch::executor::Error;
2525

2626
namespace cadence {
@@ -165,7 +165,7 @@ Tensor& div_out_mode(
165165
RuntimeContext& ctx,
166166
const Tensor& a,
167167
const Tensor& b,
168-
exec_aten::optional<exec_aten::string_view> mode,
168+
executorch::aten::optional<executorch::aten::string_view> mode,
169169
Tensor& out) {
170170
ET_KERNEL_CHECK(
171171
ctx,

backends/cadence/hifi/operators/op_maximum.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@
1212
#include <executorch/kernels/portable/cpu/util/math_util.h>
1313
#include <executorch/runtime/kernel/kernel_includes.h>
1414

15-
using exec_aten::ScalarType;
16-
using exec_aten::Tensor;
1715
using executorch::aten::RuntimeContext;
16+
using executorch::aten::ScalarType;
17+
using executorch::aten::Tensor;
1818
using executorch::runtime::can_cast;
1919
using executorch::runtime::canCast;
2020
using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_minimum.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@
1212
#include <executorch/kernels/portable/cpu/util/math_util.h>
1313
#include <executorch/runtime/kernel/kernel_includes.h>
1414

15-
using exec_aten::ScalarType;
16-
using exec_aten::Tensor;
1715
using executorch::aten::RuntimeContext;
16+
using executorch::aten::ScalarType;
17+
using executorch::aten::Tensor;
1818
using executorch::runtime::can_cast;
1919
using executorch::runtime::canCast;
2020
using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_mul.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
#include <executorch/runtime/kernel/kernel_includes.h>
1616
#include <executorch/runtime/platform/assert.h>
1717

18-
using exec_aten::Scalar;
19-
using exec_aten::ScalarType;
20-
using exec_aten::Tensor;
2118
using executorch::aten::RuntimeContext;
19+
using executorch::aten::Scalar;
20+
using executorch::aten::ScalarType;
21+
using executorch::aten::Tensor;
2222
using executorch::runtime::can_cast;
2323
using executorch::runtime::CppTypeToScalarType;
2424
using torch::executor::Error;

backends/cadence/hifi/operators/op_rsqrt.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111

1212
#include <executorch/backends/cadence/hifi/kernels/kernels.h>
1313

14-
using exec_aten::ScalarType;
15-
using exec_aten::Tensor;
1614
using executorch::aten::RuntimeContext;
15+
using executorch::aten::ScalarType;
16+
using executorch::aten::Tensor;
1717

1818
namespace cadence {
1919
namespace impl {

backends/cadence/hifi/operators/op_sigmoid.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -14,17 +14,17 @@
1414
#include <executorch/kernels/portable/cpu/util/functional_util.h>
1515
#include <executorch/runtime/kernel/kernel_includes.h>
1616

17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
1917
using executorch::aten::RuntimeContext;
18+
using executorch::aten::ScalarType;
19+
using executorch::aten::Tensor;
2020
using torch::executor::Error;
2121

2222
namespace cadence {
2323
namespace impl {
2424
namespace HiFi {
2525
namespace native {
2626

27-
using Tensor = exec_aten::Tensor;
27+
using Tensor = executorch::aten::Tensor;
2828

2929
Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
3030
(void)ctx;

backends/cadence/hifi/operators/op_softmax.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ Tensor& softmax_out(
5050
// Adjust for negative dim
5151
dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;
5252

53-
const exec_aten::optional<int64_t>& dim_t = dim;
53+
const executorch::aten::optional<int64_t>& dim_t = dim;
5454
const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
5555
const size_t size = in.size(d);
5656

backends/cadence/hifi/operators/op_sub.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,10 @@
1616
#include <executorch/runtime/kernel/kernel_includes.h>
1717
#include <executorch/runtime/platform/assert.h>
1818

19-
using exec_aten::Scalar;
20-
using exec_aten::ScalarType;
21-
using exec_aten::Tensor;
2219
using executorch::aten::RuntimeContext;
20+
using executorch::aten::Scalar;
21+
using executorch::aten::ScalarType;
22+
using executorch::aten::Tensor;
2323
using executorch::runtime::can_cast;
2424
using executorch::runtime::CppTypeToScalarType;
2525
using torch::executor::Error;

backends/cadence/hifi/operators/op_tanh.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
#include <executorch/runtime/kernel/kernel_includes.h>
1212
#include <cmath>
1313

14-
using exec_aten::ScalarType;
15-
using exec_aten::Tensor;
1614
using executorch::aten::RuntimeContext;
15+
using executorch::aten::ScalarType;
16+
using executorch::aten::Tensor;
1717
using torch::executor::Error;
1818

1919
namespace cadence {

backends/cadence/hifi/operators/quantized_linear_out.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ void quantized_linear_out(
219219
int64_t out_zero_point,
220220
__ET_UNUSED const optional<Tensor>& offset,
221221
Tensor& out) {
222-
if (out.scalar_type() == exec_aten::ScalarType::Byte) {
222+
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
223223
_quantized_linear_asym8u(
224224
in,
225225
weight,
@@ -231,7 +231,7 @@ void quantized_linear_out(
231231
out_zero_point,
232232
offset,
233233
out);
234-
} else if (out.scalar_type() == exec_aten::ScalarType::Char) {
234+
} else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
235235
_quantized_linear_asym8s(
236236
in,
237237
weight,
@@ -261,7 +261,7 @@ void quantized_linear_per_tensor_out(
261261
int64_t out_zero_point,
262262
__ET_UNUSED const optional<Tensor>& offset,
263263
Tensor& out) {
264-
if (out.scalar_type() == exec_aten::ScalarType::Byte) {
264+
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
265265
_quantized_linear_per_tensor_asym8u(
266266
in,
267267
weight,
@@ -273,7 +273,7 @@ void quantized_linear_per_tensor_out(
273273
out_zero_point,
274274
offset,
275275
out);
276-
} else if (out.scalar_type() == exec_aten::ScalarType::Char) {
276+
} else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
277277
_quantized_linear_per_tensor_asym8s(
278278
in,
279279
weight,

backends/vulkan/runtime/VulkanBackend.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -417,10 +417,10 @@ bool maybe_update_scalar_tensor(
417417
executorch::aten::Tensor& scalar_tensor_src) {
418418
const int32_t cur_val = graph->read_symint(ref);
419419
int32_t scalar_tensor_val = 0;
420-
exec_aten::ScalarType dtype = scalar_tensor_src.scalar_type();
421-
if (dtype == exec_aten::ScalarType::Int) {
420+
executorch::aten::ScalarType dtype = scalar_tensor_src.scalar_type();
421+
if (dtype == executorch::aten::ScalarType::Int) {
422422
scalar_tensor_val = *scalar_tensor_src.const_data_ptr<int32_t>();
423-
} else if (dtype == exec_aten::ScalarType::Long) {
423+
} else if (dtype == executorch::aten::ScalarType::Long) {
424424
scalar_tensor_val = int32_t(*scalar_tensor_src.const_data_ptr<int64_t>());
425425
}
426426
bool was_updated = false;

codegen/tools/gen_selected_op_variants.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from torchgen.code_template import CodeTemplate
1818

1919

20-
ops_and_dtypes_template_str = """((exec_aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
20+
ops_and_dtypes_template_str = """((executorch::aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
2121
ops_and_dtypes_template = CodeTemplate(ops_and_dtypes_template_str)
2222

2323
selected_kernel_dtypes_h_template_str = """#pragma once
@@ -27,7 +27,7 @@
2727
2828
inline constexpr bool should_include_kernel_dtype(
2929
const char *operator_name,
30-
exec_aten::ScalarType scalar_type
30+
executorch::aten::ScalarType scalar_type
3131
) {
3232
return $body;
3333
}
@@ -91,7 +91,8 @@ def write_selected_op_variants(yaml_file_path: str, output_dir: str) -> None:
9191
dtype_set = set([x.split(";")[0] for x in tensor_meta])
9292
dtype_list = sorted([dtype_enum_to_type[x] for x in dtype_set])
9393
conditions = [
94-
"scalar_type == exec_aten::ScalarType::" + x for x in dtype_list
94+
"scalar_type == executorch::aten::ScalarType::" + x
95+
for x in dtype_list
9596
]
9697
body_parts.append(
9798
ops_and_dtypes_template.substitute(

codegen/tools/test/test_gen_selected_op_variants.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,13 @@ def test_generates_correct_header(self) -> None:
7171
7272
inline constexpr bool should_include_kernel_dtype(
7373
const char *operator_name,
74-
exec_aten::ScalarType scalar_type
74+
executorch::aten::ScalarType scalar_type
7575
) {
76-
return ((exec_aten::string_view(operator_name).compare("add.out") == 0)
77-
&& (scalar_type == exec_aten::ScalarType::Float || scalar_type == exec_aten::ScalarType::Int))
78-
|| ((exec_aten::string_view(operator_name).compare("mul.out") == 0)
79-
&& (scalar_type == exec_aten::ScalarType::Float))
80-
|| ((exec_aten::string_view(operator_name).compare("sub.out") == 0)
76+
return ((executorch::aten::string_view(operator_name).compare("add.out") == 0)
77+
&& (scalar_type == executorch::aten::ScalarType::Float || scalar_type == executorch::aten::ScalarType::Int))
78+
|| ((executorch::aten::string_view(operator_name).compare("mul.out") == 0)
79+
&& (scalar_type == executorch::aten::ScalarType::Float))
80+
|| ((executorch::aten::string_view(operator_name).compare("sub.out") == 0)
8181
&& (true));
8282
}
8383
""",
@@ -124,7 +124,7 @@ def test_generates_correct_header(self) -> None:
124124
125125
inline constexpr bool should_include_kernel_dtype(
126126
const char *operator_name,
127-
exec_aten::ScalarType scalar_type
127+
executorch::aten::ScalarType scalar_type
128128
) {
129129
return true;
130130
}

devtools/bundled_program/bundled_program.cpp

+9-9
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,10 @@
2323
#include <executorch/runtime/executor/method.h>
2424
#include <executorch/runtime/platform/log.h>
2525

26-
using exec_aten::ArrayRef;
27-
using exec_aten::Half;
28-
using exec_aten::ScalarType;
29-
using exec_aten::Tensor;
26+
using executorch::aten::ArrayRef;
27+
using executorch::aten::Half;
28+
using executorch::aten::ScalarType;
29+
using executorch::aten::Tensor;
3030
using ::executorch::runtime::Error;
3131
using ::executorch::runtime::EValue;
3232
using ::executorch::runtime::Method;
@@ -67,16 +67,16 @@ TensorImpl impl_like(bundled_program_flatbuffer::Tensor* bundled_tensor) {
6767
ScalarType scalar_type =
6868
static_cast<ScalarType>(bundled_tensor->scalar_type());
6969
ssize_t dim = bundled_tensor->sizes()->size();
70-
exec_aten::SizesType* sizes = bundled_tensor->mutable_sizes()->data();
70+
executorch::aten::SizesType* sizes = bundled_tensor->mutable_sizes()->data();
7171
void* data = bundled_tensor->mutable_data()->data();
72-
exec_aten::DimOrderType* dim_order =
72+
executorch::aten::DimOrderType* dim_order =
7373
bundled_tensor->mutable_dim_order()->data();
7474

7575
// The strides of created tensorimpl will only be actually used when
7676
// comparsion (`tensor_are_close` below). To eliminate the usage of memory
7777
// allocator, here we set the initial strides as null and reconstruct the
7878
// stride array as temporary varible when comparsion.
79-
exec_aten::StridesType* strides = nullptr;
79+
executorch::aten::StridesType* strides = nullptr;
8080
return TensorImpl(scalar_type, dim, sizes, data, dim_order, strides);
8181
}
8282
#endif
@@ -165,7 +165,7 @@ bool tensors_are_close(
165165

166166
// Contruct stride array for bundled tensor based on its dim order since
167167
// strides of bundled_tensor in lean mode is null.
168-
exec_aten::StridesType strides[kMaxDim] = {0};
168+
executorch::aten::StridesType strides[kMaxDim] = {0};
169169
auto status = torch::executor::dim_order_to_stride(
170170
bundled_tensor.sizes().data(),
171171
bundled_tensor.dim_order().data(),
@@ -176,7 +176,7 @@ bool tensors_are_close(
176176

177177
// TODO(T132992348): support comparison between tensors of different strides
178178
ET_CHECK_MSG(
179-
ArrayRef<exec_aten::StridesType>(strides, bundled_tensor.dim()) ==
179+
ArrayRef<executorch::aten::StridesType>(strides, bundled_tensor.dim()) ==
180180
method_output_tensor.strides(),
181181
"The two inputs of `tensors_are_close` function shall have same strides");
182182
#endif

0 commit comments

Comments (0)