
Commit e37a585

Execute on exec_aten namespace deprecation from #5296 (#7950)
fastmod 'exec_aten::' 'executorch::aten::'

Manually fix op_sdpa.cpp to use ::executorch::aten, run lintrunner.

(FWIW, I disagree with our use of nested namespaces for the reasons outlined in https://abseil.io/tips/130, but we can always find/replace back, and leaving the migration unfinished seems worse.)
1 parent 520e0bc commit e37a585
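As a minimal sketch of the mechanical rename this commit applies (illustrative only, not part of the commit; the includes are copied from op_add.cpp, and building it assumes the executorch headers are on the include path), the deprecated exec_aten:: qualifiers in the kernels' using-declarations become executorch::aten::

// Before (deprecated spelling removed by this commit):
//   using exec_aten::Scalar;
//   using exec_aten::ScalarType;
//   using exec_aten::Tensor;
//
// After: the same types, qualified with executorch::aten.
// Headers as in op_add.cpp; assumes executorch include paths are configured.
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;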

File tree: 435 files changed (+2742, -2582 lines)


backends/cadence/hifi/operators/op_add.cpp

Lines changed: 3 additions & 3 deletions
@@ -16,9 +16,9 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>
 
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::CppTypeToScalarType;
 using executorch::runtime::KernelRuntimeContext;

backends/cadence/hifi/operators/op_cat.cpp

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ namespace native {
 
 Tensor& cat_out(
     RuntimeContext& ctx,
-    exec_aten::ArrayRef<Tensor> tensors,
+    executorch::aten::ArrayRef<Tensor> tensors,
     int64_t dim,
     Tensor& out) {
   if (dim < 0) {

backends/cadence/hifi/operators/op_clamp.cpp

Lines changed: 2 additions & 2 deletions
@@ -51,8 +51,8 @@ namespace native {
 Tensor& clamp_tensor_out(
     RuntimeContext& ctx,
     const Tensor& in,
-    const exec_aten::optional<Tensor>& min_opt,
-    const exec_aten::optional<Tensor>& max_opt,
+    const executorch::aten::optional<Tensor>& min_opt,
+    const executorch::aten::optional<Tensor>& max_opt,
     Tensor& out) {
   (void)ctx;
 

backends/cadence/hifi/operators/op_div.cpp

Lines changed: 4 additions & 4 deletions
@@ -17,10 +17,10 @@
 #include <executorch/runtime/platform/assert.h>
 #include <cmath>
 
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using torch::executor::Error;
 
 namespace cadence {

@@ -165,7 +165,7 @@ Tensor& div_out_mode(
     RuntimeContext& ctx,
     const Tensor& a,
     const Tensor& b,
-    exec_aten::optional<exec_aten::string_view> mode,
+    executorch::aten::optional<executorch::aten::string_view> mode,
     Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,

backends/cadence/hifi/operators/op_maximum.cpp

Lines changed: 2 additions & 2 deletions
@@ -12,9 +12,9 @@
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::canCast;
 using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_minimum.cpp

Lines changed: 2 additions & 2 deletions
@@ -12,9 +12,9 @@
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::canCast;
 using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_mul.cpp

Lines changed: 3 additions & 3 deletions
@@ -15,10 +15,10 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>
 
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::CppTypeToScalarType;
 using torch::executor::Error;

backends/cadence/hifi/operators/op_rsqrt.cpp

Lines changed: 2 additions & 2 deletions
@@ -11,9 +11,9 @@
 
 #include <executorch/backends/cadence/hifi/kernels/kernels.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 
 namespace cadence {
 namespace impl {

backends/cadence/hifi/operators/op_sigmoid.cpp

Lines changed: 3 additions & 3 deletions
@@ -14,17 +14,17 @@
 #include <executorch/kernels/portable/cpu/util/functional_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using torch::executor::Error;
 
 namespace cadence {
 namespace impl {
 namespace HiFi {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
+using Tensor = executorch::aten::Tensor;
 
 Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
   (void)ctx;

backends/cadence/hifi/operators/op_softmax.cpp

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ Tensor& softmax_out(
   // Adjust for negative dim
   dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;
 
-  const exec_aten::optional<int64_t>& dim_t = dim;
+  const executorch::aten::optional<int64_t>& dim_t = dim;
   const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
   const size_t size = in.size(d);
 