23 changes: 0 additions & 23 deletions backends/apple/coreml/compiler/torch_ops.py
@@ -15,7 +15,6 @@
from coremltools.converters.mil.frontend.torch.ops import (
_get_inputs,
_get_kwinputs,
noop,
NUM_TO_NUMPY_DTYPE,
NUM_TO_TORCH_DTYPE,
split,
@@ -92,28 +91,6 @@ def _to_dim_order_copy(context, node):
to(context, node)


@register_torch_op(
torch_alias=[
"dim_order_ops::_clone_dim_order",
"dim_order_ops._clone_dim_order",
],
override=False,
)
def _clone_dim_order(context, node):
dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
node.kwinputs.pop("dim_order")

# In CoreML, dim_order.val will be a ndarray, so we convert it to a list to check memory format.
dim_order = [int(d) for d in dim_order.val]
memory_format = get_memory_format(dim_order)
assert (
memory_format == _torch.contiguous_format
), "Only contiguous memory format is supported in CoreML"

# Since CoreML only supports contiguous format, no dim_order preservation is needed. Treat this as a no-op clone.
noop(context, node)


# https://github.com/apple/coremltools/pull/2558
@register_torch_op(
torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],
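For reference, the handler removed above followed coremltools' usual pattern of registering a torch op and delegating to an existing converter. A minimal sketch of that pattern, mirroring the removed code rather than a verified upstream API (the `register_torch_op` import path is assumed):

```python
# Sketch only: register a dim-order-aware clone and lower it as a plain
# no-op clone, since CoreML supports only contiguous tensors.
from coremltools.converters.mil.frontend.torch.ops import _get_kwinputs, noop
from coremltools.converters.mil.frontend.torch.torch_op_registry import register_torch_op


@register_torch_op(torch_alias=["dim_order_ops::_clone_dim_order"], override=False)
def _clone_dim_order(context, node):
    # Read and drop the dim_order kwarg; the aten-level clone has no equivalent.
    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
    node.kwinputs.pop("dim_order")

    # dim_order.val arrives as an ndarray; a contiguous layout is [0, 1, ..., n-1].
    if dim_order is not None:
        order = [int(d) for d in dim_order.val]
        assert order == list(range(len(order))), (
            "Only contiguous memory format is supported in CoreML"
        )

    # No layout information to preserve, so treat the op as a no-op clone.
    noop(context, node)
```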
23 changes: 0 additions & 23 deletions backends/apple/coreml/test/test_torch_ops.py
@@ -213,28 +213,6 @@ def test_dequantize_codebook_embedding(self):
et_prog = delegated_program.to_executorch()
self._compare_outputs(et_prog, model, example_inputs)

def test__clone_dim_order_contiguous(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.ops.dim_order_ops._clone_dim_order(
x, dim_order=[0, 1, 2, 3]
)

model, example_inputs = Model(), (torch.randn(1, 3, 8, 8),)
ep = torch.export.export(model, example_inputs)
delegated_program = executorch.exir.to_edge_transform_and_lower(
ep,
partitioner=[self._coreml_partitioner()],
)
for node in delegated_program.exported_program().graph.nodes:
if node.op == "call_function":
assert node.target.__name__ in [
"executorch_call_delegate",
"getitem",
], f"Got unexpected node target after delegation: {node.target.__name__}"
et_prog = delegated_program.to_executorch()
self._compare_outputs(et_prog, model, example_inputs)


if __name__ == "__main__":
test_runner = TestTorchOps()
@@ -245,4 +223,3 @@ def forward(self, x):
test_runner.test_dequantize_affine_c8w_embedding_b4w_linear()
test_runner.test_dequantize_codebook_linear()
test_runner.test_dequantize_codebook_embedding()
test_runner.test__clone_dim_order_contiguous()
2 changes: 1 addition & 1 deletion backends/arm/_passes/remove_clone_pass.py
@@ -14,7 +14,7 @@ class RemoveClonePass(ExportPass):
"""Remove all clones from graph_module"""

def call_operator(self, op, args, kwargs, meta):
if op != exir_ops.edge.dim_order_ops._clone_dim_order.default:
if op != exir_ops.edge.aten.clone.default:
return super().call_operator(op, args, kwargs, meta)

if len(args) != 1:
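After this change the pass keys on the edge-dialect `aten.clone` op directly instead of the dim_order variant. A rough sketch of how such a pass drops clones via `ExportPass.call_operator`; everything beyond the lines visible above (error handling, the returned value) is assumed, not copied from the repository:

```python
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass


class RemoveClonePass(ExportPass):
    """Remove all clones from graph_module"""

    def call_operator(self, op, args, kwargs, meta):
        # Leave every op except the edge-dialect clone untouched.
        if op != exir_ops.edge.aten.clone.default:
            return super().call_operator(op, args, kwargs, meta)

        if len(args) != 1:
            raise ValueError(f"clone expects a single input, got {len(args)}")

        # Forward the clone's input unchanged, which removes the node from the graph.
        return args[0]
```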
1 change: 0 additions & 1 deletion backends/arm/operator_support/__init__.py
@@ -6,7 +6,6 @@
# pyre-unsafe

from . import ( # noqa
clone_dim_order_support,
convolution_support,
embedding_support,
ethos_u55_support,
76 changes: 0 additions & 76 deletions backends/arm/operator_support/clone_dim_order_support.py

This file was deleted.

@@ -38,7 +38,7 @@
]
linear_residual_exir_op: list[str] = [
"executorch_exir_dialects_edge__ops_aten_gelu_default",
"executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default",
"executorch_exir_dialects_edge__ops_aten_clone_default",
"executorch_exir_dialects_edge__ops_aten_linear_default",
"executorch_exir_dialects_edge__ops_aten_add_Tensor",
]
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_clone.py
@@ -23,7 +23,7 @@
)

aten_op = "torch.ops.aten.clone.default"
exir_op = "executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default"
exir_op = "executorch_exir_dialects_edge__ops_aten_clone_default"

input_t = Tuple[torch.Tensor]

6 changes: 2 additions & 4 deletions backends/arm/test/passes/test_remove_clone_pass.py
@@ -35,11 +35,9 @@ def test_remove_clone_tosa_INT():
module.get_inputs(),
quantize=True,
ops_before_pass={
"executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default": 1,
"executorch_exir_dialects_edge__ops_aten_clone_default": 1,
},
ops_not_after_pass=[
"executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default"
],
ops_not_after_pass=["executorch_exir_dialects_edge__ops_aten_clone_default"],
pass_list=[RemoveClonePass],
)
pipeline.run()
19 changes: 0 additions & 19 deletions exir/passes/dim_order_ops_registry.py
@@ -28,14 +28,6 @@
"_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
)

lib.define(
"_clone_dim_order(Tensor self, *, bool non_blocking=False, int[]? dim_order=None) -> Tensor"
)

lib.define(
"_clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
)


def _op_impl(target, *args, **kwargs):
kwargs["memory_format"] = get_memory_format(kwargs.get("dim_order", None))
@@ -65,23 +57,12 @@ def _empty_dim_order_out_impl(*args, **kwargs):
return _op_impl(torch.ops.aten.empty.out, *args, **kwargs)


@impl(lib, "_clone_dim_order", "CompositeImplicitAutograd")
def _clone_dim_order_impl(*args, **kwargs):
return _op_impl(torch.ops.aten.clone.default, *args, **kwargs)


@impl(lib, "_clone_dim_order.out", "CompositeImplicitAutograd")
def _clone_dim_order_out_impl(*args, **kwargs):
return _op_impl(torch.ops.aten.clone.out, *args, **kwargs)


"""
Defines a map of edge ops to the corresponding dim_order ops for quick lookup
"""
DimOrderOpsMap = {
exir_ops.edge.aten._to_copy.default: exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
exir_ops.edge.aten.empty.memory_format: exir_ops.edge.dim_order_ops._empty_dim_order.default,
exir_ops.edge.aten.clone.default: exir_ops.edge.dim_order_ops._clone_dim_order.default,
}

"""
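The `_clone_dim_order` entries removed above followed the same pattern the file keeps for `_to_dim_order_copy` and `_empty_dim_order`: `lib.define` declares the schema, and an `@impl(..., "CompositeImplicitAutograd")` wrapper maps the `dim_order` kwarg to a `memory_format` and dispatches to the matching aten op. A condensed sketch of that pattern; the library construction and the `get_memory_format` import path are assumed from the surrounding code:

```python
import torch
from executorch.exir.dim_order_utils import get_memory_format  # assumed import path
from torch.library import impl, Library

# The real registry creates this library once at module scope.
lib = Library("dim_order_ops", "DEF")

lib.define(
    "_clone_dim_order(Tensor self, *, bool non_blocking=False, int[]? dim_order=None) -> Tensor"
)


@impl(lib, "_clone_dim_order", "CompositeImplicitAutograd")
def _clone_dim_order_impl(*args, **kwargs):
    # Translate the dim_order kwarg into a memory_format and drop kwargs that
    # aten.clone does not accept before dispatching.
    kwargs["memory_format"] = get_memory_format(kwargs.pop("dim_order", None))
    kwargs.pop("non_blocking", None)
    return torch.ops.aten.clone.default(*args, **kwargs)
```

With the `DimOrderOpsMap` entry gone as well, edge `aten.clone` nodes are no longer rewritten into `_clone_dim_order`, which is why the Arm pass and tests earlier in this diff match `aten_clone_default` again.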
52 changes: 0 additions & 52 deletions exir/tests/test_memory_format_ops_pass.py
@@ -27,10 +27,7 @@
AmbiguousDimOrderError,
MemoryFormatOpsPassTestUtils,
MemoryFormatTestSet,
PropagateToCloneChannelsLastModule,
PropagateToCopyChannalsLastModule,
SimpleCloneChannelsLastModule,
SimpleCloneContiguousModule,
SimpleEmptyChannelLastModule,
SimpleEmptyContiguoustModule,
SimpleToCopyChannelsLastModule,
@@ -94,36 +91,6 @@ def test_op_empty_replacement_contiguous(self) -> None:
),
)

def test_op_clone_replacement_contiguous(self) -> None:
model = SimpleCloneContiguousModule()
MemoryFormatOpsPassTestUtils.memory_format_test_runner(
self,
MemoryFormatTestSet(
module=model.eval(),
op=torch.ops.aten.clone.default,
sample_input=(
torch.randn((3, 4, 5, 6)).to(memory_format=torch.channels_last),
),
target_memory_format=torch.contiguous_format,
_load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
),
)

def test_op_clone_replacement_channels_last(self) -> None:
model = SimpleCloneChannelsLastModule()
MemoryFormatOpsPassTestUtils.memory_format_test_runner(
self,
MemoryFormatTestSet(
module=model.eval(),
op=torch.ops.aten.clone.default,
sample_input=(
torch.randn((3, 4, 5, 6)).to(memory_format=torch.contiguous_format),
),
target_memory_format=torch.channels_last,
_load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
),
)

def test_op_dim_order_update(self) -> None:
MemoryFormatOpsPassTestUtils.memory_format_test_runner(
self,
@@ -161,25 +128,6 @@ def test_op_dim_order_propagation(self) -> None:
check_unambiguous_dim_order=True,
)

def test_op_clone_dim_order_propagation(self) -> None:
MemoryFormatOpsPassTestUtils.memory_format_test_runner(
self,
MemoryFormatTestSet(
module=PropagateToCloneChannelsLastModule().eval(),
op=torch.ops.aten.clone.default,
sample_input=(
torch.rand_like(
torch.zeros([2, 2, 2, 2]),
dtype=torch.float32,
memory_format=torch.contiguous_format,
),
),
target_memory_format=torch.channels_last,
_load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
),
check_unambiguous_dim_order=True,
)

def test_op_dim_order_propagation_ambiguous(self) -> None:
try:
MemoryFormatOpsPassTestUtils.memory_format_test_runner(
30 changes: 0 additions & 30 deletions exir/tests/test_memory_format_ops_pass_utils.py
@@ -38,10 +38,6 @@
"torch.ops.aten.empty.memory_format",
"executorch_exir_dialects_edge__ops_dim_order_ops__empty_dim_order_default",
),
torch.ops.aten.clone.default: (
"torch.ops.aten.clone.default",
"executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default",
),
}


@@ -74,22 +70,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.to(dtype=torch.double, memory_format=torch.channels_last)


class SimpleCloneContiguousModule(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.clone(memory_format=torch.contiguous_format)


class SimpleCloneChannelsLastModule(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.clone(memory_format=torch.channels_last)


class SimpleEmptyContiguoustModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -122,16 +102,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
return t1 * t2


class PropagateToCloneChannelsLastModule(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x: torch.Tensor) -> torch.Tensor:
t1 = x.clone(memory_format=torch.channels_last)
t2 = t1 + t1
return t1 * t2


class AmbiguousDimOrderError(RuntimeError):
pass
