Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions backends/arm/_passes/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@
ReplaceScalarWithTensorArgPassTOSABI,
ReplaceScalarWithTensorArgPassTOSAMI,
)
from .rewrite_matmul import RewriteMatmulPass # noqa
from .rewrite_upsample import RewriteUpsamplePass # noqa
from .scalars_to_attribute_pass import ScalarsToAttributePass # noqa
from .size_adjust_input_pass import SizeAdjustInputPass # noqa
Expand Down
3 changes: 3 additions & 0 deletions backends/arm/_passes/arm_pass_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@
ReplaceScalarWithTensorArgPassTOSABI,
ReplaceScalarWithTensorArgPassTOSAMI,
RetraceFoldedDtypesPass,
RewriteMatmulPass,
RewriteUpsamplePass,
ScalarsToAttributePass,
SizeAdjustInputPass,
Expand Down Expand Up @@ -211,6 +212,7 @@ def _tosa_INT_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
self.add_pass(RewriteUpsamplePass(exported_program))
self.add_pass(AddBiasPass(exported_program))

self.add_pass(RewriteMatmulPass(exported_program))
self.add_pass(FuseEqualPlaceholdersPass(exported_program))
self.add_pass(ToTosaMemoryFormatPass(exported_program))
self.add_pass(RemoveNoopPass())
Expand Down Expand Up @@ -297,6 +299,7 @@ def _tosa_FP_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
self.add_pass(RewriteUpsamplePass(exported_program))
self.add_pass(AddBiasPass(exported_program))
self.add_pass(InsertTableOpsPass(exported_program))
self.add_pass(RewriteMatmulPass(exported_program))
self.add_pass(FuseEqualPlaceholdersPass(exported_program))
self.add_pass(ToTosaMemoryFormatPass(exported_program))
self.add_pass(RemoveNoopPass())
Expand Down
1 change: 1 addition & 0 deletions backends/arm/_passes/fuse_constant_ops_pass.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,7 @@ def call(self, graph_module):
if node.op != "call_function":
continue
if node.target in [
exir_ops.backend.tosa.MATMUL.default,
exir_ops.backend.tosa.RESCALE.default,
exir_ops.backend.tosa.RESIZE.default,
exir_ops.backend.tosa.TABLE.default,
Expand Down
97 changes: 97 additions & 0 deletions backends/arm/_passes/rewrite_matmul.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Set, Type

import torch
from executorch.backends.arm._passes import ArmPass
from executorch.backends.arm._passes.arm_pass_utils import (
create_node,
get_first_fake_tensor,
)
from executorch.backends.arm._passes.fold_qdq_with_annotated_qparams_pass import (
get_input_qparams,
get_output_qparams,
)
from executorch.backends.arm.tosa.mapping import TosaSpecialDtype
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass, PassResult


class RewriteMatmulPass(ArmPass):
    """Rewrite ``aten.bmm`` to ``tosa.MATMUL``, inserting a ``tosa.RESCALE``
    when needed.

    TOSA's MATMUL is not a one-to-one mapping of ``aten.bmm``: for int8/int16
    inputs TOSA produces an int32 (or int48) accumulator output, while
    ``aten.bmm`` returns the narrow dtype. When that mismatch occurs, a
    RESCALE op is appended to bring the result back to the dtype callers
    expect.
    """

    _passes_required_after: Set[Type[ExportPass]] = set()

    def _insert_output_rescale(self, graph_module, node, tosa_matmul_node, dtype):
        """Insert a ``tosa.RESCALE`` after ``tosa_matmul_node``.

        Args:
            graph_module: The graph module being rewritten.
            node: The original ``aten.bmm`` node; its annotated q-params
                supply the input/output scales and zero-points.
            tosa_matmul_node: The freshly created ``tosa.MATMUL`` node whose
                int32 output must be rescaled.
            dtype: Target dtype of the rescaled output (int8 or int16).
        """
        input_qparams = get_input_qparams(node)
        output_qparams = get_output_qparams(node)[0]
        # Requantization factor: (s_x1 * s_x2) / s_out, per-tensor only.
        scale = (
            input_qparams[0].get_scale_per_tensor()
            * input_qparams[1].get_scale_per_tensor()
        ) / output_qparams.get_scale_per_tensor()

        with graph_module.graph.inserting_after(tosa_matmul_node):
            rescale_node = create_node(
                graph_module.graph,
                op_target=exir_ops.backend.tosa.RESCALE.default,
                from_node=tosa_matmul_node,
            )
        # Rewire users first, then set args: doing it in this order lets
        # replace_all_uses_with redirect every consumer of the matmul to the
        # rescale, after which the rescale itself is pointed back at the
        # matmul output.
        tosa_matmul_node.replace_all_uses_with(rescale_node)
        rescale_node.args = (
            tosa_matmul_node,
            dtype,
            scale,
            0,  # input zero-point: TOSA int32 accumulator output has zp == 0
            output_qparams.get_zp_per_tensor(),
        )

    def call(self, graph_module):
        """Run the pass over ``graph_module``.

        Returns:
            PassResult with the (possibly) rewritten graph and a flag
            indicating whether any ``aten.bmm`` was rewritten.
        """
        modified = False
        for node in graph_module.graph.nodes:
            if (
                node.op != "call_function"
                or node.target != exir_ops.edge.aten.bmm.default
            ):
                continue
            modified = True

            x1, x2 = node.args
            tosa_matmul_target = exir_ops.backend.tosa.MATMUL.default
            with graph_module.graph.inserting_before(node):
                tosa_matmul_node = create_node(
                    graph_module.graph,
                    op_target=tosa_matmul_target,
                    args=(x1, x2),
                    kwargs={},
                    from_node=node,
                )
            node.replace_all_uses_with(tosa_matmul_node)

            x1_fake_tensor = get_first_fake_tensor(x1)
            x2_fake_tensor = get_first_fake_tensor(x2)
            # Evaluate the TOSA op on fake tensors to learn the dtype TOSA
            # will actually produce (int32 for int8/int16 inputs).
            output_fake_tensor = tosa_matmul_target(x1_fake_tensor, x2_fake_tensor)
            node_output_fake_tensor = get_first_fake_tensor(node)
            if (
                output_fake_tensor.dtype == torch.int32
                and node_output_fake_tensor.dtype in (torch.int8, torch.int16)
            ):
                self._insert_output_rescale(
                    graph_module,
                    node,
                    tosa_matmul_node,
                    dtype=node_output_fake_tensor.dtype,
                )
                if x1_fake_tensor.dtype == torch.int16:
                    # int16 inputs accumulate in int48 on TOSA hardware; tag
                    # the node so serialization picks the special dtype.
                    tosa_matmul_node.meta[TosaSpecialDtype.meta_key()] = (
                        TosaSpecialDtype.INT48
                    )

            # Erase only after all reads of `node` (fake tensor, q-params)
            # are done. The previous version erased before those reads and
            # relied on the erased node's meta surviving, which is an
            # implementation detail of torch.fx, not a guarantee.
            graph_module.graph.erase_node(node)

        if modified:
            graph_module.recompile()
            graph_module = super().call(graph_module).graph_module
        return PassResult(graph_module, modified)
10 changes: 5 additions & 5 deletions backends/arm/operators/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
op_any,
op_avg_pool2d,
op_bitwise_not,
op_bmm,
op_cat,
op_ceil,
op_clamp,
Expand Down Expand Up @@ -42,19 +41,20 @@
op_pow,
op_reciprocal,
op_repeat,
op_rescale,
op_resize,
op_rshift_tensor,
op_rsqrt,
op_sigmoid,
op_sin,
op_slice,
op_sub,
op_sum,
op_table,
op_tanh,
op_to_dim_order_copy,
op_transpose,
op_tosa_matmul,
op_tosa_rescale,
op_tosa_resize,
op_tosa_table,
op_tosa_transpose,
op_view,
op_where,
ops_binary,
Expand Down
143 changes: 0 additions & 143 deletions backends/arm/operators/op_bmm.py

This file was deleted.

Loading
Loading