From f73b80245e75eba5ba9baea1a3224b1d8d9e81cf Mon Sep 17 00:00:00 2001
From: Scott Wolchok
Date: Tue, 10 Sep 2024 16:15:36 -0700
Subject: [PATCH] [ExecuTorch] Preserve undelegated Linear ops in Llama demo export

Allows us to use optimized op_linear from the previous diff.

Differential Revision: [D62262532](https://our.internmc.facebook.com/intern/diff/D62262532/)

[ghstack-poisoned]
---
 extension/llm/export/builder.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/extension/llm/export/builder.py b/extension/llm/export/builder.py
index 4237ae7b3a7..338d997297d 100644
--- a/extension/llm/export/builder.py
+++ b/extension/llm/export/builder.py
@@ -16,6 +16,7 @@
 from executorch.backends.transforms.duplicate_dynamic_quant_chain import (
     DuplicateDynamicQuantChainPass,
 )
+from executorch.backends.xnnpack.passes.convert_to_linear import ConvertToLinearPass
 from executorch.exir import EdgeProgramManager
 from executorch.exir.backend.partitioner import Partitioner
@@ -382,6 +383,10 @@ def to_executorch(self) -> "LLMEdgeManager":
             ExecutorchBackendConfig(
                 extract_delegate_segments=True,
                 passes=[
+                    # If there are Linear operations left in the graph, let's execute
+                    # them with the optimized op_linear rather than materializing a
+                    # transpose followed by a regular op_mm.
+                    ConvertToLinearPass(),
                     QuantFusionPass(),
                 ],
                 memory_planning_pass=MemoryPlanningPass(
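
For context, a minimal sketch of the algebraic equivalence this change relies on (illustrative only; the actual graph pattern matching lives in ConvertToLinearPass, and the tensor names and shapes below are made up):

    import torch

    x = torch.randn(4, 8)   # activations: [batch, in_features]
    w = torch.randn(16, 8)  # weight, stored as [out_features, in_features]

    # Without the pass, an undelegated Linear can be left decomposed as a
    # transpose followed by a plain matmul, which materializes w.t():
    y_mm = torch.mm(x, w.t())

    # With the pattern fused back into a single linear op, the runtime can
    # dispatch to the optimized op_linear kernel instead:
    y_linear = torch.nn.functional.linear(x, w)

    # Both forms compute the same result.
    assert torch.allclose(y_mm, y_linear)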