Skip to content

Commit 3027be6

Browse files
committed
Align aot_arm_compiler to latest export flow
- Update to use to_edge_transform_and_lower rather than export_to_edge. - Fix channel-last memory format handling on some models. Signed-off-by: Rob Elliott <Robert.Elliott@arm.com> Change-Id: I0f8e9206aa1ff3004a746955010c7bf01c896347
1 parent 6b858f2 commit 3027be6

File tree

1 file changed

+35
-29
lines changed

1 file changed

+35
-29
lines changed

examples/arm/aot_arm_compiler.py

Lines changed: 35 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,11 @@
2525
from executorch.backends.arm.util.arm_model_evaluator import GenericModelEvaluator
2626

2727
from executorch.devtools.backend_debug import get_delegation_info
28-
from executorch.exir import EdgeCompileConfig, ExecutorchBackendConfig
28+
from executorch.exir import (
29+
EdgeCompileConfig,
30+
ExecutorchBackendConfig,
31+
to_edge_transform_and_lower,
32+
)
2933
from executorch.extension.export_util.utils import export_to_edge, save_pte_program
3034
from tabulate import tabulate
3135

@@ -185,7 +189,7 @@ def get_compile_spec(target: str, intermediates: bool) -> ArmCompileSpecBuilder:
185189
memory_mode="Shared_Sram",
186190
extra_flags="--debug-force-regor --output-format=raw",
187191
)
188-
.set_permute_memory_format(args.model_name in MODEL_NAME_TO_MODEL.keys())
192+
.set_permute_memory_format(True)
189193
.set_quantize_io(True)
190194
)
191195
elif "ethos-u85" in target:
@@ -202,7 +206,7 @@ def get_compile_spec(target: str, intermediates: bool) -> ArmCompileSpecBuilder:
202206
)
203207

204208
if intermediates is not None:
205-
spec_builder.dump_intermediate_artifacts_to(args.intermediates)
209+
spec_builder.dump_intermediate_artifacts_to(intermediates)
206210

207211
return spec_builder.build()
208212

@@ -356,40 +360,42 @@ def get_args():
356360
model, example_inputs = get_model_and_inputs_from_name(args.model_name)
357361
model = model.eval()
358362

363+
# export_for_training under the assumption we quantize, the exported form also works
364+
# in to_edge if we don't quantize
365+
exported_program = torch.export.export_for_training(model, example_inputs)
366+
model = exported_program.module()
359367
model_fp32 = model
360368

361-
# pre-autograd export. eventually this will become torch.export
362-
model = torch.export.export_for_training(model, example_inputs).module()
363-
364369
# Quantize if required
365370
model_int8 = None
366371
if args.quantize:
367372
model = quantize(model, example_inputs)
368373
model_int8 = model
374+
# Wrap quantized model back into an exported_program
375+
exported_program = torch.export.export_for_training(model, example_inputs)
376+
377+
if args.delegate:
378+
# As we can target multiple output encodings from ArmBackend, one must
379+
# be specified.
380+
compile_spec = get_compile_spec(args.target, args.intermediates)
381+
edge = to_edge_transform_and_lower(
382+
exported_program,
383+
partitioner=[ArmPartitioner(compile_spec)],
384+
compile_config=EdgeCompileConfig(
385+
_check_ir_validity=False,
386+
_skip_dim_order=True,
387+
),
388+
)
389+
else:
390+
edge = to_edge_transform_and_lower(
391+
exported_program,
392+
compile_config=EdgeCompileConfig(
393+
_check_ir_validity=False,
394+
_skip_dim_order=True,
395+
),
396+
)
369397

370-
edge = export_to_edge(
371-
model,
372-
example_inputs,
373-
edge_compile_config=EdgeCompileConfig(
374-
_check_ir_validity=False,
375-
),
376-
)
377-
378-
# As we can target multiple output encodings from ArmBackend, one must
379-
# be specified.
380-
compile_spec = (
381-
get_compile_spec(args.target, args.intermediates)
382-
if args.delegate is True
383-
else None
384-
)
385-
386-
logging.debug(f"Exported graph:\n{edge.exported_program().graph}")
387-
if args.delegate is True:
388-
edge = edge.to_backend(ArmPartitioner(compile_spec))
389-
390-
dump_delegation_info(edge, args.intermediates)
391-
392-
logging.debug(f"Lowered graph:\n{edge.exported_program().graph}")
398+
dump_delegation_info(edge, args.intermediates)
393399

394400
try:
395401
exec_prog = edge.to_executorch(

0 commit comments

Comments
 (0)