From 2613da29a462b5d41283c01f37b29c86c6d3ca8e Mon Sep 17 00:00:00 2001
From: Hansong <107070759+kirklandsign@users.noreply.github.com>
Date: Thu, 21 Nov 2024 15:22:11 -0800
Subject: [PATCH 1/2] Update fuse_ops.py

---
 backends/cadence/aot/fuse_ops.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/backends/cadence/aot/fuse_ops.py b/backends/cadence/aot/fuse_ops.py
index 8137c1fdbd2..c4f63c5b926 100644
--- a/backends/cadence/aot/fuse_ops.py
+++ b/backends/cadence/aot/fuse_ops.py
@@ -426,6 +426,9 @@ def fuse_quantized_batch_norm_with_conv(
         # Note: there is a quantized.conv2d.new operator in the resulting graph
         # that takes a torch.classes.quantized.Conv2dPackedParamsBase as one of the input
         # this prevents us to directly call graph_module.recompile().
+        # pyre-fixme[16]: `GraphModule` has no attribute `_code`.
+        # pyre-fixme[16]: Item `Tensor` of `Tensor | Module` has no attribute
+        #  `python_code`.
         graph_module._code = graph_module._graph.python_code(root_module="self").src
 
     def __init__(self):

From 7fdd2bf57acdd3a580963509022aca401fc30f6c Mon Sep 17 00:00:00 2001
From: Hansong Zhang
Date: Thu, 21 Nov 2024 15:27:47 -0800
Subject: [PATCH 2/2] lint

---
 .../llama/source_transformation/apply_spin_quant_r1_r2.py | 3 ++-
 exir/emit/_emitter.py                                     | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/models/llama/source_transformation/apply_spin_quant_r1_r2.py b/examples/models/llama/source_transformation/apply_spin_quant_r1_r2.py
index 6512f462fd9..7ec35c7b6c9 100644
--- a/examples/models/llama/source_transformation/apply_spin_quant_r1_r2.py
+++ b/examples/models/llama/source_transformation/apply_spin_quant_r1_r2.py
@@ -148,7 +148,8 @@ def fuse_ln_linear(
             linear.bias.data = linear.bias.data.to(dtype=torch.float32) + torch.matmul(
                 # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
                 #  `Union[Tensor, Module]`.
-                W_, layernorm.bias.to(dtype=torch.float32)
+                W_,
+                layernorm.bias.to(dtype=torch.float32),
             )
             linear.bias.data = linear.bias.data.to(linear_dtype)
 
diff --git a/exir/emit/_emitter.py b/exir/emit/_emitter.py
index 645d0cb47bc..a1dcc23dcee 100644
--- a/exir/emit/_emitter.py
+++ b/exir/emit/_emitter.py
@@ -1635,7 +1635,8 @@ def plan(self) -> ExecutionPlan:
             #  empty list.
             non_const_buffer_sizes=typing.cast(
                 # pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch._C.TensorB...
-                List[int], self.module.meta["non_const_buffer_sizes"]
+                List[int],
+                self.module.meta["non_const_buffer_sizes"],
             ),
             container_meta_type=self.container_meta_type,
         )
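
For context, a minimal standalone sketch of the pattern that patch 1 annotates:
regenerating a GraphModule's Python source directly from its underlying fx
Graph when graph_module.recompile() cannot be used. The toy module `M` and the
symbolic trace below are hypothetical; only the
`python_code(root_module="self").src` call and the `_code` assignment are taken
from the patch itself.

    # A minimal sketch, assuming a toy traceable module; `M` is hypothetical.
    import torch
    import torch.fx


    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + 1


    gm = torch.fx.symbolic_trace(M())

    # recompile() would normally regenerate gm.code after graph edits, but per
    # the note in patch 1 it cannot be called when the graph carries inputs
    # such as torch.classes.quantized.Conv2dPackedParamsBase; the source can
    # still be rebuilt straight from the underlying Graph:
    gm._code = gm._graph.python_code(root_module="self").src
    print(gm._code)  # the regenerated forward() source

Because `_code` and `python_code` are private/untyped surfaces, pyre flags both
accesses, which is what the added pyre-fixme[16] suppressions in patch 1 cover.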