pin update #7273
Changes from all commits
@@ -1 +1 @@
-19eff28ff3f19b50da46f5a9ff5f4d4d213806fe
+2ea4b56ec872424e486c4fe2d55da061067a2ed3
@@ -10,9 +10,11 @@
 # pyre-unsafe

+import contextlib
 import logging
 from enum import Enum
 from typing import Any, Callable, Dict, List, Optional
+from unittest.mock import patch

 import torch
 from executorch.backends.transforms.duplicate_dynamic_quant_chain import (
@@ -184,15 +186,23 @@ def export(self) -> "LLMEdgeManager":
         # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
         with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
             if hasattr(self.args, "qnn") and self.args.qnn:
-                # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
-                # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
-                exported_module = torch.export.export(
-                    self.model,
-                    self.example_inputs,
-                    self.example_kwarg_inputs,
-                    dynamic_shapes=dynamic_shape,
-                    strict=True,
-                )
+                # TODO: this is temporary, as qnn flow does not work with new, non-functional export IR.
+                # See issue: https://github.com/pytorch/executorch/issues/7373
+                with patch.object(
+                    torch._utils_internal,
+                    "export_training_ir_rollout_check",
+                    return_value=False,
+                ):
+                    # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
+                    # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
+                    exported_module = torch.export.export(
+                        self.model,
+                        self.example_inputs,
+                        self.example_kwarg_inputs,
+                        dynamic_shapes=dynamic_shape,
+                        strict=True,
+                    )
             else:
                 logging.info("Exporting with:")
                 logging.info(f"inputs: {self.example_inputs}")
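For context on the pattern introduced above: `unittest.mock.patch.object` is used as a context manager so that `torch._utils_internal.export_training_ir_rollout_check` temporarily returns `False` while `torch.export.export` runs, and the original attribute is restored on exit. Below is a minimal, self-contained sketch of that mechanism using a toy stand-in module; the names `fake_internal`, `use_training_ir`, and `export_model` are illustrative only and are not part of torch or executorch.

```python
import types
from unittest.mock import patch

# Toy stand-in for torch._utils_internal: a module-like object exposing a
# feature-gate function analogous to export_training_ir_rollout_check.
fake_internal = types.SimpleNamespace(use_training_ir=lambda: True)

def export_model() -> str:
    # The "exporter" consults the gate to decide which IR to emit.
    return "training IR" if fake_internal.use_training_ir() else "legacy IR"

print(export_model())  # training IR

# patch.object replaces the attribute with a mock whose return_value is False
# for the duration of the with-block, then restores the original attribute.
with patch.object(fake_internal, "use_training_ir", return_value=False):
    print(export_model())  # legacy IR

print(export_model())  # training IR again
```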
|
|
@@ -354,15 +364,25 @@ def export_to_edge(self) -> "LLMEdgeManager":
         if self.pre_autograd_graph_module is None:
             # Run export() if it didn't run
             self.export()
-        self.edge_manager = export_to_edge(
-            self.pre_autograd_graph_module,  # pyre-fixme[6]
-            self.example_inputs,
-            example_kwarg_inputs=self.example_kwarg_inputs,
-            dynamic_shapes=dynamic_shape,
-            edge_constant_methods=self.metadata,
-            edge_compile_config=edge_config,
-            verbose=self.verbose,
-        )
+
+        override_export_behaviour = contextlib.nullcontext()
+        if hasattr(self.args, "qnn") and self.args.qnn:
+            override_export_behaviour = patch.object(
+                torch._utils_internal,
+                "export_training_ir_rollout_check",
+                return_value=False,
+            )
+
+        with override_export_behaviour:
+            self.edge_manager = export_to_edge(
+                self.pre_autograd_graph_module,  # pyre-fixme[6]
+                self.example_inputs,
+                example_kwarg_inputs=self.example_kwarg_inputs,
+                dynamic_shapes=dynamic_shape,
+                edge_constant_methods=self.metadata,
+                edge_compile_config=edge_config,
+                verbose=self.verbose,
+            )
         return self

     def to_backend(self, partitioners: Optional[List[Partitioner]]) -> "LLMEdgeManager":

Review comment on the `override_export_behaviour = contextlib.nullcontext()` line: "Can you leave a TODO here and mention the issue you filed?" Reply: "ah - I left it at the top of the block."
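The rewrite above relies on the fact that an un-entered `patch.object(...)` patcher is itself a context manager, so it can be chosen conditionally and entered with a single `with` statement, with `contextlib.nullcontext()` as the no-op default. Below is a minimal sketch of that pattern; `settings`, `strict_checks`, and `run_export` are illustrative names, not executorch APIs.

```python
import contextlib
import types
from unittest.mock import patch

# Illustrative stand-ins; not part of the executorch API.
settings = types.SimpleNamespace(strict_checks=lambda: True)

def run_export(backend_is_qnn: bool) -> bool:
    # Default is a no-op context manager.
    override = contextlib.nullcontext()
    if backend_is_qnn:
        # An un-started patcher is itself a context manager, so it can be
        # stored now and entered later by the single `with` below.
        override = patch.object(settings, "strict_checks", return_value=False)

    with override:
        # The override (if any) is only active inside this block.
        return settings.strict_checks()

print(run_export(backend_is_qnn=False))  # True  -> no override applied
print(run_export(backend_is_qnn=True))   # False -> strict_checks patched out
print(settings.strict_checks())          # True  -> original restored on exit
```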
|
|
@@ -298,7 +298,6 @@ prebuilt_cxx_library(
     name = "libtorch",
     shared_lib = ":libtorch_gen[libtorch]",
     exported_preprocessor_flags = [
-        "-D_GLIBCXX_USE_CXX11_ABI=0",  # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
         "-I$(location :libtorch_gen[include])",  # include header directories
         "-I$(location :libtorch_gen[include])/torch/csrc/api/include",  # include header directories
     ],

Review comments on the removed `-D_GLIBCXX_USE_CXX11_ABI=0` flag: "Let's see if this works. @atalman Do you know if we have switched the libtorch build to CXX11_ABI yet? I don't see it in pytorch/pytorch#143423 yet." Reply: "All the tests are green! Good to land? @huydhn"

@@ -318,7 +317,6 @@ prebuilt_cxx_library(
     name = "libtorch_python",
     shared_lib = ":libtorch_gen[libtorch_python]",
     exported_preprocessor_flags = [
-        "-D_GLIBCXX_USE_CXX11_ABI=0",  # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
         "-I$(location :libtorch_gen[include])",  # include header directories
         "-I$(location :libtorch_gen[include])/torch/csrc/api/include",  # include header directories
     ],
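Side note on the removed flag: `-D_GLIBCXX_USE_CXX11_ABI=0` pinned anything depending on `:libtorch` to the pre-CXX11 libstdc++ ABI. Whether a given libtorch build actually uses the new ABI can be checked from Python with `torch.compiled_with_cxx11_abi()`; the snippet below is a small sketch of that check, not part of this PR, and only inspects the local install.

```python
import torch

# Report which libstdc++ ABI the installed libtorch was built with.
if torch.compiled_with_cxx11_abi():
    print("libtorch uses the CXX11 ABI; do not force -D_GLIBCXX_USE_CXX11_ABI=0")
else:
    print("libtorch predates the CXX11 ABI; dependents must also build with "
          "-D_GLIBCXX_USE_CXX11_ABI=0")
```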