From 566c7bbc57b19731d4244df41bcaa5ff6afb93f6 Mon Sep 17 00:00:00 2001
From: Stephen Jia
Date: Fri, 24 Jan 2025 08:27:38 -0800
Subject: [PATCH 1/2] [ET-VK][ez] Add back tensor dim check

## Context

Vulkan cannot currently represent higher dimensional tensors (tensors with dim > 4), but some refactors implemented last year accidentally removed the partitioner check that prevents ops involving high dimensional tensors from being lowered. This diff adds back the check, along with a test to verify that high dimensional tensors do not get lowered.

Differential Revision: [D68630966](https://our.internmc.facebook.com/intern/diff/D68630966/)

[ghstack-poisoned]
---
 .../vulkan/partitioner/vulkan_partitioner.py |  8 +++++
 backends/vulkan/test/test_vulkan_delegate.py | 36 ++++++++++++++++---
 backends/vulkan/utils.py                     | 18 ++++++++++
 3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/backends/vulkan/partitioner/vulkan_partitioner.py b/backends/vulkan/partitioner/vulkan_partitioner.py
index cb14e96962d..deb4327c7b4 100644
--- a/backends/vulkan/partitioner/vulkan_partitioner.py
+++ b/backends/vulkan/partitioner/vulkan_partitioner.py
@@ -83,6 +83,10 @@ def op_node_is_compatible(
             return False, "no operator implementation"
         features = get_op_features(target)
 
+        # Check for high dimensional tensors
+        if utils.tensor_node_is_high_dim(node):
+            return False, "contains high dim tensor"
+
         valid_texture_layouts = utils.possible_node_memory_layouts(
             node, self.texture_limits
         )
@@ -94,6 +98,10 @@ def op_node_is_compatible(
                 and utils.is_tensor_node(arg)
                 and i not in features.skip_limits_check
             ):
+                # Check for high dimensional tensors
+                if utils.tensor_node_is_high_dim(arg):
+                    return False, "contains high dim tensor"
+
                 arg_texture_layouts = utils.possible_node_memory_layouts(
                     arg, self.texture_limits
                 )
diff --git a/backends/vulkan/test/test_vulkan_delegate.py b/backends/vulkan/test/test_vulkan_delegate.py
index 8a2701a5c02..129f40df8b1 100644
--- a/backends/vulkan/test/test_vulkan_delegate.py
+++ b/backends/vulkan/test/test_vulkan_delegate.py
@@ -97,6 +97,7 @@ def lower_module_and_test_output(
         dynamic_shapes=None,
         test_inputs=None,
         first_output_only=False,
+        expect_no_delegates=False,
     ):
         """
         Helper testing function that takes a torch.nn.Module and lowers it to Vulkan with
@@ -125,10 +126,23 @@ def run_test():
             )
 
             executorch_program = edge_program.to_executorch()
-            self.assertEqual(
-                executorch_program.executorch_program.execution_plan[0].delegates[0].id,
-                VulkanBackend.__name__,
-            )
+            if expect_no_delegates:
+                self.assertEqual(
+                    len(
+                        executorch_program.executorch_program.execution_plan[
+                            0
+                        ].delegates
+                    ),
+                    0,
+                )
+                return
+            else:
+                self.assertEqual(
+                    executorch_program.executorch_program.execution_plan[0]
+                    .delegates[0]
+                    .id,
+                    VulkanBackend.__name__,
+                )
 
             executorch_module = _load_for_executorch_from_buffer(
                 executorch_program.buffer
@@ -1683,3 +1697,17 @@ def forward(self, x):
             GridPriorsModule(),
             (torch.rand(size=[1, 5, 2, 3]),),
         )
+
+    def test_vulkan_backend_high_dim_tensors_fail(self):
+        class UnsqueezeHigherDim(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+
+            def forward(self, x):
+                return torch.unsqueeze(x, 2)
+
+        self.lower_module_and_test_output(
+            UnsqueezeHigherDim(),
+            (torch.ones(size=[5, 4, 1, 2, 6]),),
+            expect_no_delegates=True,
+        )
diff --git a/backends/vulkan/utils.py b/backends/vulkan/utils.py
index 1a030e5e8f5..9211c52518f 100644
--- a/backends/vulkan/utils.py
+++ b/backends/vulkan/utils.py
@@ -130,6 +130,24 @@ def within_buffer_limit(node: torch.fx.Node, buffer_limit: int) -> int:
     raise RuntimeError(f"Cannot get numel for val of type {type(node.meta['val'])}")
 
 
+def tensor_node_is_high_dim(node: torch.fx.Node) -> bool:
+    """
+    If the node does not contain a tensor or a collection of tensors, return False.
+    Otherwise, return True if the tensor is high dimensional (i.e. rank > 4).
+    """
+    if is_tensor_node(node):
+        if isinstance(node.meta["val"], FakeTensor):
+            return len(node.meta["val"].shape) > 4
+        if isinstance(node.meta["val"], list) or isinstance(node.meta["val"], tuple):
+            for fake_tensor in node.meta["val"]:
+                if isinstance(fake_tensor, FakeTensor):
+                    if len(fake_tensor.shape) > 4:
+                        return True
+        return False
+    else:
+        return False
+
+
 def required_image_extents(sizes: torch.Size, layout: VkMemoryLayout) -> ImageExtents:
     """
     Calculate the image extents that will be used to represent a tensor with the given sizes

From ab55b29a666ab222ae3b78f3db4874a6d34d66f3 Mon Sep 17 00:00:00 2001
From: Stephen Jia
Date: Fri, 24 Jan 2025 09:10:23 -0800
Subject: [PATCH 2/2] Update on "[ET-VK][ez] Add back tensor dim check"

## Context

Vulkan cannot currently represent higher dimensional tensors (tensors with dim > 4), but some refactors implemented last year accidentally removed the partitioner check that prevents ops involving high dimensional tensors from being lowered. This diff adds back the check, along with a test to verify that high dimensional tensors do not get lowered.

Differential Revision: [D68630966](https://our.internmc.facebook.com/intern/diff/D68630966/)

[ghstack-poisoned]
---
 .../vulkan/partitioner/vulkan_partitioner.py |  4 ++--
 backends/vulkan/utils.py                     | 24 ++++++++-----------
 2 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/backends/vulkan/partitioner/vulkan_partitioner.py b/backends/vulkan/partitioner/vulkan_partitioner.py
index deb4327c7b4..3c31e0316a6 100644
--- a/backends/vulkan/partitioner/vulkan_partitioner.py
+++ b/backends/vulkan/partitioner/vulkan_partitioner.py
@@ -84,7 +84,7 @@ def op_node_is_compatible(
         features = get_op_features(target)
 
         # Check for high dimensional tensors
-        if utils.tensor_node_is_high_dim(node):
+        if utils.is_tensor_node(node) and utils.tensor_node_is_high_dim(node):
             return False, "contains high dim tensor"
 
         valid_texture_layouts = utils.possible_node_memory_layouts(
@@ -99,7 +99,7 @@ def op_node_is_compatible(
                 and i not in features.skip_limits_check
             ):
                 # Check for high dimensional tensors
-                if utils.tensor_node_is_high_dim(arg):
+                if utils.is_tensor_node(arg) and utils.tensor_node_is_high_dim(arg):
                     return False, "contains high dim tensor"
 
                 arg_texture_layouts = utils.possible_node_memory_layouts(
diff --git a/backends/vulkan/utils.py b/backends/vulkan/utils.py
index 9211c52518f..5034747be9d 100644
--- a/backends/vulkan/utils.py
+++ b/backends/vulkan/utils.py
@@ -132,20 +132,16 @@ def within_buffer_limit(node: torch.fx.Node, buffer_limit: int) -> int:
 
 def tensor_node_is_high_dim(node: torch.fx.Node) -> bool:
     """
-    If the node does not contain a tensor or a collection of tensors, return False.
-    Otherwise, return True if the tensor is high dimensional (i.e. rank > 4).
-    """
-    if is_tensor_node(node):
-        if isinstance(node.meta["val"], FakeTensor):
-            return len(node.meta["val"].shape) > 4
-        if isinstance(node.meta["val"], list) or isinstance(node.meta["val"], tuple):
-            for fake_tensor in node.meta["val"]:
-                if isinstance(fake_tensor, FakeTensor):
-                    if len(fake_tensor.shape) > 4:
-                        return True
-        return False
-    else:
-        return False
+    Returns true if a given node contains a tensor with more than 4 dimensions
+    """
+    if isinstance(node.meta["val"], FakeTensor):
+        return len(node.meta["val"].shape) > 4
+    if isinstance(node.meta["val"], list) or isinstance(node.meta["val"], tuple):
+        for fake_tensor in node.meta["val"]:
+            if isinstance(fake_tensor, FakeTensor):
+                if len(fake_tensor.shape) > 4:
+                    return True
+    return False
 
 
 def required_image_extents(sizes: torch.Size, layout: VkMemoryLayout) -> ImageExtents:
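For reference, the rank check that these patches restore can be reproduced outside the partitioner. The sketch below is a minimal, standalone re-implementation for illustration only: `node_is_high_dim` is a local stand-in rather than an import of `backends.vulkan.utils.tensor_node_is_high_dim`, and it assumes a PyTorch build that provides `torch.export`. It exports the `UnsqueezeHigherDim` module from the new test and shows that the unsqueeze op produces a rank-6 value, which is why the partitioner refuses to lower it to the Vulkan delegate.

```python
import torch
from torch._subclasses.fake_tensor import FakeTensor


def node_is_high_dim(node: torch.fx.Node) -> bool:
    # Local stand-in for the rank > 4 check, for illustration only.
    # A node's meta["val"] is a FakeTensor for single-tensor values, or a
    # list/tuple of FakeTensors for ops with multiple tensor outputs.
    val = node.meta.get("val")
    if isinstance(val, FakeTensor):
        return len(val.shape) > 4
    if isinstance(val, (list, tuple)):
        return any(isinstance(t, FakeTensor) and len(t.shape) > 4 for t in val)
    return False


class UnsqueezeHigherDim(torch.nn.Module):
    def forward(self, x):
        # Rank-5 input -> rank-6 output, which exceeds the 4-dim limit.
        return torch.unsqueeze(x, 2)


ep = torch.export.export(UnsqueezeHigherDim(), (torch.ones(5, 4, 1, 2, 6),))
for node in ep.graph.nodes:
    if node.op == "call_function":
        # The unsqueeze op reports a high-dim value, so it would not be lowered.
        print(f"{node.target}: high dim = {node_is_high_dim(node)}")
```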