From 2da394eab68a75f99f9ef94c427039c3c562b116 Mon Sep 17 00:00:00 2001
From: Stephen Jia
Date: Wed, 13 Nov 2024 11:44:51 -0800
Subject: [PATCH] [ET-VK][Llama] Apply XNNPACK partitioner as well when
 lowering to Vulkan

## Context

The final logit linear layer in the Transformer architecture operates on extremely large tensors, since both the output and weight tensors have a dimension equal to the vocabulary size. Because of this, image textures cannot be used to execute the op when running with the Vulkan delegate, and an implementation using buffer-based tensors must be used instead. Unfortunately, Vulkan does not currently have a performant linear implementation for buffer-based tensors, so if this final linear layer is executed in Vulkan, model inference is extremely slow.

## Changes

The below diff prevents the final logit linear layer from being delegated to Vulkan by enforcing a GPU buffer limit. This diff modifies the export llama script to apply the XNNPACK partitioner after the Vulkan partitioner when lowering to Vulkan, so that the remaining (undelegated) ops can still be accelerated with XNNPACK. 4-bit quantization also applies an additional quantizer after the Vulkan quantizer (which skips the final logit linear layer), so that the final logit linear layer can be quantized as well.

## Long Term

This is a temporary measure while an optimized buffer-based linear implementation is developed. Once the Vulkan implementation achieves parity with XNNPACK, the final logit linear layer will be delegated to Vulkan once more.

Differential Revision: [D65899827](https://our.internmc.facebook.com/intern/diff/D65899827/)

[ghstack-poisoned]
---
 examples/models/llama/export_llama_lib.py            |  4 ++++
 .../models/llama/source_transformation/quantize.py   | 11 ++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/examples/models/llama/export_llama_lib.py b/examples/models/llama/export_llama_lib.py
index 0e015418d42..c4334443f23 100644
--- a/examples/models/llama/export_llama_lib.py
+++ b/examples/models/llama/export_llama_lib.py
@@ -682,6 +682,10 @@ def _export_llama(args) -> LLMEdgeManager:  # noqa: C901
                 args.enable_dynamic_shape,
             )
         )
+        # Apply XNNPACK after Vulkan so that undelegated ops can be accelerated by XNNPACK
+        partitioners.append(
+            get_xnnpack_partitioner(dynamic_quant_only_partitioner=False)
+        )
         modelname = f"vulkan_{modelname}"
 
     if args.mps:
diff --git a/examples/models/llama/source_transformation/quantize.py b/examples/models/llama/source_transformation/quantize.py
index d168b7efcdc..958cfc119e4 100644
--- a/examples/models/llama/source_transformation/quantize.py
+++ b/examples/models/llama/source_transformation/quantize.py
@@ -157,7 +157,16 @@ def quantize(  # noqa C901
         model = gptq_quantizer.quantize(model, inputs)
         return model
     elif qmode == "vulkan_4w":
-        model = VkInt4WeightOnlyQuantizer().quantize(model)
+        q_group_size = 256 if group_size is None else group_size
+        model = VkInt4WeightOnlyQuantizer(groupsize=q_group_size).quantize(model)
+
+        # Apply additional quantizer for linear layers that aren't lowered to Vulkan
+        # at the moment
+        from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer
+        model = Int8DynActInt4WeightQuantizer(
+            precision=torch_dtype, groupsize=q_group_size
+        ).quantize(model)
+
         return model
     else:
         raise Exception(f"Unrecognized quantize mode: {qmode}")
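
A minimal sketch of how this path might be exercised when exporting Llama with the Vulkan backend. The flag spellings below are assumptions inferred from the arguments referenced in the diff (args.vulkan, qmode, group_size) and may differ from the actual export_llama CLI; checkpoint and params arguments are omitted:

    # Hypothetical invocation (flag names assumed): lower to Vulkan, let XNNPACK pick up
    # the ops Vulkan does not delegate (e.g. the final logit linear), and quantize with
    # the vulkan_4w mode (group size defaults to 256 when not specified).
    python -m examples.models.llama.export_llama --vulkan -qmode vulkan_4w --group_size 256 ...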