From 01cd722593b83ccc758439a1a5c75f59a4bf0b15 Mon Sep 17 00:00:00 2001
From: Nitin Jain
Date: Tue, 2 Sep 2025 22:25:07 -0700
Subject: [PATCH 1/3] Add 16A8W quantization configuration utility for ARM
 backend

This diff implements a 16A8W (16-bit activations, 8-bit weights) quantization
configuration utility for the ExecuTorch ARM backend, following the feedback
from D79746479.

## Key Changes

**1. New Quantization Configuration Function**
- Add `get_symmetric_a16w8_quantization_config()` in `fbcode/executorch/backends/arm/quantizer/arm_quantizer.py`
- Provides 16-bit activations with HistogramObserver (better precision than 8A8W)
- Maintains 8-bit weights with MinMaxObserver/PerChannelMinMaxObserver (memory efficient)
- **Technically supported by TOSA through the [EXT-INT16 extension/profile](https://www.mlplatform.org/tosa/tosa_spec.html#_conv2d)**

## Benefits
- **Better Precision**: 16-bit activations provide higher precision than 8-bit, which is useful for carrying precision through recurrent neural networks.

ghstack-source-id: 305991462
@exported-using-ghexport
@bypass-github-export-checks
@bypass-github-pytorch-ci-checks
@bypass-github-executorch-ci-checks

Differential Revision: [D81550512](https://our.internmc.facebook.com/intern/diff/D81550512/)

[ghstack-poisoned]
---
 backends/arm/quantizer/arm_quantizer.py | 80 +++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/backends/arm/quantizer/arm_quantizer.py b/backends/arm/quantizer/arm_quantizer.py
index 92ef5be5781..395ba27349f 100644
--- a/backends/arm/quantizer/arm_quantizer.py
+++ b/backends/arm/quantizer/arm_quantizer.py
@@ -225,6 +225,86 @@ def get_symmetric_a16w8_quantization_config(
     return quantization_config
 
+
+@functools.lru_cache
+def get_symmetric_a16w8_quantization_config(
+    is_per_channel: bool = True,
+    is_qat: bool = False,
+    is_dynamic: bool = False,
+    weight_qmin: int = -127,
+    weight_qmax: int = 127,
+):
+    """
+    16A8W quantization config: 16-bit activations, 8-bit weights.
+
+    This configuration provides better accuracy than 8A8W while maintaining
+    reasonable memory usage through 8-bit weights.
+
+    Args:
+        is_per_channel: Whether to use per-channel quantization for weights
+        is_qat: Whether this is for Quantization Aware Training
+        is_dynamic: Whether to use dynamic quantization
+        weight_qmin: Minimum quantization value for weights
+        weight_qmax: Maximum quantization value for weights
+
+    Returns:
+        QuantizationConfig with 16-bit activations and 8-bit weights
+    """
+    extra_args: Dict[str, Any] = {"eps": 2**-12}
+
+    # Setup observer/fake-quant for 16-bit activations
+    if is_qat:
+        if is_dynamic:
+            act_observer_or_fake_quant_ctr = FakeQuantize
+            dynamic_quant_observer = MovingAverageMinMaxObserver.with_args(
+                averaging_constant=1
+            )
+            extra_args["observer"] = dynamic_quant_observer
+        else:
+            act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize  # type: ignore[assignment]
+    else:
+        if is_dynamic:
+            act_observer_or_fake_quant_ctr = PlaceholderObserver  # type: ignore[assignment]
+        else:
+            # HistogramObserver works well for 16-bit range
+            act_observer_or_fake_quant_ctr = HistogramObserver  # type: ignore[assignment]
+
+    # 16-bit activation quantization spec
+    act_quantization_spec = QuantizationSpec(
+        dtype=torch.int16,
+        quant_min=torch.iinfo(torch.int16).min,  # -32768
+        quant_max=torch.iinfo(torch.int16).max,  # 32767
+        qscheme=torch.per_tensor_symmetric,
+        is_dynamic=is_dynamic,
+        observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args(
+            **extra_args,
+        ),
+    )
+
+    # Instead of reconstructing quantization_config, just clone and update as needed
+    # Clone the quantization_config from get_symmetric_quantization_config and update activation spec
+    base_config = get_symmetric_quantization_config(
+        is_per_channel=is_per_channel,
+        is_qat=is_qat,
+        is_dynamic=is_dynamic,
+    )
+    # Replace activation quantization spec with 16-bit version
+    if is_dynamic:
+        quantization_config = QuantizationConfig(
+            act_quantization_spec,  # 16-bit input activations
+            None,
+            base_config.weight,  # 8-bit weights from base config
+            None,
+        )
+    else:
+        quantization_config = QuantizationConfig(
+            act_quantization_spec,  # 16-bit input activations
+            act_quantization_spec,  # 16-bit output activations
+            base_config.weight,  # 8-bit weights from base config
+            None,
+        )
+    return quantization_config
+
 
 NodeFilterType = Callable[[Node], bool]
 """Type for a Node Filter used by annotators. A Node filter is a function that takes a Node
 and returns whether the node should be annotated or not.

From cd8d1fba6096ec966a0c0b0cdbc3b6bfd988826c Mon Sep 17 00:00:00 2001
From: Nitin Jain
Date: Tue, 2 Sep 2025 22:57:48 -0700
Subject: [PATCH 2/3] Update on "Add 16A8W quantization configuration utility
 for ARM backend"

This diff implements a 16A8W (16-bit activations, 8-bit weights) quantization
configuration utility for the ExecuTorch ARM backend, following the feedback
from D79746479.

## Key Changes

**1. New Quantization Configuration Function**
- Add `get_symmetric_a16w8_quantization_config()` in `fbcode/executorch/backends/arm/quantizer/arm_quantizer.py`
- Provides 16-bit activations with HistogramObserver (better precision than 8A8W)
- Maintains 8-bit weights with MinMaxObserver/PerChannelMinMaxObserver (memory efficient)
- **Technically supported by TOSA through the [EXT-INT16 extension/profile](https://www.mlplatform.org/tosa/tosa_spec.html#_conv2d)**

## Benefits
- **Better Precision**: 16-bit activations provide higher precision than 8-bit, which is useful for carrying precision through recurrent neural networks.
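
## Example Usage

A minimal end-to-end sketch of how the new config is intended to be used. The
example model, the TOSA spec string, and the `TOSAQuantizer` /
`TosaSpecification` construction are illustrative assumptions and not part of
this diff; the prepare/convert calls follow the standard PT2E flow.

```python
import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

from executorch.backends.arm.quantizer.arm_quantizer import (
    TOSAQuantizer,
    get_symmetric_a16w8_quantization_config,
)
from executorch.backends.arm.tosa_specification import TosaSpecification


class SmallConvNet(torch.nn.Module):
    """Hypothetical example model, only used to illustrate the flow."""

    def __init__(self) -> None:
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.conv(x))


model = SmallConvNet().eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

# 16-bit activations, 8-bit per-channel weights.
config = get_symmetric_a16w8_quantization_config(is_per_channel=True)

# The TOSA spec string is illustrative; pick an INT16-capable spec for the target.
quantizer = TOSAQuantizer(TosaSpecification.create_from_string("TOSA-1.0+INT+int16"))
quantizer.set_global(config)

# Standard PT2E prepare / calibrate / convert flow.
exported = torch.export.export_for_training(model, example_inputs).module()
prepared = prepare_pt2e(exported, quantizer)
prepared(*example_inputs)  # calibration
quantized = convert_pt2e(prepared)
```
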
exported-using-ghexport
bypass-github-export-checks
bypass-github-pytorch-ci-checks
bypass-github-executorch-ci-checks

Differential Revision: [D81550512](https://our.internmc.facebook.com/intern/diff/D81550512/)

[ghstack-poisoned]

From 371cc371a4322aa56c8b240200e77928a2016b4b Mon Sep 17 00:00:00 2001
From: Nitin Jain
Date: Tue, 2 Sep 2025 23:41:08 -0700
Subject: [PATCH 3/3] Update on "Add 16A8W quantization configuration utility
 for ARM backend"

This diff implements a 16A8W (16-bit activations, 8-bit weights) quantization
configuration utility for the ExecuTorch ARM backend, following the feedback
from D79746479.

## Key Changes

**1. New Quantization Configuration Function**
- Add `get_symmetric_a16w8_quantization_config()` in `fbcode/executorch/backends/arm/quantizer/arm_quantizer.py`
- Provides 16-bit activations with HistogramObserver (better precision than 8A8W)
- Maintains 8-bit weights with MinMaxObserver/PerChannelMinMaxObserver (memory efficient)
- **Technically supported by TOSA through the [EXT-INT16 extension/profile](https://www.mlplatform.org/tosa/tosa_spec.html#_conv2d)**

## Benefits
- **Better Precision**: 16-bit activations provide higher precision than 8-bit, which is useful for carrying precision through recurrent neural networks.

exported-using-ghexport
bypass-github-export-checks
bypass-github-pytorch-ci-checks
bypass-github-executorch-ci-checks

Differential Revision: [D81550512](https://our.internmc.facebook.com/intern/diff/D81550512/)

[ghstack-poisoned]
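
## Config Sanity Check

A quick sketch of what the utility returns. The field names
(`input_activation`, `output_activation`, `weight`) are assumed from the
`QuantizationConfig` dataclass used in this backend and from how the config is
constructed in the diff above; treat this as a sketch rather than a test.

```python
import torch

from executorch.backends.arm.quantizer.arm_quantizer import (
    get_symmetric_a16w8_quantization_config,
)

config = get_symmetric_a16w8_quantization_config(is_per_channel=True)

# Activations cover the full signed 16-bit range.
assert config.input_activation.dtype == torch.int16
assert config.input_activation.quant_min == -32768
assert config.input_activation.quant_max == 32767

# Output activations reuse the same 16-bit spec in the static (non-dynamic) case.
assert config.output_activation.dtype == torch.int16

# Weights keep the 8-bit spec cloned from get_symmetric_quantization_config().
assert config.weight.dtype == torch.int8
```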