From f35cdc66fe7d739a461f272f3a6d7a0555ec16bf Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Tue, 17 Nov 2020 16:43:39 -0800
Subject: [PATCH] Update on "[quant][fix] Fix quant type classification for
 float_qparam qconfig"

Summary:
also renamed float_qparams_dynamic_qconfig to float_qparams_weight_only_qconfig
It's not used in user code yet so we only need to update the tests.

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:

Differential Revision: [D25010175](https://our.internmc.facebook.com/intern/diff/D25010175)

[ghstack-poisoned]
---
 torch/quantization/qconfig.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/torch/quantization/qconfig.py b/torch/quantization/qconfig.py
index 8a187b111a89..8da4ad6bb182 100644
--- a/torch/quantization/qconfig.py
+++ b/torch/quantization/qconfig.py
@@ -67,8 +67,11 @@ def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):
 per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
                                              weight=default_per_channel_weight_observer)
 
-float_qparams_weight_only_qconfig = QConfig(activation=default_placeholder_observer,
-                                            weight=default_float_qparams_observer)
+# TODO: this is weight only quant, change this to QConfigWeightOnly
+# or remove the QConfigDynamic later
+float_qparams_weight_only_qconfig = QConfigDynamic(
+    activation=default_placeholder_observer,
+    weight=default_float_qparams_observer)
 
 default_qat_qconfig = QConfig(activation=default_fake_quant,
                               weight=default_weight_fake_quant)