From a97f4ca911de7d5b05c74c2165a2e964a99e0416 Mon Sep 17 00:00:00 2001 From: Jerry Zhang Date: Tue, 1 Dec 2020 17:16:59 -0800 Subject: [PATCH] Update on "[reland][quant][fix] Add bias once in conv_fused (#48593)" Summary: Previously, _conv_forward would add self.bias to the result, so the bias was added twice in the QAT ConvBn module. This PR adds a bias argument to _conv_forward, and _conv_forward is now called with a zero bias in the ConvBn module. fixes: https://github.com/pytorch/pytorch/issues/48514 Test Plan: Imported from OSS Reviewed By: raghuramank100 Differential Revision: [D25249175](https://our.internmc.facebook.com/intern/diff/D25249175) [ghstack-poisoned] --- torch/nn/intrinsic/qat/modules/conv_fused.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torch/nn/intrinsic/qat/modules/conv_fused.py b/torch/nn/intrinsic/qat/modules/conv_fused.py index dc6260c7d4a5..b1be8e141fce 100644 --- a/torch/nn/intrinsic/qat/modules/conv_fused.py +++ b/torch/nn/intrinsic/qat/modules/conv_fused.py @@ -93,7 +93,8 @@ def _forward(self, input): bias_shape = [1] * len(self.weight.shape) bias_shape[1] = -1 scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape)) - # this does not include the conv bias + # using zero bias here since the bias for original conv + # will be added later if self.bias: zero_bias = torch.zeros_like(self.bias) else: