Skip to content

Commit

Permalink
Merge pull request #226 from LeiWang1999/fix/general_attr
Browse files Browse the repository at this point in the history
Register quant params as buffers in GeneralQuantLinear for friendlier post-processing.
  • Loading branch information
PanQiWei committed Aug 4, 2023
2 parents 45152b7 + a0de5c2 commit 5d8fa85
Showing 1 changed file with 4 additions and 5 deletions.
9 changes: 4 additions & 5 deletions auto_gptq/nn_modules/qlinear/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ def __init__(self, quant_linear_module):
out_features=quant_linear_module.outfeatures,
bias=True
)

self.infeatures = quant_linear_module.infeatures
self.outfeatures = quant_linear_module.outfeatures
self.bits = quant_linear_module.bits
Expand All @@ -18,15 +17,15 @@ def __init__(self, quant_linear_module):
self.weight.requires_grad = False

self.weight.data = quant_linear_module.qweight
self.qweight = self.weight
self.register_buffer('qweight', quant_linear_module.qweight)
self.bias.data = quant_linear_module.bias

self.qweight.requires_grad = False
self.bias.requires_grad = False

self.qzeros = quant_linear_module.qzeros
self.scales = quant_linear_module.scales
self.g_idx = quant_linear_module.g_idx
self.register_buffer('qzeros', quant_linear_module.qzeros)
self.register_buffer('scales', quant_linear_module.scales)
self.register_buffer('g_idx', quant_linear_module.g_idx)

if hasattr(quant_linear_module, "wf"):
self.wf = quant_linear_module.wf
Expand Down

0 comments on commit 5d8fa85

Please sign in to comment.