
Update on "[quant][eagermode] Move custom_module registration to prepare/convert_custom_config_dict"

Summary:

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:

Differential Revision: [D24290811](https://our.internmc.facebook.com/intern/diff/D24290811)

[ghstack-poisoned]
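
For illustration, this is roughly what the registration looks like once it travels with the prepare/convert config dicts instead of a separate registration call. The module classes below are hypothetical placeholders (real ones would implement from_float / from_observed), and the dict keys reflect my reading of the custom-module config format, not something stated in this commit:

from torch import nn

# Hypothetical user-defined module types standing in for a real custom module
# hierarchy; real observed/quantized classes would implement from_float and
# from_observed so prepare()/convert() can swap them in.
class MyCustomModule(nn.Module):
    def forward(self, x):
        return x

class MyObservedCustomModule(nn.Module):
    def forward(self, x):
        return x

class MyQuantizedCustomModule(nn.Module):
    def forward(self, x):
        return x

# Custom-module registration now rides along in the config dicts
# (dict keys assumed, see note above).
prepare_custom_config_dict = {
    "float_to_observed_custom_module_class": {
        MyCustomModule: MyObservedCustomModule,
    },
}
convert_custom_config_dict = {
    "observed_to_quantized_custom_module_class": {
        MyObservedCustomModule: MyQuantizedCustomModule,
    },
}

# Intended usage (the placeholder classes above would need real from_float /
# from_observed implementations for these calls to do anything useful):
#   prepared = torch.quantization.prepare(
#       float_model, prepare_custom_config_dict=prepare_custom_config_dict)
#   quantized = torch.quantization.convert(
#       prepared, convert_custom_config_dict=convert_custom_config_dict)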
jerryzh168 committed Oct 14, 2020
2 parents 099da6c + 6393fd6 commit 8ce1bc6
Showing 2 changed files with 11 additions and 1 deletion.
10 changes: 10 additions & 0 deletions tools/jit/gen_unboxing_wrappers.py
@@ -21,6 +21,7 @@
 import argparse
 import re
 from itertools import groupby
+from functools import reduce
 from ..autograd.gen_autograd import load_aten_declarations
 from ..autograd.gen_autograd import RETURNS_VIEWS_OF_INPUT
 from ..autograd.utils import CodeTemplate, write, is_out_variant, op_name_without_overload
@@ -496,6 +497,15 @@ def expand_options(decl, i, arg):
         }
         write(out, 'generated_unboxing_wrappers_%d.cpp' % i, GENERATED_UNBOXING_WRAPPERS_CPP, env)

+    all_shards = reduce(
+        lambda lhs, rhs: lhs + rhs,
+        shards,
+    )
+    env = {
+        'constructors': all_shards,
+    }
+    write(out, 'generated_unboxing_wrappers_everything.cpp', GENERATED_UNBOXING_WRAPPERS_CPP, env)
+

 default_map = {'{}': 'None', 'nullptr': 'None', 'c10::nullopt': 'None'}

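For reference, the new hunk simply folds the per-shard constructor lists into one list so the same template can also emit an unsharded generated_unboxing_wrappers_everything.cpp alongside the numbered shard files. A self-contained sketch of that reduce pattern (the shard contents here are made up):

from functools import reduce

# Toy stand-in for the generator's per-shard lists of generated wrapper snippets.
shards = [
    ["wrapper_a", "wrapper_b"],   # would feed generated_unboxing_wrappers_0.cpp
    ["wrapper_c"],                # would feed generated_unboxing_wrappers_1.cpp
    ["wrapper_d", "wrapper_e"],   # would feed generated_unboxing_wrappers_2.cpp
]

# Same reduction as in the diff: concatenate the shards into a single list,
# which then feeds generated_unboxing_wrappers_everything.cpp.
all_shards = reduce(lambda lhs, rhs: lhs + rhs, shards)
assert all_shards == ["wrapper_a", "wrapper_b", "wrapper_c", "wrapper_d", "wrapper_e"]
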
2 changes: 1 addition & 1 deletion torch/quantization/fx/quantization_patterns.py
@@ -573,7 +573,7 @@ class StandaloneModuleQuantizeHandler(QuantizeHandler):
     """ Converts an observed standalone module to quantized standalone module
     by calling convert_fx on the observed standalone module.
     """
-    def convert(self, quantizer, node, load_arg, debug=False):
+    def convert(self, quantizer, node, load_arg, debug=False, convert_custom_config_dict=None):
         assert node.op == 'call_module'
         qconfig = quantizer.qconfig_map[node.name]
         convert = torch.quantization.quantize_fx._convert_standalone_module_fx
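The only change in this file is the method signature: quantize handlers are invoked through a shared interface, so StandaloneModuleQuantizeHandler.convert has to accept the convert_custom_config_dict keyword that the caller now forwards to every handler, even when a given handler does not use it. A minimal sketch of that dispatch pattern (class and function names below are illustrative, not the actual quantizer internals):

from typing import Any, Dict, Optional

class QuantizeHandlerSketch:
    """Illustrative base class: all handlers share one convert() signature."""
    def convert(self, quantizer, node, load_arg, debug=False,
                convert_custom_config_dict: Optional[Dict[str, Any]] = None):
        raise NotImplementedError

class PassthroughHandler(QuantizeHandlerSketch):
    def convert(self, quantizer, node, load_arg, debug=False,
                convert_custom_config_dict: Optional[Dict[str, Any]] = None):
        # This handler ignores the config dict, but it still has to accept the
        # keyword because the caller passes it to every handler uniformly.
        return node

def convert_all(handler_map, quantizer, nodes, load_arg,
                convert_custom_config_dict=None):
    # The caller forwards the same keyword to whichever handler matches each
    # node, which is why the shared signature has to grow in lockstep.
    return [
        handler_map[node].convert(quantizer, node, load_arg,
                                  convert_custom_config_dict=convert_custom_config_dict)
        for node in nodes
    ]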

