fx quant: types for fusion_patterns.py #49606

Closed · wants to merge 2 commits · changes shown from 1 commit
torch/quantization/fx/fusion_patterns.py (12 additions, 4 deletions)
@@ -1,9 +1,12 @@
 import torch
+from torch.fx.graph import Node
 from .pattern_utils import (
     register_fusion_pattern,
 )
 from .utils import _parent_name
+from .quantization_types import QuantizerCls
 from ..fuser_method_mappings import get_fuser_method
+from typing import Any, Callable, Dict
 
 # ---------------------
 # Fusion Patterns
@@ -25,24 +28,27 @@
 @register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
 @register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
 class ConvBNReLUFusion():
-    def __init__(self, quantizer, node):
+    def __init__(self, quantizer: QuantizerCls, node: Node):
         super().__init__()
         self.relu_node = None
         self.bn_node = None
         if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
            (node.op == 'call_module' and type(quantizer.modules[node.target]) == torch.nn.ReLU):
             self.relu_node = node
+            assert isinstance(node.args[0], Node)
             node = node.args[0]
         assert node.op == 'call_module'
         if type(quantizer.modules[node.target]) in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]:
             self.bn_node = node
             self.bn = quantizer.modules[self.bn_node.target]
+            assert isinstance(node.args[0], Node)
             node = node.args[0]
         assert node.op == 'call_module'
         self.conv_node = node
         self.conv = quantizer.modules[self.conv_node.target]
 
-    def fuse(self, quantizer, load_arg, fuse_custom_config_dict=None):
+    def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
+             fuse_custom_config_dict: Dict[str, Any] = None) -> Node:
         if fuse_custom_config_dict is None:
             fuse_custom_config_dict = {}
         additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
@@ -89,15 +95,17 @@ def fuse(self, quantizer, load_arg, fuse_custom_config_dict=None):
 @register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm3d))
 @register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm3d))
 class ModuleReLUFusion():
-    def __init__(self, quantizer, node):
+    def __init__(self, quantizer: QuantizerCls, node: Node):
         super().__init__()
         self.relu_node = node
+        assert isinstance(node.args[0], Node)
         node = node.args[0]
         assert node.op == 'call_module'
         self.module_node = node
         self.module = quantizer.modules[self.module_node.target]
 
-    def fuse(self, quantizer, load_arg, fuse_custom_config_dict=None):
+    def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
+             fuse_custom_config_dict: Dict[str, Any] = None) -> Node:
         if fuse_custom_config_dict is None:
             fuse_custom_config_dict = {}
         additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
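The stacked @register_fusion_pattern decorators above route several patterns to one handler class. As a rough illustration of how a decorator-based registry of that shape works (a minimal sketch with hypothetical names REGISTRY and register, not the actual pattern_utils implementation):

from typing import Any, Callable, Dict, Type

REGISTRY: Dict[Any, Type] = {}

def register(pattern: Any) -> Callable[[Type], Type]:
    def insert(cls: Type) -> Type:
        REGISTRY[pattern] = cls   # remember which class handles this pattern
        return cls                # return cls unchanged so decorators stack
    return insert

@register(("relu", ("bn", "conv")))   # outermost op first, as in the patterns above
@register(("relu", "conv"))
class ConvReLUHandler:
    pass

assert REGISTRY[("relu", "conv")] is ConvReLUHandler

Returning the class unchanged is what lets several @register lines stack on a single definition, which is exactly the usage seen in this diff.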
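The assert isinstance(node.args[0], Node) lines added in both classes serve the type checker as much as runtime safety: in torch.fx, Node.args is a tuple whose element type is a union that also covers non-Node values, so mypy only accepts the node = node.args[0] reassignment once the assert has narrowed the element to Node. A minimal sketch of the same narrowing pattern, with hypothetical names (Widget, first_widget):

from typing import Tuple, Union

class Widget:
    pass

def first_widget(args: Tuple[Union[Widget, int], ...]) -> Widget:
    first = args[0]                   # typed as Union[Widget, int]
    assert isinstance(first, Widget)  # narrows the union for mypy, checks at runtime
    return first                      # now typed as Widget, so this passes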
torch/quantization/fx/quantization_patterns.py (2 additions, 5 deletions)
@@ -34,17 +34,14 @@
     get_linear_prepack_op_for_dtype,
 )
 
+from .quantization_types import QuantizerCls
+
 from abc import ABC, abstractmethod
 import operator
 import warnings
 
 from typing import Any, Callable, Dict
 
-# This is the Quantizer class instance from torch/quantization/fx/quantize.py.
-# Define separately to prevent circular imports.
-# TODO(future PR): improve this.
-QuantizerCls = Any
-
 # -------------------------
 # Pattern Registrations
 # -------------------------
torch/quantization/fx/quantization_types.py (6 additions, 1 deletion)
@@ -1,3 +1,8 @@
-from typing import Union, Callable, Tuple
+from typing import Union, Callable, Tuple, Any
 
 Pattern = Union[Callable, Tuple[Callable, Callable], Tuple[Callable, Callable, Callable]]
+
+# This is the Quantizer class instance from torch/quantization/fx/quantize.py.
+# Define separately to prevent circular imports.
+# TODO(future PR): improve this.
+QuantizerCls = Any
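Moving QuantizerCls into quantization_types.py follows the comment above: quantize.py imports the pattern modules to build its registries, so the pattern modules cannot import the real Quantizer class back without completing a cycle, and an Any alias defined in a leaf module gives every file a common name to annotate against. A minimal sketch of the shape of the fix, with hypothetical module names:

# types_mod.py (leaf module; imports nothing else from the package)
from typing import Any
QuantizerCls = Any  # stands in for the real Quantizer class

# patterns_mod.py (importing quantize_mod here would complete the cycle)
from types_mod import QuantizerCls

class SomePattern:
    def __init__(self, quantizer: QuantizerCls) -> None:
        self.quantizer = quantizer

# quantize_mod.py (imports patterns_mod freely; no cycle)
import patterns_mod

class Quantizer:
    pass

An alternative is a typing.TYPE_CHECKING guarded import with string annotations, which keeps real type checking at the cost of quoting the type everywhere; the Any alias trades precision for simplicity, as the TODO above notes.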