torch/ao/quantization/quantizer/quantizer.py: 25 changes (0 additions & 25 deletions)
@@ -19,23 +19,6 @@
     "QuantizationAnnotation",
 ]
 
-# TODO: maybe remove torch.float32
-SUPPORTED_DTYPES = [
-    torch.uint8,
-    torch.int8,
-    torch.int16,
-    torch.int32,
-    torch.float16,
-    torch.float32,
-]
-SUPPORTED_QSCHEMES = [
-    torch.per_tensor_affine,
-    torch.per_tensor_symmetric,
-    torch.per_channel_affine,
-    torch.per_channel_symmetric,
-    torch.per_channel_affine_float_qparams,
-]
-
 
 class QuantizationSpecBase(ABC):  # noqa: B024
     """Base class for different types of quantization specs that allows users to
@@ -64,10 +47,6 @@ class QuantizationSpec(QuantizationSpecBase):
     is_dynamic: bool = False
 
     def __post_init__(self):
-        # check dtype is one of the supported types
-        if self.dtype not in SUPPORTED_DTYPES:
-            raise TypeError(f"Unsupported dtype {self.dtype}.")
-
         # quant_min must be less than quant_max
         if (
             self.quant_min is not None
@@ -78,10 +57,6 @@ def __post_init__(self):
                 f"quant_min {self.quant_min} must be <= quant_max {self.quant_max}."
             )
 
-        # check qscheme is on of the supported ones
-        if self.qscheme is not None and self.qscheme not in SUPPORTED_QSCHEMES:
-            raise ValueError(f"Unsupported qscheme {self.qscheme}.")
-
         # ch_axis must be less than the number of channels
        # but no way to check here. Just check that it is not < 0.
         if self.ch_axis is not None and self.ch_axis < 0:
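Note: a minimal sketch of what this removal implies, not part of the change itself. It assumes the QuantizationSpec fields visible in the diff (dtype, quant_min, quant_max, qscheme, ch_axis, is_dynamic) plus an observer_or_fake_quant_ctr field as in the current version of this file; whether that field existed at this exact commit is an assumption, and PlaceholderObserver is just a convenient observer constructor for illustration. With the hard-coded SUPPORTED_DTYPES and SUPPORTED_QSCHEMES checks gone, __post_init__ only validates quant_min <= quant_max and ch_axis >= 0, so a dtype outside the old list (e.g. torch.bfloat16) no longer raises.

import torch
from torch.ao.quantization.observer import PlaceholderObserver
from torch.ao.quantization.quantizer.quantizer import QuantizationSpec

# Hypothetical example: torch.bfloat16 was not in the removed SUPPORTED_DTYPES
# list, so this used to raise TypeError in __post_init__. After this PR the
# spec constructs fine; only the quant_min/quant_max and ch_axis checks remain.
spec = QuantizationSpec(
    dtype=torch.bfloat16,
    observer_or_fake_quant_ctr=PlaceholderObserver,
)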