2 changes: 1 addition & 1 deletion docs/source/torchao_vllm_integration.md
@@ -171,7 +171,7 @@ class MyNewQuantConfig(AOBaseConfig):
     VERSION: ClassVar[int] = 1
 
 class MyQuantizedTensor(TorchAOBaseTensor):
-    """Example based on FbgemmFp8Tensor - stores quantized data + scale"""
+    """Example based on Float8Tensor - stores quantized data + scale"""
 
     tensor_data_attrs = ["quantized_data", "scale"]
     tensor_attributes = ["dtype"]
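Reviewer note: a minimal sketch of what the corrected docstring points at, assuming `Float8Tensor` is the subclass that float8 weight-only quantization produces for Linear weights. The toy module and the `scale` attribute name are assumptions for illustration, not something this diff states.

```python
# Hedged sketch: inspect the quantized-data-plus-scale pair the docstring
# describes. Attribute names are assumptions about Float8Tensor's layout,
# mirroring the example's tensor_data_attrs above.
import torch
from torchao.quantization import Float8WeightOnlyConfig, quantize_

model = torch.nn.Sequential(torch.nn.Linear(128, 256, dtype=torch.bfloat16)).cuda()
quantize_(model, Float8WeightOnlyConfig())  # needs SM 8.9+, per the test gating below

weight = model[0].weight
print(type(weight).__name__)  # expected: a float8 tensor subclass
print(weight.scale.shape)     # assumed attribute name
```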
2 changes: 0 additions & 2 deletions test/core/test_config.py
@@ -24,7 +24,6 @@
     AWQStep,
 )
 from torchao.quantization.quant_api import (
-    FbgemmConfig,
     Float8DynamicActivationFloat8WeightConfig,
     Float8DynamicActivationInt4WeightConfig,
     Float8WeightOnlyConfig,
@@ -92,7 +91,6 @@
     ),
     AWQConfig(Int4WeightOnlyConfig(group_size=128), step=AWQStep.PREPARE_FOR_LOADING),
     AWQConfig(Int4WeightOnlyConfig(group_size=128), step="prepare_for_loading"),
-    FbgemmConfig(torch.bfloat16, torch.int4, torch.bfloat16, [1, 1, 256]),
 ]


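The list this hunk trims feeds a serialization round-trip check. A hedged sketch of that round trip with one of the configs that stays in the list; assuming `config_to_dict`/`config_from_dict` from `torchao.core.config` are what the test exercises.

```python
# Hedged sketch: round-trip a still-listed config through torchao's config
# serialization helpers, now that FbgemmConfig is out of the tested list.
from torchao.core.config import config_from_dict, config_to_dict
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig

config = Float8DynamicActivationFloat8WeightConfig()
reloaded = config_from_dict(config_to_dict(config))
assert type(reloaded) is type(config)
```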
7 changes: 0 additions & 7 deletions test/dtypes/test_affine_quantized.py
@@ -24,9 +24,7 @@
     to_affine_quantized_intx,
     to_affine_quantized_intx_static,
 )
-from torchao.float8.config import e4m3_dtype
 from torchao.quantization import (
-    FbgemmConfig,
     Float8WeightOnlyConfig,
     GemliteUIntXWeightOnlyConfig,
     Int4DynamicActivationInt4WeightConfig,
@@ -44,7 +42,6 @@
     is_fbcode,
     is_ROCM,
     is_sm_at_least_89,
-    is_sm_at_least_90,
 )
 
 is_cusparselt_available = (
@@ -100,10 +97,6 @@ def get_quantization_functions(
     if is_sm_at_least_89():
         base_functions.append(Float8WeightOnlyConfig())
 
-    if is_sm_at_least_90():
-        base_functions.append(FbgemmConfig(torch.bfloat16, torch.int4, torch.bfloat16))
-        base_functions.append(FbgemmConfig(e4m3_dtype, e4m3_dtype, torch.bfloat16))
-
     return base_functions


19 changes: 0 additions & 19 deletions torchao/_models/llama/generate.py
@@ -434,25 +434,6 @@ def ffn_or_attn_only(mod, fqn):
             model,
             Int4WeightOnlyConfig(group_size=group_size, use_hqq=use_hqq, version=1),
         )
-    elif "fbgemm" in quantization and "int4" in quantization:
-        from torchao.quantization import FbgemmConfig
-
-        _, precision, group_size = quantization.split("-")
-        group_size = int(group_size)
-        block_size = [1, group_size]
-        assert precision == "int4", f"FbegemmConfig({precision=}) not supported yet"
-        quantize_(
-            model,
-            FbgemmConfig(torch.bfloat16, torch.int4, torch.bfloat16, block_size),
-        )
-    elif "fbgemm" in quantization and "fp8" in quantization:
-        from torchao.float8.config import e4m3_dtype
-        from torchao.quantization import FbgemmConfig
-
-        quantize_(
-            model,
-            FbgemmConfig(e4m3_dtype, e4m3_dtype, torch.bfloat16),
-        )
     elif "int4dq-" in quantization:
         from torchao.dtypes import CutlassInt4PackedLayout
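With the `fbgemm-int4-<group_size>` and `fbgemm`+`fp8` CLI branches gone, a hedged sketch of the closest maintained equivalents; picking `Float8DynamicActivationFloat8WeightConfig` with `PerRow()` and `Int4WeightOnlyConfig` as stand-ins is an assumption inferred from the deleted call sites, not guidance stated in this PR.

```python
# Hedged sketch: approximate replacements for the removed fbgemm branches.
import torch
from torchao.quantization import (
    Float8DynamicActivationFloat8WeightConfig,
    Int4WeightOnlyConfig,
    PerRow,
    quantize_,
)

model = torch.nn.Sequential(torch.nn.Linear(1024, 1024, dtype=torch.bfloat16)).cuda()

# fp8 path: dynamic float8 activations + float8 weights (PerRow is assumed).
quantize_(model, Float8DynamicActivationFloat8WeightConfig(granularity=PerRow()))

# int4 path, for comparison: weight-only int4 with the group size the removed
# branch parsed from the CLI string (don't apply both to the same model).
# quantize_(model, Int4WeightOnlyConfig(group_size=128))
```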
1 change: 0 additions & 1 deletion torchao/dtypes/__init__.py
@@ -8,7 +8,6 @@
     to_affine_quantized_intx,
     to_affine_quantized_intx_static,
 )
-from .fbgemm_fp8_tensor import FbgemmFp8Tensor, to_fbgemm_fp8
 from .floatx import (
     CutlassSemiSparseLayout,
     Float8Layout,
268 changes: 0 additions & 268 deletions torchao/dtypes/fbgemm_fp8_tensor.py

This file was deleted.

2 changes: 0 additions & 2 deletions torchao/quantization/__init__.py
@@ -43,7 +43,6 @@
 )
 from .quant_api import (
     CutlassInt4PackedLayout,
-    FbgemmConfig,
     Float8DynamicActivationFloat8SemiSparseWeightConfig,
     Float8DynamicActivationFloat8WeightConfig,
     Float8DynamicActivationInt4WeightConfig,
@@ -161,7 +160,6 @@
"GemliteUIntXWeightOnlyConfig",
"AOPerModuleConfig",
"ModuleFqnToConfig",
"FbgemmConfig",
# tensor subclasses
"Int4Tensor",
"Int4PlainInt32Tensor",
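Since `FbgemmConfig` leaves the public `torchao.quantization` namespace here, downstream imports will break. A hedged migration sketch; the mapping to replacements is inferred from the call sites deleted above, not from release notes.

```python
# Before this PR (now raises ImportError):
#   from torchao.quantization import FbgemmConfig
# After, with assumed replacements inferred from the deleted usages:
from torchao.quantization import (
    Float8DynamicActivationFloat8WeightConfig,  # was FbgemmConfig(e4m3, e4m3, bf16)
    Int4WeightOnlyConfig,                       # was FbgemmConfig(bf16, int4, bf16, block_size)
)
```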