@@ -1,4 +1,4 @@
-name: Run TorchAO Experimental Tests
+name: Run Regression Tests (aarch64)

 on:
   push:
@@ -44,17 +44,19 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           conda activate venv
           pip install coremltools
           pip install torch==2.7.0 --index-url https://download.pytorch.org/whl/cpu --force-reinstall
           pip install -r dev-requirements.txt
           BUILD_TORCHAO_EXPERIMENTAL=1 TORCHAO_BUILD_CPU_AARCH64=1 TORCHAO_BUILD_KLEIDIAI=1 TORCHAO_ENABLE_ARM_NEON_DOT=1 TORCHAO_PARALLEL_BACKEND=OPENMP pip install .
       - name: Run python tests
         run: |
           conda activate venv
-          pytest -s test/quantization/test_int8_dynamic_activation_intx_weight_config_v1.py
-          pytest -s torchao/experimental/tests/test_embedding_xbit_quantizer.py
-          pytest -s torchao/experimental/tests/test_quant_passes.py
-          pytest -s test/prototype/test_dynamic_activation_lut.py
+          pytest -s test/quantization/test_int8_dynamic_activation_intx_weight_config_v1.py
+          pytest -s test/quantization/test_embedding_xbit_quantizer.py
+          pytest -s test/quantization/quantize_/workflows/intx/test_intx_opaque_tensor.py
+          pytest -s test/prototype/test_dynamic_activation_lut.py
+          pytest -s test/prototype/test_groupwise_lowbit_weight_lut_quantizer.py
       - name: torchao/csrc/cpu - build and run C++ tests
         if: runner.os == 'macOS'
         run: |
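For local reproduction of the renamed workflow step, a minimal sketch (not part of the PR) that invokes the same regression test files through pytest. The script name is hypothetical; it assumes a torchao build produced with the BUILD_TORCHAO_EXPERIMENTAL / TORCHAO_BUILD_CPU_AARCH64 flags shown in the install step, and the paths are copied verbatim from the new test list.

# run_aarch64_regression_tests.py -- hypothetical helper, not part of this PR.
# Mirrors the pytest invocations in the updated "Run python tests" step.
import sys

import pytest

# Test files listed in the new workflow step (paths taken from the diff above).
TEST_FILES = [
    "test/quantization/test_int8_dynamic_activation_intx_weight_config_v1.py",
    "test/quantization/test_embedding_xbit_quantizer.py",
    "test/quantization/quantize_/workflows/intx/test_intx_opaque_tensor.py",
    "test/prototype/test_dynamic_activation_lut.py",
    "test/prototype/test_groupwise_lowbit_weight_lut_quantizer.py",
]

if __name__ == "__main__":
    # -s matches the workflow (no output capture); exit with pytest's status code.
    sys.exit(pytest.main(["-s", *TEST_FILES]))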
@@ -20,8 +20,12 @@
     group_size_to_block_shapes,
 )
 from torchao.quantization.quant_api import quantize_
+from torchao.quantization.quantize_.workflows.intx.intx_opaque_tensor import (
+    _is_kernel_library_loaded,
+)


+@unittest.skipIf(not _is_kernel_library_loaded(), "Need torchao lowbit kernels")
 class TestGroupwiseLowbitWeightLut(unittest.TestCase):
     """
     Test suite for the GroupwiseLutWeight quantization scheme, updated for the
@@ -32,9 +32,13 @@
     MappingType,
     quantize_,
 )
+from torchao.quantization.quantize_.workflows.intx.intx_opaque_tensor import (
+    _is_kernel_library_loaded,
+)
 from torchao.quantization.utils import compute_error


+@unittest.skipIf(not _is_kernel_library_loaded(), "Need torchao lowbit kernels")
 class TestEmbeddingQuantizer(unittest.TestCase):
     def test_accuracy(self):
         granularity = PerGroup(128)
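Both test files now guard their entire TestCase on the availability of the native lowbit kernels. A minimal, self-contained sketch of that pattern follows; the import path and skip message are taken from the diff, while the ImportError fallback and the class/test names are assumptions for illustration.

# Sketch of the skip guard added in this PR: the suite is skipped unless the
# torchao lowbit kernel library was built and loaded.
import unittest

try:
    from torchao.quantization.quantize_.workflows.intx.intx_opaque_tensor import (
        _is_kernel_library_loaded,
    )
except ImportError:
    # Hypothetical fallback for builds without this module: report kernels as absent.
    def _is_kernel_library_loaded() -> bool:
        return False


@unittest.skipIf(not _is_kernel_library_loaded(), "Need torchao lowbit kernels")
class TestNeedsLowbitKernels(unittest.TestCase):
    def test_kernels_are_loaded(self):
        # Only runs when the kernel shared library is available.
        self.assertTrue(_is_kernel_library_loaded())


if __name__ == "__main__":
    unittest.main()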