diff --git a/.github/workflows/unittest_ci_cpu.yml b/.github/workflows/unittest_ci_cpu.yml index a0b0ef9eb..e54ee22ef 100644 --- a/.github/workflows/unittest_ci_cpu.yml +++ b/.github/workflows/unittest_ci_cpu.yml @@ -79,6 +79,7 @@ jobs: conda install -n build_binary -y gxx_linux-64 conda run -n build_binary \ x86_64-conda-linux-gnu-g++ --version + conda install -n build_binary -y -c conda-forge benchmark gtest conda install -n build_binary -c anaconda redis -y conda run -n build_binary redis-server --daemonize yes mkdir cpp-build @@ -86,6 +87,9 @@ jobs: conda run -n build_binary cmake \ -DBUILD_TEST=ON \ -DBUILD_REDIS_IO=ON \ - -DCMAKE_PREFIX_PATH=/opt/conda/envs/build_binary/lib/python${{ matrix.python-version }}/site-packages/torch/share/cmake .. + -DCMAKE_PREFIX_PATH=/opt/conda/envs/build_binary/lib/python${{ matrix.python-version }}/site-packages/torch/share/cmake .. \ + -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=1" \ + -DCMAKE_EXE_LINKER_FLAGS="-Wl,--no-as-needed" \ + .. conda run -n build_binary make -j conda run -n build_binary ctest -V . diff --git a/CMakeLists.txt b/CMakeLists.txt index 54446bb67..09c1d1804 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,7 +17,7 @@ include(FetchContent) option(BUILD_TEST "Build C++ test binaries (need gtest and gbenchmark)" OFF) -add_definitions("-D_GLIBCXX_USE_CXX11_ABI=0") +add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") add_subdirectory(torchrec/csrc) diff --git a/contrib/dynamic_embedding/CMakeLists.txt b/contrib/dynamic_embedding/CMakeLists.txt index 1d182ae27..7981ab63d 100644 --- a/contrib/dynamic_embedding/CMakeLists.txt +++ b/contrib/dynamic_embedding/CMakeLists.txt @@ -25,6 +25,8 @@ endif() option(TDE_WITH_TESTING "Enable unittest in C++ side" ${TDE_IS_TOP_LEVEL_PROJECT}) +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--allow-shlib-undefined") + if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") option(TDE_WITH_CXX11_ABI "GLIBCXX use c++11 ABI or not. libtorch installed by conda is not use it by default" OFF) if (TDE_WITH_CXX11_ABI) diff --git a/torchrec/ops/tests/faster_hash_bench.py b/torchrec/ops/tests/faster_hash_bench.py index e5211b509..a7e2e3d19 100644 --- a/torchrec/ops/tests/faster_hash_bench.py +++ b/torchrec/ops/tests/faster_hash_bench.py @@ -11,6 +11,7 @@ import contextlib import logging import random +import sys import time from typing import Any, Generator @@ -18,8 +19,14 @@ logger: logging.Logger = logging.getLogger(__name__) -torch.ops.load_library("//caffe2/torch/fb/retrieval:faster_hash_cpu") -torch.ops.load_library("//caffe2/torch/fb/retrieval:faster_hash_cuda") +def load_required_libraries() -> bool: + try: + torch.ops.load_library("//torchrec/ops:faster_hash_cpu") + torch.ops.load_library("//torchrec/ops:faster_hash_cuda") + return True + except Exception as e: + logger.error(f"Failed to load faster_hash libraries, skipping test: {e}") + return False @contextlib.contextmanager @@ -347,6 +354,9 @@ def _run_benchmark_with_eviction( if __name__ == "__main__": + if not load_required_libraries(): + print("Skipping test because libraries were not loaded") + sys.exit(0) logger.setLevel(logging.INFO) handler = logging.StreamHandler() handler.setLevel(logging.INFO) diff --git a/torchrec/ops/tests/faster_hash_test.py b/torchrec/ops/tests/faster_hash_test.py index a56420522..343d279e4 100644 --- a/torchrec/ops/tests/faster_hash_test.py +++ b/torchrec/ops/tests/faster_hash_test.py @@ -13,9 +13,14 @@ import torch from hypothesis import settings -torch.ops.load_library("//torchrec/ops:faster_hash_cpu") -torch.ops.load_library("//torchrec/ops:faster_hash_cuda") - +def load_required_libraries() -> bool: + try: + torch.ops.load_library("//torchrec/ops:faster_hash_cpu") + torch.ops.load_library("//torchrec/ops:faster_hash_cuda") + return True + except Exception as e: + print(f"Skipping tests because libraries were not loaded: {e}") + return False class HashZchKernelEvictionPolicy(IntEnum): THRESHOLD_EVICTION = 0 @@ -23,6 +28,14 @@ class HashZchKernelEvictionPolicy(IntEnum): class FasterHashTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + if not load_required_libraries(): + raise unittest.SkipTest( + "Libraries not loaded, skipping all tests in MyTestCase" + ) + @unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available") @settings(deadline=None) def test_simple_zch_no_evict(self) -> None: