diff --git a/misc/pytorch/Makefile b/misc/pytorch/Makefile
index 4d0250ce1d885..b48fb01272c5e 100644
--- a/misc/pytorch/Makefile
+++ b/misc/pytorch/Makefile
@@ -1,7 +1,6 @@
 PORTNAME=	pytorch
 DISTVERSIONPREFIX=	v
-DISTVERSION=	2.1.0
-PORTREVISION=	1
+DISTVERSION=	2.1.2
 CATEGORIES=	misc # machine-learning
 MASTER_SITES=	https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
 DIST_SUBDIR=	${PORTNAME}
@@ -19,7 +18,9 @@ LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BUILD_DEPENDS=	gmake:devel/gmake \
 		${LOCALBASE}/include/fxdiv.h:devel/fxdiv
-LIB_DEPENDS=	libopenblas.so:math/openblas \
+LIB_DEPENDS=	libabsl_base.so:devel/abseil \
+		libblis.so:math/blis \
+		libopenblas.so:math/openblas \
 		libmpi.so:net/openmpi \
 		libonnx.so:misc/onnx \
 		libpthreadpool.so:devel/pthreadpool \
@@ -33,8 +34,9 @@ CMAKE_OFF=	BUILD_CUSTOM_PROTOBUF USE_CUDA USE_ROCM USE_NNPACK USE_QNNPACK USE_P
 CMAKE_ON=	USE_SYSTEM_PYBIND11 \
 		USE_SYSTEM_SLEEF \
 		USE_SYSTEM_ONNX # see other USE_SYSTEM_xx in CMakeLists.txt
+CMAKE_ON+=	ONNX_USE_LITE_PROTO # workaround for the protobuf-related failure described here: https://github.com/onnx/optimizer/issues/38
 CMAKE_ARGS=	-DPSIMD_SOURCE_DIR=${WRKSRC}/third_party/psimd \
-		-DFREEBSD_PYTHON_VER=${PYTHON_VER}
+		-DPYTHON_EXECUTABLE=${PYTHON_CMD}
 MAKE_ENV=	USE_NINJA=no # ninja breaks for some reason
 
 LDFLAGS+=	-lexecinfo
diff --git a/misc/pytorch/distinfo b/misc/pytorch/distinfo
index 654de6ea7ac3f..f9ee9b1e6a74f 100644
--- a/misc/pytorch/distinfo
+++ b/misc/pytorch/distinfo
@@ -1,3 +1,3 @@
-TIMESTAMP = 1697035721
-SHA256 (pytorch/pytorch-v2.1.0.tar.gz) = 631c71f7f7d6174952f35b5ed4a45ec115720a4ef3eb619678de5893af54f403
-SIZE (pytorch/pytorch-v2.1.0.tar.gz) = 283041980
+TIMESTAMP = 1703580494
+SHA256 (pytorch/pytorch-v2.1.2.tar.gz) = 85effbcce037bffa290aea775c9a4bad5f769cb229583450c40055501ee1acd7
+SIZE (pytorch/pytorch-v2.1.2.tar.gz) = 282894457
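A note on the distinfo hunk above, with an illustrative sketch that is not part of the commit: TIMESTAMP records when the checksums were taken, and the SHA256/SIZE pair is what the ports framework verifies against the fetched distfile before extraction; in a ports tree these lines are regenerated with "make makesum". The file location and variable names below are assumptions made only so the example runs standalone.

# Illustrative sketch only: recompute the SHA256 and SIZE that the new
# distinfo records for the 2.1.2 distfile. The ports framework performs
# the equivalent verification itself during "make checksum".
import hashlib
import os

DISTFILE = "pytorch-v2.1.2.tar.gz"  # assumed path; a real tree keeps it under ${DISTDIR}/pytorch/
WANT_SHA256 = "85effbcce037bffa290aea775c9a4bad5f769cb229583450c40055501ee1acd7"
WANT_SIZE = 282894457

digest = hashlib.sha256()
with open(DISTFILE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(DISTFILE) == WANT_SIZE, "SIZE mismatch"
assert digest.hexdigest() == WANT_SHA256, "SHA256 mismatch"
print("distfile matches distinfo")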
diff --git a/misc/pytorch/files/patch-CMakeLists.txt b/misc/pytorch/files/patch-CMakeLists.txt
index 9ebebccbf2e38..1bd4dc09bcbd1 100644
--- a/misc/pytorch/files/patch-CMakeLists.txt
+++ b/misc/pytorch/files/patch-CMakeLists.txt
@@ -1,6 +1,6 @@
---- CMakeLists.txt.orig	2023-04-03 19:45:59 UTC
+--- CMakeLists.txt.orig	2023-12-15 02:03:27 UTC
 +++ CMakeLists.txt
-@@ -138,7 +138,7 @@ endif()
+@@ -139,7 +139,7 @@ set(CPU_INTEL OFF)
  set(CPU_AARCH64 OFF)
  set(CPU_INTEL OFF)
 
@@ -9,7 +9,7 @@
    set(CPU_INTEL ON)
  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)")
    set(CPU_AARCH64 ON)
-@@ -163,7 +163,7 @@ include(CMakeDependentOption)
+@@ -164,7 +164,7 @@ option(BUILD_DOCS "Build Caffe2 documentation" OFF)
  option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
  option(BUILD_BINARY "Build C++ binaries" OFF)
  option(BUILD_DOCS "Build Caffe2 documentation" OFF)
 -option(BUILD_PYTHON "Build Python binaries" ON)
 +option(BUILD_PYTHON "Build Python binaries" OFF)
  option(BUILD_CAFFE2 "Master flag to build Caffe2" OFF)
  option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
-@@ -398,15 +398,15 @@ endif()
+@@ -408,15 +408,15 @@ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo
  # USE_SYSTEM_LIBS being "OFF".
  option(USE_SYSTEM_LIBS "Use all available system-provided libraries." OFF)
  option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo." OFF)
 -option(USE_SYSTEM_ONNX "Use system-provided onnx." OFF)
 +option(USE_SYSTEM_ONNX "Use system-provided onnx." ON)
  option(USE_SYSTEM_XNNPACK "Use system-provided xnnpack." OFF)
+ option(USE_SYSTEM_ZSTD "Use system-provided zstd." OFF)
  option(USE_GOLD_LINKER "Use ld.gold to link" OFF)
-
  if(USE_SYSTEM_LIBS)
+@@ -817,11 +817,11 @@ if(NOT MSVC)
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=bool-operation" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=bool-operation" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Wnarrowing" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Wno-missing-field-initializers" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Wno-type-limits" CMAKE_CXX_FLAGS)
+@@ -917,8 +917,8 @@ if(NOT MSVC)
+   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
++  #append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
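Most of the churn in patch-CMakeLists.txt above is mechanical: upstream CMakeLists.txt gained lines between 2.1.0 and 2.1.2, so the inner @@ headers are renumbered (138 to 139, 163 to 164, 398 to 408) while the patched lines themselves stay the same; only the -Werror hunks added at 817/917 are genuinely new. As a rough illustration of that renumbering idea, here is a hypothetical helper; the name, the single fixed offset, and the stdin/stdout interface are all inventions, and this is not how the committer produced the file (ports patches are normally refreshed with "make makepatch").

# Hypothetical sketch: shift every "@@ -a,b +c,d @@" hunk header in a
# unified diff by a fixed line offset, which is all that changed in
# several of the hunks above.
import re
import sys

HUNK = re.compile(r"^@@ -(\d+)((?:,\d+)?) \+(\d+)((?:,\d+)?) @@(.*)$")

def shift_hunk_headers(patch_text, offset):
    out = []
    for line in patch_text.splitlines(keepends=True):
        m = HUNK.match(line)
        if m:
            old_start, old_len, new_start, new_len, tail = m.groups()
            # Rewrite only the header numbers; body lines pass through.
            line = (f"@@ -{int(old_start) + offset}{old_len} "
                    f"+{int(new_start) + offset}{new_len} @@{tail}\n")
        out.append(line)
    return "".join(out)

if __name__ == "__main__":
    sys.stdout.write(shift_hunk_headers(sys.stdin.read(), int(sys.argv[1])))

Usage would be something like "python shift_hunks.py 1 < files/patch-CMakeLists.txt"; since a real update shifts each hunk by a different amount, regenerating the patch from a fixed-up work tree is the reliable route.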
diff --git a/misc/pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h b/misc/pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h
index 70dc6fbd61e28..e4cdf08b4ff36 100644
--- a/misc/pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h
+++ b/misc/pytorch/files/patch-aten_src_ATen_native_sparse_ValidateCompressedIndicesCommon.h
@@ -1,6 +1,6 @@
---- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig	2023-05-07 08:51:40 UTC
+--- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig	2023-12-15 02:03:27 UTC
 +++ aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
-@@ -39,7 +39,7 @@ namespace {
+@@ -40,7 +40,7 @@ INVARIANT_CHECK_FUNC_API
  // use `cidx/idx` to refer to `compressed_indices/plain_indices` respectively.
 
  INVARIANT_CHECK_FUNC_API
@@ -9,7 +9,7 @@
 #ifdef GPUCC
    CUDA_KERNEL_ASSERT(cond && message);
 #else
-@@ -57,9 +57,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
+@@ -58,9 +58,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
     const index_t& zero) {
   const bool invariant = cidx == zero;
   if (cdim_name == CDimName::CRow) {
-@@ -71,9 +71,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
+@@ -72,9 +72,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
     const index_t& nnz) {
   const bool invariant = cidx == nnz;
   if (cdim_name == CDimName::CRow) {
-@@ -88,11 +88,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
+@@ -89,11 +89,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
   const auto s_cidx = cidx_next - cidx;
   const bool invariant = zero <= s_cidx && s_cidx <= dim;
   if (cdim_name == CDimName::CRow) {
         invariant,
         "`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
   }
-@@ -107,9 +107,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
+@@ -108,9 +108,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
     const index_t& dim) {
   const bool invariant = zero <= idx && idx < dim;
   if (cdim_name == CDimName::CRow) {
-@@ -128,14 +128,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
+@@ -129,14 +129,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
   for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
     const auto invariant = *(curr - 1) < *curr;
     if (cdim_name == CDimName::CRow) {
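The hunks above (the source cuts off mid-hunk here) again only renumber headers in the patch to PyTorch's sparse-index validator, but the assertion messages visible in the context lines spell out the invariants being checked for compressed sparse (CSR/CSC) indices. Restated in plain Python for the CRow (CSR) case, as a sketch with an invented function name and plain lists standing in for tensors:

# Not from the commit: a plain-Python restatement of the invariants the
# _check_* helpers above enforce for CSR compressed indices.
def validate_csr_indices(crow_indices, col_indices, nrows, ncols, nnz):
    assert len(crow_indices) == nrows + 1
    # "`crow_indices[..., 0] == 0` is not satisfied." (_check_first_cidx_is_zero)
    assert crow_indices[0] == 0
    # "`crow_indices[..., -1] == nnz` is not satisfied." (_check_last_cidx_is_nnz)
    assert crow_indices[-1] == nnz
    # "0 <= crow_indices[..., 1:] - crow_indices[..., :-1] <= ncols"
    # (_check_cidx_nondecreasing_loc...)
    for prev, curr in zip(crow_indices, crow_indices[1:]):
        assert 0 <= curr - prev <= ncols
    # zero <= idx && idx < dim for every plain index (_check_idx_bounds),
    # and per row sorted and distinct, i.e. *(curr - 1) < *curr
    # (_check_idx_sorted_distinct_va...)
    for row in range(nrows):
        cols = col_indices[crow_indices[row]:crow_indices[row + 1]]
        assert all(0 <= c < ncols for c in cols)
        assert all(a < b for a, b in zip(cols, cols[1:]))

# Example: the 2x3 CSR matrix [[1, 0, 2], [0, 3, 0]]
validate_csr_indices([0, 2, 3], [0, 2, 1], nrows=2, ncols=3, nnz=3)

The CDimName::CCol branches visible in the diff are the same checks with nrows and ncols swapped for CSC layout.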