diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt index e3a53c8bcb5..40331b4a2d8 100644 --- a/.ci/docker/ci_commit_pins/pytorch.txt +++ b/.ci/docker/ci_commit_pins/pytorch.txt @@ -1 +1 @@ -53a2908a10f414a2f85caa06703a26a40e873869 +7d06d03434c7c8de8fe1a432ee454b953f84ab0d diff --git a/install_requirements.py b/install_requirements.py index b84e250cf87..a026e5b9964 100644 --- a/install_requirements.py +++ b/install_requirements.py @@ -12,33 +12,12 @@ from install_utils import determine_torch_url, is_intel_mac_os, python_is_compatible -from torch_pin import NIGHTLY_VERSION, TORCH_VERSION +from torch_pin import NIGHTLY_VERSION, SUPPORTED_CUDA_VERSIONS, TORCH_VERSION # The pip repository that hosts nightly torch packages. # This will be dynamically set based on CUDA availability and CUDA backend enabled/disabled. TORCH_NIGHTLY_URL_BASE = "https://download.pytorch.org/whl/nightly" -# Supported CUDA versions - modify this to add/remove supported versions -# Format: tuple of (major, minor) version numbers -SUPPORTED_CUDA_VERSIONS = ( - (12, 6), - (12, 8), - (13, 0), -) - -# Since ExecuTorch often uses main-branch features of pytorch, only the nightly -# pip versions will have the required features. -# -# NOTE: If a newly-fetched version of the executorch repo changes the value of -# NIGHTLY_VERSION, you should re-run this script to install the necessary -# package versions. -# -# NOTE: If you're changing, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt -# by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/ -# -# NOTE: If you're changing, make the corresponding supported CUDA versions in -# SUPPORTED_CUDA_VERSIONS above if needed. - def install_requirements(use_pytorch_nightly): # Skip pip install on Intel macOS if using nightly. 
diff --git a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h index 558edb175ae..e340e7626a0 100644 --- a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h +++ b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h @@ -359,6 +359,7 @@ static inline int C10_WARP_SIZE_INTERNAL() { // Those platforms do not support assert() #define CUDA_KERNEL_ASSERT(cond) #define CUDA_KERNEL_ASSERT_MSG(cond, msg) +#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) #define SYCL_KERNEL_ASSERT(cond) #elif defined(_MSC_VER) #if defined(NDEBUG) @@ -396,6 +397,26 @@ __host__ __device__ static_cast<unsigned>(__LINE__)), \ 0); \ } +#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(printf( \ + "[CUDA_KERNEL_ASSERT] " __FILE__ ":" C10_STRINGIZE( \ + __LINE__) ": %s: block: [%d,%d,%d], thread: [%d,%d,%d]: " \ + "Assertion failed: `" #cond "`: " msg "\n", \ + __func__, \ + blockIdx.x, \ + blockIdx.y, \ + blockIdx.z, \ + threadIdx.x, \ + threadIdx.y, \ + threadIdx.z, \ + ##__VA_ARGS__)); \ + (void)(_wassert( \ + _CRT_WIDE(#cond), \ + _CRT_WIDE(__FILE__), \ + static_cast<unsigned>(__LINE__)), \ + 0); \ + } #define SYCL_KERNEL_ASSERT(cond) \ if (C10_UNLIKELY(!(cond))) { \ (void)(_wassert( \ @@ -455,6 +476,10 @@ __host__ __device__ if C10_UNLIKELY (!(cond)) { \ abort(); \ } +#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) \ + if C10_UNLIKELY (!(cond)) { \ + abort(); \ + } #define SYCL_KERNEL_ASSERT(cond) \ if C10_UNLIKELY (!(cond)) { \ abort(); \ } @@ -470,6 +495,23 @@ __host__ __device__ __assert_fail( \ msg, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \ } +#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) 
\ +   if (C10_UNLIKELY(!(cond))) {                                     \ +     printf(                                                        \ +         "[CUDA_KERNEL_ASSERT] " __FILE__ ":" C10_STRINGIZE(        \ +             __LINE__) ": %s: block: [%d,%d,%d], thread: [%d,%d,%d]: " \ +         "Assertion failed: `" #cond "`: " msg "\n",                \ +         __func__,                                                  \ +         blockIdx.x,                                                \ +         blockIdx.y,                                                \ +         blockIdx.z,                                                \ +         threadIdx.x,                                               \ +         threadIdx.y,                                               \ +         threadIdx.z,                                               \ +         ##__VA_ARGS__);                                            \ +     __assert_fail(                                                 \ +         #cond, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \ +   } #define SYCL_KERNEL_ASSERT(cond)                              \   if (C10_UNLIKELY(!(cond))) {                                     \     __assert_fail(                                                 \ diff --git a/torch_pin.py b/torch_pin.py index 02040c91963..765922cfdf0 100644 --- a/torch_pin.py +++ b/torch_pin.py @@ -1,2 +1,19 @@ +# Since ExecuTorch often uses main-branch features of pytorch, only the nightly +# pip versions will have the required features. +# +# NOTE: If a newly-fetched version of the executorch repo changes the value of +# NIGHTLY_VERSION, you should re-run the install_executorch.sh script to install the necessary +# package versions. +# +# NOTE: If you're changing NIGHTLY_VERSION, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt +# by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/ +# +# NOTE: If you're changing NIGHTLY_VERSION, update the supported CUDA versions in +# SUPPORTED_CUDA_VERSIONS below if needed. TORCH_VERSION = "2.10.0" -NIGHTLY_VERSION = "dev20251003" +NIGHTLY_VERSION = "dev20251008" +SUPPORTED_CUDA_VERSIONS = ( + (12, 6), + (12, 8), + (13, 0), +)