diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt
index aafc7565373..e3a53c8bcb5 100644
--- a/.ci/docker/ci_commit_pins/pytorch.txt
+++ b/.ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
-cf9d09490c7f6685ec68d5db3acf2e0d73c54d00
+53a2908a10f414a2f85caa06703a26a40e873869
diff --git a/docs/source/conf.py b/docs/source/conf.py
index b1c6b8b43a2..f1869d38a46 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -24,7 +24,7 @@
 import sys
 from typing import Any
 
-import pytorch_sphinx_theme2  # type: ignore[import-not-found]
+import pytorch_sphinx_theme2  # type: ignore[import-untyped]
 
 # To let us import ./custom_directives.py
 sys.path.insert(0, os.path.abspath("."))
diff --git a/install_requirements.py b/install_requirements.py
index a026e5b9964..b84e250cf87 100644
--- a/install_requirements.py
+++ b/install_requirements.py
@@ -12,12 +12,33 @@
 
 from install_utils import determine_torch_url, is_intel_mac_os, python_is_compatible
 
-from torch_pin import NIGHTLY_VERSION, SUPPORTED_CUDA_VERSIONS, TORCH_VERSION
+from torch_pin import NIGHTLY_VERSION, TORCH_VERSION
 
 # The pip repository that hosts nightly torch packages.
 # This will be dynamically set based on CUDA availability and CUDA backend enabled/disabled.
 TORCH_NIGHTLY_URL_BASE = "https://download.pytorch.org/whl/nightly"
 
+# Supported CUDA versions - modify this to add/remove supported versions
+# Format: tuple of (major, minor) version numbers
+SUPPORTED_CUDA_VERSIONS = (
+    (12, 6),
+    (12, 8),
+    (13, 0),
+)
+
+# Since ExecuTorch often uses main-branch features of pytorch, only the nightly
+# pip versions will have the required features.
+#
+# NOTE: If a newly-fetched version of the executorch repo changes the value of
+# NIGHTLY_VERSION, you should re-run this script to install the necessary
+# package versions.
+#
+# NOTE: If you're changing NIGHTLY_VERSION, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt
+# by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/
+#
+# NOTE: If you're changing NIGHTLY_VERSION, also update the supported CUDA versions in
+# SUPPORTED_CUDA_VERSIONS above if needed.
+
 
 def install_requirements(use_pytorch_nightly):
     # Skip pip install on Intel macOS if using nightly.
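Note: SUPPORTED_CUDA_VERSIONS, now defined in install_requirements.py, is a plain tuple of (major, minor) pairs. The sketch below (not part of the patch) shows how such a table can drive nightly wheel-index selection; the choose_nightly_url helper and its CPU fallback are illustrative assumptions, not the actual logic in install_utils.py.

# Illustrative sketch only (not part of the patch). choose_nightly_url is a
# hypothetical helper; the real index selection lives in install_utils.py.
SUPPORTED_CUDA_VERSIONS = (
    (12, 6),
    (12, 8),
    (13, 0),
)

TORCH_NIGHTLY_URL_BASE = "https://download.pytorch.org/whl/nightly"


def choose_nightly_url(cuda_version):
    """Return the nightly index URL for a detected CUDA version, else the CPU index."""
    if cuda_version in SUPPORTED_CUDA_VERSIONS:
        major, minor = cuda_version
        # PyTorch nightly CUDA indexes are named cu126, cu128, cu130, ...
        return f"{TORCH_NIGHTLY_URL_BASE}/cu{major}{minor}"
    return f"{TORCH_NIGHTLY_URL_BASE}/cpu"


print(choose_nightly_url((12, 8)))  # https://download.pytorch.org/whl/nightly/cu128
print(choose_nightly_url(None))     # https://download.pytorch.org/whl/nightly/cpu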
diff --git a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h
index e340e7626a0..558edb175ae 100644
--- a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h
+++ b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h
@@ -359,7 +359,6 @@ static inline int C10_WARP_SIZE_INTERNAL() {
 // Those platforms do not support assert()
 #define CUDA_KERNEL_ASSERT(cond)
 #define CUDA_KERNEL_ASSERT_MSG(cond, msg)
-#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...)
 #define SYCL_KERNEL_ASSERT(cond)
 #elif defined(_MSC_VER)
 #if defined(NDEBUG)
@@ -397,26 +396,6 @@ __host__ __device__
         static_cast<unsigned>(__LINE__)), \
         0); \
   }
-#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) \
-  if (C10_UNLIKELY(!(cond))) { \
-    (void)(printf( \
-        "[CUDA_KERNEL_ASSERT] " __FILE__ ":" C10_STRINGIZE( \
-            __LINE__) ": %s: block: [%d,%d,%d], thread: [%d,%d,%d]: " \
-        "Assertion failed: `" #cond "`: " msg "\n", \
-        __func__, \
-        blockIdx.x, \
-        blockIdx.y, \
-        blockIdx.z, \
-        threadIdx.x, \
-        threadIdx.y, \
-        threadIdx.z, \
-        ##__VA_ARGS__)); \
-    (void)(_wassert( \
-        _CRT_WIDE(#cond), \
-        _CRT_WIDE(__FILE__), \
-        static_cast<unsigned>(__LINE__)), \
-        0); \
-  }
 #define SYCL_KERNEL_ASSERT(cond) \
   if (C10_UNLIKELY(!(cond))) { \
     (void)(_wassert( \
         _CRT_WIDE(#cond), \
         _CRT_WIDE(__FILE__), \
         static_cast<unsigned>(__LINE__)), \
         0); \
   }
@@ -476,10 +455,6 @@ __host__ __device__
   if C10_UNLIKELY (!(cond)) { \
     abort(); \
   }
-#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) \
-  if C10_UNLIKELY (!(cond)) { \
-    abort(); \
-  }
 #define SYCL_KERNEL_ASSERT(cond) \
   if C10_UNLIKELY (!(cond)) { \
     abort(); \
   }
@@ -495,23 +470,6 @@ __host__ __device__
     __assert_fail( \
         msg, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \
   }
-#define CUDA_KERNEL_ASSERT_PRINTF(cond, msg, ...) \
-  if (C10_UNLIKELY(!(cond))) { \
-    printf( \
-        "[CUDA_KERNEL_ASSERT] " __FILE__ ":" C10_STRINGIZE( \
-            __LINE__) ": %s: block: [%d,%d,%d], thread: [%d,%d,%d]: " \
-        "Assertion failed: `" #cond "`: " msg "\n", \
-        __func__, \
-        blockIdx.x, \
-        blockIdx.y, \
-        blockIdx.z, \
-        threadIdx.x, \
-        threadIdx.y, \
-        threadIdx.z, \
-        ##__VA_ARGS__); \
-    __assert_fail( \
-        #cond, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \
-  }
 #define SYCL_KERNEL_ASSERT(cond) \
   if (C10_UNLIKELY(!(cond))) { \
     __assert_fail( \
diff --git a/torch_pin.py b/torch_pin.py
index bb8d32d4716..02040c91963 100644
--- a/torch_pin.py
+++ b/torch_pin.py
@@ -1,19 +1,2 @@
-# Since ExecuTorch often uses main-branch features of pytorch, only the nightly
-# pip versions will have the required features.
-#
-# NOTE: If a newly-fetched version of the executorch repo changes the value of
-# NIGHTLY_VERSION, you should re-run install_executorch.sh script to install the necessary
-# package versions.
-#
-# NOTE: If you're changing, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt
-# by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/
-#
-# NOTE: If you're changing, make the corresponding supported CUDA versions in
-# SUPPORTED_CUDA_VERSIONS above if needed.
 TORCH_VERSION = "2.10.0"
-NIGHTLY_VERSION = "dev20251004"
-SUPPORTED_CUDA_VERSIONS = (
-    (12, 6),
-    (12, 8),
-    (13, 0),
-)
+NIGHTLY_VERSION = "dev20251003"
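Note: after this change torch_pin.py carries only TORCH_VERSION and NIGHTLY_VERSION. The sketch below (not part of the patch) shows how these two values are typically combined into a pip requirement pin for the nightly channel; the torch_requirement helper is a hypothetical name for illustration, not the literal code in install_requirements.py.

# Illustrative sketch only (not part of the patch). torch_requirement is a
# hypothetical helper; the real requirement strings may differ.
TORCH_VERSION = "2.10.0"
NIGHTLY_VERSION = "dev20251003"


def torch_requirement(use_pytorch_nightly: bool) -> str:
    if use_pytorch_nightly:
        # Nightly wheels are versioned like 2.10.0.dev20251003.
        return f"torch=={TORCH_VERSION}.{NIGHTLY_VERSION}"
    return f"torch=={TORCH_VERSION}"


print(torch_requirement(True))   # torch==2.10.0.dev20251003
print(torch_requirement(False))  # torch==2.10.0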