Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
126 changes: 112 additions & 14 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,37 +22,37 @@ jobs:
fail-fast: false
matrix:
include:
- name: Linux (CUDA 11.8, Python 3.10, PyTorch 2.1)
- name: Linux (CUDA 12.4, Python 3.13, PyTorch 2.8)
os: ubuntu-22.04
cuda-version: "11.8.0"
cuda-version: "12.4.0"
gcc-version: "10.3.*"
nvcc-version: "11.8"
python-version: "3.10"
pytorch-version: "2.1.*"
nvcc-version: "12.4"
python-version: "3.13"
pytorch-version: "2.8.*"

- name: MacOS ARM (Python 3.11, PyTorch 2.4)
- name: MacOS ARM (Python 3.13, PyTorch 2.5)
os: macos-latest
cuda-version: ""
gcc-version: ""
nvcc-version: ""
python-version: "3.11"
pytorch-version: "2.4.*"
python-version: "3.13"
pytorch-version: "2.5.*"

- name: MacOS Intel (Python 3.11, PyTorch 2.4)
- name: MacOS Intel (Python 3.13, PyTorch 2.5)
os: macos-13
cuda-version: ""
gcc-version: ""
nvcc-version: ""
python-version: "3.11"
pytorch-version: "2.4.*"
python-version: "3.13"
pytorch-version: "2.5.*"


steps:
- name: "Check out"
uses: actions/checkout@v2

- name: "Install CUDA Toolkit on Linux (if needed)"
uses: Jimver/cuda-toolkit@v0.2.15
uses: Jimver/cuda-toolkit@v0.2.24
with:
cuda: ${{ matrix.cuda-version }}
linux-local-args: '["--toolkit", "--override"]'
Expand All @@ -78,14 +78,15 @@ jobs:
NVCC_VERSION: ${{ matrix.nvcc-version }}
PYTORCH_VERSION: ${{ matrix.pytorch-version }}

- uses: conda-incubator/setup-miniconda@v2
- uses: conda-incubator/setup-miniconda@v3
name: "Install dependencies with Mamba"
with:
activate-environment: build
environment-file: devtools/conda-envs/build-${{ matrix.os }}.yml
python-version: ${{ matrix.python-version }}
auto-activate-base: false
miniforge-variant: Miniforge3
use-mamba: true
python-version: ${{ matrix.python-version }}
env:
# Override the CUDA detection as the CI hosts don't have NVIDIA drivers
CONDA_OVERRIDE_CUDA: ${{ matrix.nvcc-version }}
Expand Down Expand Up @@ -140,3 +141,100 @@ jobs:
export LD_LIBRARY_PATH="${CONDA_PREFIX}/lib/python${{ matrix.python-version }}/site-packages/torch/lib:${LD_LIBRARY_PATH}"
cd python/tests
pytest --verbose Test*


# Windows CI job. Mirrors the Linux job above: install CUDA, template the
# conda env file, build with CMake/JOM under MSVC, then run C++ and Python tests.
windows:
  runs-on: ${{ matrix.os }}
  name: ${{ matrix.name }}
  strategy:
    fail-fast: false
    matrix:
      include:
        # Keep the display name in sync with the version pins below.
        - name: Windows (CUDA 12.5, Python 3.12, PyTorch 2.5)
          os: windows-latest
          cuda-version: "12.5.0"
          nvcc-version: "12.5"
          python-version: "3.12"
          pytorch-version: "2.5.*"

  steps:
    - name: "Check out"
      uses: actions/checkout@v2

    - name: "Install CUDA Toolkit"
      uses: Jimver/cuda-toolkit@v0.2.24
      id: cuda-toolkit
      with:
        cuda: ${{ matrix.cuda-version }}
        # Network install lets us pull only the sub-packages the build needs,
        # which is much faster than the full local installer.
        method: 'network'
        sub-packages: '["nvcc", "nvrtc", "nvrtc_dev", "cufft", "cufft_dev", "opencl", "cudart", "cuda_profiler_api"]'

    # Substitute @PYTORCH_VERSION@ in the conda env file before solving it.
    - name: "Update the conda environment file"
      uses: cschleiden/replace-tokens@v1
      with:
        tokenPrefix: '@'
        tokenSuffix: '@'
        files: devtools/conda-envs/build-${{ matrix.os }}.yml
      env:
        PYTORCH_VERSION: ${{ matrix.pytorch-version }}

    # v3 with Miniforge/mamba, consistent with the Linux job (the step is
    # named "with Mamba" — v2 without use-mamba never actually enabled it).
    - uses: conda-incubator/setup-miniconda@v3
      name: "Install dependencies with Mamba"
      with:
        activate-environment: build
        environment-file: devtools/conda-envs/build-${{ matrix.os }}.yml
        miniforge-variant: Miniforge3
        use-mamba: true
        python-version: ${{ matrix.python-version }}
        auto-activate-base: false
      env:
        # Override the CUDA detection as the CI hosts don't have NVIDIA drivers
        CONDA_OVERRIDE_CUDA: ${{ matrix.nvcc-version }}

    - name: "List conda packages"
      shell: cmd /C call {0}
      run: conda list

    - name: "Configure"
      shell: cmd /C call {0}
      run: |
        call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
        mkdir build
        cd build
        for /f "usebackq delims==" %%v in (`python -c "import site; print(site.getsitepackages()[-1])"`) do set SITE_PACKAGES=%%v
        cmake .. -G "NMake Makefiles JOM" ^
          -DCMAKE_BUILD_TYPE=Release ^
          -DCMAKE_INSTALL_PREFIX=%CONDA_PREFIX%/Library ^
          -DOPENMM_DIR=%CONDA_PREFIX%/Library ^
          -DTorch_DIR=%SITE_PACKAGES%/torch/share/cmake/Torch ^
          -DNN_BUILD_OPENCL_LIB=ON ^
          -DOPENCL_INCLUDE_DIR="${{steps.cuda-toolkit.outputs.CUDA_PATH}}/include" ^
          -DOPENCL_LIBRARY="${{steps.cuda-toolkit.outputs.CUDA_PATH}}/lib/x64/OpenCL.lib"

    - name: "Build"
      shell: cmd /C call {0}
      run: |
        call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
        cd build
        jom -j 4 install
        jom -j 4 PythonInstall

    - name: "List plugins"
      shell: cmd /C call {0}
      run: |
        python -c "import openmm as mm; print('---Loaded---', *mm.pluginLoadedLibNames, '---Failed---', *mm.Platform.getPluginLoadFailures(), sep='\n')"

    - name: "Run C++ test"
      shell: cmd /C call {0}
      run: |
        cd build
        for /f "usebackq delims==" %%v in (`python -c "import site; print(site.getsitepackages()[-1])"`) do set SITE_PACKAGES=%%v
        dir %SITE_PACKAGES%\torch\lib
        dir %CONDA_PREFIX%\Library\lib
        rem Torch and OpenMM DLLs must be on PATH for the test binaries to load.
        set PATH=%PATH%;%SITE_PACKAGES%\torch\lib;%CONDA_PREFIX%\Library\lib
        set PATH
        rem No GPU on the CI hosts, so skip CUDA/OpenCL platform tests.
        ctest --output-on-failure --exclude-regex "TestCuda|TestOpenCL"

    - name: "Run Python test"
      shell: cmd /C call {0}
      run: |
        cd python/tests
        pytest --verbose Test*
30 changes: 25 additions & 5 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ LINK_DIRECTORIES("${OPENMM_DIR}/lib" "${OPENMM_DIR}/lib/plugins")
# We need to know where LibTorch is installed so we can access the headers and libraries.
SET(PYTORCH_DIR "" CACHE PATH "Where the PyTorch C++ API is installed")
SET(CMAKE_PREFIX_PATH "${PYTORCH_DIR}")
FIND_PACKAGE(Torch REQUIRED)

# Specify the C++ version we are building for.
SET (CMAKE_CXX_STANDARD 17)
Expand Down Expand Up @@ -84,6 +83,28 @@ FOREACH(subdir ${NN_PLUGIN_SOURCE_SUBDIRS})
INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/include)
ENDFOREACH(subdir)

# The following is taken from https://discuss.pytorch.org/t/failed-to-find-nvtoolsext/179635.
# CUDA 12 changed nvToolsExt from a library to just a header. This broke compiling against
# libtorch. Almost two years later, they still haven't fixed it. This is a workaround.

FIND_PACKAGE(CUDAToolkit QUIET)
if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 12)
message(STATUS "PyTorch NVTX headers workaround: Yes")
# only do this if nvToolsExt is not defined and CUDA::nvtx3 exists
if(NOT TARGET CUDA::nvToolsExt AND TARGET CUDA::nvtx3)
add_library(CUDA::nvToolsExt INTERFACE IMPORTED)
# ensure that PyTorch is told to use NVTX3 headers
target_compile_definitions(
CUDA::nvToolsExt INTERFACE
TORCH_CUDA_USE_NVTX3
)
target_link_libraries(CUDA::nvToolsExt INTERFACE CUDA::nvtx3)
endif()
else()
message(STATUS "PyTorch NVTX headers workaround: No")
endif()
FIND_PACKAGE(Torch REQUIRED)

# Create the library.

ADD_LIBRARY(${SHARED_NN_TARGET} SHARED ${SOURCE_FILES} ${SOURCE_INCLUDE_FILES} ${API_INCLUDE_FILES})
Expand Down Expand Up @@ -139,12 +160,11 @@ IF(NN_BUILD_OPENCL_LIB)
ADD_SUBDIRECTORY(platforms/opencl)
ENDIF(NN_BUILD_OPENCL_LIB)

FIND_PACKAGE(CUDA QUIET)
IF(CUDA_FOUND)
IF(CUDAToolkit_FOUND)
SET(NN_BUILD_CUDA_LIB ON CACHE BOOL "Build implementation for CUDA")
ELSE(CUDA_FOUND)
ELSE(CUDAToolkit_FOUND)
SET(NN_BUILD_CUDA_LIB OFF CACHE BOOL "Build implementation for CUDA")
ENDIF(CUDA_FOUND)
ENDIF(CUDAToolkit_FOUND)
IF(NN_BUILD_CUDA_LIB)
ADD_SUBDIRECTORY(platforms/cuda)
ENDIF(NN_BUILD_CUDA_LIB)
Expand Down
15 changes: 15 additions & 0 deletions devtools/conda-envs/build-windows-latest.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Conda environment for the Windows CI build. The name must stay "build" —
# the workflow activates it via `activate-environment: build`.
name: build
channels:
- conda-forge
dependencies:
# Build toolchain (JOM drives the "NMake Makefiles JOM" CMake generator).
- jom
- cmake
- ccache
- m2-coreutils
# Python toolchain for building the bindings.
- python
- cython
- swig
# Runtime / test dependencies.
- openmm >=8.2
# @PYTORCH_VERSION@ is substituted by the workflow's replace-tokens step.
- pytorch-gpu @PYTORCH_VERSION@
- torchani
- pytest
9 changes: 1 addition & 8 deletions platforms/cuda/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -58,14 +58,7 @@ ADD_CUSTOM_COMMAND(OUTPUT ${CUDA_KERNELS_CPP} ${CUDA_KERNELS_H}
SET_SOURCE_FILES_PROPERTIES(${CUDA_KERNELS_CPP} ${CUDA_KERNELS_H} PROPERTIES GENERATED TRUE)
ADD_LIBRARY(${SHARED_TARGET} SHARED ${SOURCE_FILES} ${SOURCE_INCLUDE_FILES} ${API_INCLUDE_FILES})

TARGET_LINK_LIBRARIES(${SHARED_TARGET} ${CUDA_LIBRARIES})
IF(WIN32)
FIND_LIBRARY(CUDA_DRIVER_LIBRARY cuda HINTS ${CUDA_DRIVER_LIBRARY_PATH})
IF(NOT CUDA_DRIVER_LIBRARY)
MESSAGE(FATAL_ERROR "Could not find CUDA driver library")
ENDIF()
TARGET_LINK_LIBRARIES(${SHARED_TARGET} ${CUDA_DRIVER_LIBRARY})
ENDIF(WIN32)
TARGET_LINK_LIBRARIES(${SHARED_TARGET} CUDA::cuda_driver)
TARGET_LINK_LIBRARIES(${SHARED_TARGET} OpenMM)
TARGET_LINK_LIBRARIES(${SHARED_TARGET} OpenMMCUDA)
TARGET_LINK_LIBRARIES(${SHARED_TARGET} ${NN_LIBRARY_NAME})
Expand Down
Loading