
Commit fa50a63

add github ci for gpu pt install check

1 parent 0d29f40 commit fa50a63

File tree

1 file changed: +84 -0 lines

.ci/scripts/test-cuda-build.sh

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -exu

# shellcheck source=/dev/null
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

CUDA_VERSION=${1:-"12.6"}

echo "=== Testing ExecutorTorch CUDA ${CUDA_VERSION} Build ==="

# Function to build and test ExecutorTorch with CUDA support
test_executorch_cuda_build() {
  local cuda_version=$1

  echo "Building ExecutorTorch with CUDA ${cuda_version} support..."
  echo "ExecutorTorch will automatically detect CUDA and install appropriate PyTorch wheel"

  # Set CMAKE_ARGS to enable CUDA build - ExecutorTorch will handle PyTorch installation automatically
  export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"

  # Install ExecutorTorch with CUDA support - this will automatically:
  # 1. Detect CUDA version using nvcc
  # 2. Install appropriate PyTorch wheel for the detected CUDA version
  # 3. Build ExecutorTorch with CUDA support
  ./install_executorch.sh

  echo "SUCCESS: ExecutorTorch CUDA build completed"

  # Verify the installation
  echo "=== Verifying ExecutorTorch CUDA Installation ==="

  # Test that ExecutorTorch was built successfully
  python -c "
import executorch
print('SUCCESS: ExecutorTorch imported successfully')
"

  # Test CUDA availability and show details
  python -c "
try:
    import torch
    print('INFO: PyTorch version:', torch.__version__)
    print('INFO: CUDA available:', torch.cuda.is_available())

    if torch.cuda.is_available():
        print('SUCCESS: CUDA is available for ExecutorTorch')
        print('INFO: CUDA version:', torch.version.cuda)
        print('INFO: GPU device count:', torch.cuda.device_count())
        print('INFO: Current GPU device:', torch.cuda.current_device())
        print('INFO: GPU device name:', torch.cuda.get_device_name())

        # Test basic CUDA tensor operation
        device = torch.device('cuda')
        x = torch.randn(10, 10).to(device)
        y = torch.randn(10, 10).to(device)
        z = torch.mm(x, y)
        print('SUCCESS: CUDA tensor operation completed on device:', z.device)
        print('INFO: Result tensor shape:', z.shape)

        print('SUCCESS: ExecutorTorch CUDA integration verified')
    else:
        print('WARNING: CUDA not detected, but ExecutorTorch built successfully')
        exit(1)
except Exception as e:
    print('ERROR: ExecutorTorch CUDA test failed:', e)
    exit(1)
"

  echo "SUCCESS: ExecutorTorch CUDA ${cuda_version} build and verification completed successfully"
}

# Main execution
echo "Current working directory: $(pwd)"
echo "Directory contents:"
ls -la

# Run the CUDA build test
test_executorch_cuda_build "${CUDA_VERSION}"
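
For reference, a minimal sketch of how this script can be invoked outside the workflow. It assumes a CUDA-capable machine with nvcc on PATH and the ExecutorTorch repository root as the working directory (the script calls ./install_executorch.sh relative to it); the CI wiring itself is not shown in this commit.

# Sketch: manual run from the ExecutorTorch repository root (assumes nvcc is installed).
# The version argument is optional; "12.6" is also the script's built-in default.
bash .ci/scripts/test-cuda-build.sh 12.6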
