-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathbuild_cuda.py
More file actions
177 lines (153 loc) Β· 5.81 KB
/
build_cuda.py
File metadata and controls
177 lines (153 loc) Β· 5.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
#!/usr/bin/env python3
"""
Manual CUDA Extension Builder for CRAYON
Guaranteed to work on all systems with CUDA
"""
import os
import sys
import subprocess
import shutil
from pathlib import Path
def _find_python_include():
    """Return the directory containing the Python C headers (Python.h).

    Checks the common Unix prefixes first, then falls back to asking the
    running interpreter via ``sysconfig`` (``distutils.sysconfig`` was
    removed in Python 3.12, so it must not be used here).
    """
    version = f"{sys.version_info.major}.{sys.version_info.minor}"
    for candidate in (f"/usr/include/python{version}",
                      f"/usr/local/include/python{version}"):
        if Path(candidate).exists():
            return candidate
    import sysconfig
    return sysconfig.get_paths()["include"]


def _find_cuda_home():
    """Return ``(cuda_home, cuda_include)`` for the local CUDA toolkit.

    Honors ``CUDA_HOME`` first, then probes standard Linux and Windows
    install locations. Falls back to ``/usr/local/cuda`` (with a warning)
    when nothing is found, matching the original best-effort behavior.
    """
    candidates = [os.environ.get('CUDA_HOME', ''), '/usr/local/cuda', '/usr/cuda']
    candidates += [
        f'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v{v}'
        for v in ('12.8', '12.7', '12.6', '12.5', '12.4',
                  '12.3', '12.2', '12.1', '12.0', '11.8')
    ]
    for path in candidates:
        if path and Path(path).exists():
            print(f"[OK] CUDA found: {path}")
            return path, f"{path}/include"
    print("[WARN] CUDA toolkit not located; using default path /usr/local/cuda")
    return '/usr/local/cuda', '/usr/local/cuda/include'


def _gpu_arch_flags():
    """Return the nvcc ``-gencode`` flags for the local GPU.

    Uses torch to query the device capability when available; any failure
    (torch missing, no GPU, driver error) falls back to sm_75 so the build
    can still proceed — this mirrors the original best-effort intent, but
    without a bare ``except:`` that would also swallow KeyboardInterrupt.
    """
    try:
        import torch
        if torch.cuda.is_available():
            major, minor = torch.cuda.get_device_capability()
            arch = f"{major}{minor}"
            print(f"[OK] GPU architecture: sm_{arch}")
            return [f"-gencode=arch=compute_{arch},code=sm_{arch}"]
    except Exception:
        pass  # best-effort probe: fall through to the default architecture
    print("[OK] Using default GPU architecture: sm_75")
    return ["-gencode=arch=compute_75,code=sm_75"]


def _patch_init(c_ext_dir):
    """Add a guarded ``crayon_cuda`` import to the package ``__init__.py``.

    No-op if the file is missing or already mentions the extension, so the
    patch is idempotent and safe to re-run after rebuilds.
    """
    init_file = c_ext_dir / "__init__.py"
    if not init_file.exists():
        return
    content = init_file.read_text()
    if "crayon_cuda" in content:
        return  # already patched
    cuda_import = (
        "# CUDA Extension\n"
        "try:\n"
        "    from . import crayon_cuda\n"
        "except ImportError:\n"
        "    pass"
    )
    lines = content.split('\n')
    # Insert right after the CPU extension import so load order is preserved.
    insert_idx = next(
        (i + 1 for i, line in enumerate(lines)
         if "from . import crayon_cpu" in line),
        -1,
    )
    if insert_idx >= 0:
        lines.insert(insert_idx, cuda_import)
        init_file.write_text('\n'.join(lines))
        print("[OK] Updated __init__.py with CUDA import")


def build_cuda_extension():
    """Manually build the CRAYON CUDA extension with maximum compatibility.

    Locates the Python and CUDA include directories, compiles
    ``src/crayon/c_ext/gpu_engine_cuda.cu`` with ``nvcc`` directly into the
    installed ``crayon/c_ext`` package directory, and patches that package's
    ``__init__.py`` to import the new extension.

    Returns:
        bool: True if the extension compiled successfully, False otherwise.
    """
    print("[BUILD] Building CRAYON CUDA Extension Manually...")

    script_dir = Path(__file__).parent
    src_dir = script_dir / "src" / "crayon" / "c_ext"

    python_include = _find_python_include()
    print(f"[OK] Python include: {python_include}")

    cuda_home, cuda_include = _find_cuda_home()

    # Compile straight into the installed package so the extension is
    # importable without a separate copy step.
    import site
    site_packages = site.getsitepackages()[0]
    c_ext_dir = Path(site_packages) / "crayon" / "c_ext"
    c_ext_dir.mkdir(parents=True, exist_ok=True)  # nvcc -o needs the dir to exist
    print(f"[OK] Target directory: {c_ext_dir}")

    windows = sys.platform == "win32"
    output_file = "crayon_cuda.pyd" if windows else "crayon_cuda.so"
    cmd = [
        "nvcc",
        "-O3", "-std=c++17",
        # Host-compiler flags: /MD (MSVC dynamic CRT) on Windows, -fPIC elsewhere.
        "--compiler-options", "/MD" if windows else "-fPIC",
        "-shared",
        "-o", str(c_ext_dir / output_file),
        str(src_dir / "gpu_engine_cuda.cu"),
        f"-I{python_include}",
        f"-I{cuda_include}",
        # Match the pre-C++11 std::string ABI used by many prebuilt libraries.
        "-D_GLIBCXX_USE_CXX11_ABI=0",
    ]
    if windows:
        cmd += ["-Xcompiler", "/EHsc"]  # enable standard C++ exception handling
    cmd += _gpu_arch_flags()

    print(f"[BUILD] Build command: {' '.join(cmd)}")

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, cwd=src_dir)
    except OSError as e:
        # nvcc missing or not executable.
        print(f"[FAIL] Build error: {e}")
        return False

    if result.returncode != 0:
        print("[FAIL] Build failed:")
        print(result.stderr)
        return False

    print("[OK] CUDA extension built successfully!")
    print(f"[OK] Output: {c_ext_dir / output_file}")
    _patch_init(c_ext_dir)
    return True
if __name__ == "__main__":
    # Run the build and report the outcome. Exit with a non-zero status on
    # failure so CI pipelines and shell scripts can detect a broken build
    # (the original always exited 0, masking failures from callers).
    success = build_cuda_extension()
    if success:
        print("[DONE] CUDA extension is ready!")
        print("Test with: python -c 'from crayon.c_ext import crayon_cuda; print(\"CUDA works!\")'")
    else:
        print("[FAIL] CUDA extension build failed")
        print("Install the CUDA Toolkit and try again")
    sys.exit(0 if success else 1)