
Commit f4c05e0

Merge pull request #2941 from JuliaGPU/tb/backwards_compatibility
Relax checks as per backwards compatibility.
2 parents f1c4cdd + 25bc5a9 commit f4c05e0

File tree: 4 files changed (+20 / -20 lines)


Project.toml

Lines changed: 2 additions & 2 deletions
@@ -55,7 +55,7 @@ AbstractFFTs = "0.5, 1.0"
 Adapt = "4.4"
 BFloat16s = "0.5, 0.6"
 CEnum = "0.2, 0.3, 0.4, 0.5"
-CUDA_Compiler_jll = "0.2"
+CUDA_Compiler_jll = "0.3"
 CUDA_Driver_jll = "13"
 CUDA_Runtime_Discovery = "1"
 CUDA_Runtime_jll = "0.19"
@@ -65,7 +65,7 @@ DataFrames = "1.5"
 EnzymeCore = "0.8.2"
 ExprTools = "0.1"
 GPUArrays = "11.2.4"
-GPUCompiler = "1.1"
+GPUCompiler = "1.4"
 GPUToolbox = "0.3, 1"
 KernelAbstractions = "0.9.38"
 LLVM = "9.3.1"
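
As a quick sanity check after this bump, the resolved dependency versions can be compared against the new bounds. A minimal sketch using the standard Pkg API (not part of this commit); the ranges in the comments follow Julia's default compat semantics:

using Pkg

# Inspect the resolved versions of the two packages whose compat bounds changed.
for (_, info) in Pkg.dependencies()
    if info.name == "CUDA_Compiler_jll"
        # compat "0.3" means [0.3.0, 0.4.0)
        @assert v"0.3" <= info.version < v"0.4" "unexpected CUDA_Compiler_jll version"
    elseif info.name == "GPUCompiler"
        # compat "1.4" means [1.4.0, 2.0.0)
        @assert v"1.4" <= info.version < v"2" "unexpected GPUCompiler version"
    end
end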

src/compatibility.jl

Lines changed: 13 additions & 9 deletions
@@ -296,15 +296,19 @@ function llvm_compat(version=LLVM.version())
     return (cap=cap_support, ptx=ptx_support)
 end
 
-function cuda_compat(driver=driver_version(), compiler=compiler_version())
-    # devices have to be supported by both the compiler and the driver
-    driver_cap_support = cuda_cap_support(driver)
-    compiler_cap_support = cuda_cap_support(compiler)
-    cap_support = sort(collect(driver_cap_support ∩ compiler_cap_support))
-
-    # PTX code only has to be supported by the compiler
-    compiler_ptx_support = cuda_ptx_support(compiler)
-    ptx_support = cuda_ptx_support(compiler)
+function cuda_compat(version=runtime_version())
+    # we don't have to check the driver version, because it offers backwards compatibility
+    # beyond the CUDA toolkit version (e.g. R580 for CUDA 13 still supports Volta as
+    # deprecated in CUDA 13), and we don't have a reliable way to query the actual version
+    # as NVML isn't available on all platforms. let's instead simply assume that unsupported
+    # devices will not be exposed to the CUDA runtime and thus won't be visible to us.
+
+    # we also don't have to check the compiler version, because CUDA_Compiler_jll is
+    # guaranteed to have the same major version as CUDA_Runtime_jll, meaning that the
+    # compiler will always support at least the same devices as the runtime.
+
+    cap_support = sort(collect(cuda_cap_support(version)))
+    ptx_support = sort(collect(cuda_ptx_support(version)))
 
     return (cap=cap_support, ptx=ptx_support)
 end
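
To illustrate how the simplified cuda_compat output might be consumed, here is a minimal sketch (not code from this commit): it intersects CUDA and LLVM support and picks the highest common target. It assumes cuda_compat and llvm_compat from above are in scope (i.e. it runs inside the CUDA module) and return named tuples of sorted VersionNumber vectors; pick_target and device_cap are hypothetical names.

# Hypothetical helper: choose the best (cap, ptx) pair for a device with the
# given compute capability, combining the CUDA and LLVM support tables.
function pick_target(device_cap::VersionNumber)
    cuda = cuda_compat()
    llvm = llvm_compat()

    # the PTX ISA must be emittable by LLVM and understood by the CUDA toolkit
    ptxs = intersect(cuda.ptx, llvm.ptx)
    isempty(ptxs) && error("no PTX ISA supported by both LLVM and CUDA")

    # the capability must be supported by both and must not exceed the device
    caps = filter(<=(device_cap), intersect(cuda.cap, llvm.cap))
    isempty(caps) && error("compute capability $device_cap is not supported")

    return (cap=maximum(caps), ptx=maximum(ptxs))
end

# e.g. pick_target(v"8.6") for an Ampere-class GPU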

src/compiler/compilation.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,7 @@ end
204204
error("CUDA.jl requires PTX $requested_ptx, which is not supported by LLVM $(LLVM.version())")
205205
llvm_ptx = maximum(llvm_ptxs)
206206
isempty(cuda_ptxs) &&
207-
error("CUDA.jl requires PTX $requested_ptx, which is not supported by CUDA driver $(driver_version()) / runtime $(runtime_version())")
207+
error("CUDA.jl requires PTX $requested_ptx, which is not supported by CUDA $(runtime_version())")
208208
cuda_ptx = maximum(cuda_ptxs)
209209
end
210210

@@ -229,7 +229,7 @@ end
229229
## use the highest capability supported by CUDA
230230
cuda_caps = filter(<=(capability(dev)), cuda_support.cap)
231231
isempty(cuda_caps) &&
232-
error("Compute capability $(requested_cap) is not supported by CUDA driver $(driver_version()) / runtime $(runtime_version())")
232+
error("Compute capability $(requested_cap) is not supported by CUDA $(runtime_version())")
233233
cuda_cap = maximum(cuda_caps)
234234
end
235235
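
To see which versions would be interpolated into these messages on a given setup, the support tables can be inspected directly. A small usage sketch; note that cuda_compat is the internal helper shown above, not public API, so its name and output shape are an assumption about internals:

using CUDA

# Which compute capabilities and PTX ISAs does the active CUDA installation support?
support = CUDA.cuda_compat()
@info "CUDA $(CUDA.runtime_version())" cap=support.cap ptx=support.ptx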

src/initialization.jl

Lines changed: 3 additions & 7 deletions
@@ -66,9 +66,9 @@ function __init__()
         return
     end
 
-    if !(v"12" <= driver < v"14-")
-        @error "This version of CUDA.jl only supports NVIDIA drivers for CUDA 12.x or 13.x (yours is for CUDA $driver)"
-        _initialization_error[] = "CUDA driver unsupported"
+    if driver < v"12"
+        @error "This version of CUDA.jl requires an NVIDIA driver for CUDA 12.x or higher (yours only supports up to CUDA $driver)"
+        _initialization_error[] = "NVIDIA driver too old"
         return
     end
 
@@ -133,10 +133,6 @@ function __init__()
     if runtime < v"12"
         @error "This version of CUDA.jl only supports CUDA 12 or higher (your toolkit provides CUDA $runtime)"
     end
-    if runtime.major != driver.major
-        @warn """You are using CUDA $runtime with a driver for CUDA $(driver.major).x.
-                 It is recommended to upgrade your driver, or switch to automatic installation of CUDA."""
-    end
 
     # ensure the loaded runtime matches what we precompiled for.
     if toolkit_version == nothing
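
The relaxed check relies on Julia's VersionNumber ordering. The following standalone illustration (plain Julia, independent of CUDA.jl; old_ok and new_ok are hypothetical helpers) shows why the old upper bound used a pre-release literal and why dropping it admits future driver generations:

# v"14-" is a pre-release of 14.0.0 and sorts below it, so the old check
# `v"12" <= driver < v"14-"` rejected any CUDA 14 (or newer) driver.
@assert v"13.0.0" < v"14-" < v"14.0.0"

old_ok(driver) = v"12" <= driver < v"14-"   # previous check: 12.x and 13.x only
new_ok(driver) = driver >= v"12"            # new check: 12.x or anything newer

@assert old_ok(v"13.0.0") && !old_ok(v"14.0.0")
@assert new_ok(v"13.0.0") && new_ok(v"14.0.0") && !new_ok(v"11.8.0")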
