@@ -31,27 +31,44 @@ spack_setup:
 .spack_intel:
   variables:
     SPACK_PACKAGE_COMPILER: intel
-.spack_coreneuron:
-  variables:
-    SPACK_PACKAGE: coreneuron
-    SPACK_PACKAGE_SPEC: +debug+tests~legacy-unit
 .spack_neuron:
   variables:
     SPACK_PACKAGE: neuron
-    SPACK_PACKAGE_REF: '' # Take the default branch
+    SPACK_PACKAGE_REF: ''
     SPACK_PACKAGE_SPEC: +coreneuron+debug+tests~legacy-unit
 
-# CoreNEURON will be checked out for us by default
 build:coreneuron:intel:
   extends:
     - .spack_build
-    - .spack_coreneuron
     - .spack_intel
+  variables:
+    SPACK_PACKAGE: coreneuron
+    SPACK_PACKAGE_SPEC: +tests~legacy-unit build_type=Debug
+
+build:coreneuron:gpu:
+  extends:
+    - .spack_build
+  variables:
+    SPACK_PACKAGE: coreneuron
+    # With NVHPC 20.9 and CUDA 11.1 we cannot compile with build_type=Debug.
+    # +report would try to build hdf5%nvhpc, which doesn't work. For now we use
+    # ~report instead of something like ^hdf5%intel.
+    SPACK_PACKAGE_SPEC: +gpu+tests~legacy-unit~report build_type=RelWithDebInfo
+    # We have to run GPU builds on GPU nodes for driver/makelocalrc reasons.
+    bb5_constraint: volta
+    SPACK_PACKAGE_COMPILER: nvhpc
 
 test:coreneuron:intel:
+  extends: [.ctest]
+  needs: ["build:coreneuron:intel"]
+
+test:coreneuron:gpu:
   extends:
     - .ctest
-  needs: ["build:coreneuron:intel"]
+  variables:
+    # GPU tests need to run on nodes with GPUs.
+    bb5_constraint: volta
+  needs: ["build:coreneuron:gpu"]
 
 build:neuron:intel:
   stage: build_neuron
@@ -61,12 +78,44 @@ build:neuron:intel:
     - .spack_intel
   needs: ["build:coreneuron:intel"]
 
+build:neuron:gpu:
+  stage: build_neuron
+  extends:
+    - .spack_build
+    - .spack_neuron
+  variables:
+    # Avoid trying to build all the NEURON dependencies with the PGI/NVHPC/GPU
+    # compiler.
+    SPACK_PACKAGE_COMPILER: nvhpc
+    SPACK_EXPORT_SPECS: gettext
+    # Compiling NEURON invokes `nrnivmodl -coreneuron`, which invokes the NVHPC
+    # compiler used to build CoreNEURON. As noted above, this only works on the
+    # nodes with GPUs.
+    bb5_constraint: volta
+  before_script:
+    # The +rx3d variant is not compatible with the NVHPC compiler.
+    # See: https://github.com/neuronsimulator/nrn/issues/1109
+    - SPACK_PACKAGE_SPEC="~rx3d${SPACK_PACKAGE_SPEC}"
+    # Get the hash of a gcc build of py-cython so we can force the nvhpc build
+    # of neuron to use it. py-cython does not build with nvhpc.
+    - . ${SPACK_ROOT}/share/spack/setup-env.sh
+    - CYTHON_HASH=$(spack find --json py-cython%gcc | python -c "import json, sys; print(json.loads(sys.stdin.read())[0]['hash'])")
+    # Inject the hash for py-cython after the one for coreneuron; also say
+    # numpy has to be built with GCC to minimise the amount of time we spend
+    # building dependencies in the CI.
+    - SPACK_PACKAGE_DEPENDENCIES="${SPACK_PACKAGE_DEPENDENCIES}^/${CYTHON_HASH}^py-numpy%gcc"
+    - !reference [.spack_build, before_script]
+  needs: ["build:coreneuron:gpu"]
+
 test:neuron:intel:
   stage: test_neuron
-  before_script:
-    - !reference [.ctest, before_script]
-    # Wipe Slurm environment to try and keep mpiexec happy.
-    - unset $(env|awk -F= '/^SLURM_/ {if (match($1, "_(ACCOUNT|PARTITION)$")==0) print $1}')
-  extends:
-    - .ctest
+  extends: [.ctest]
   needs: ["build:neuron:intel"]
+
+test:neuron:gpu:
+  stage: test_neuron
+  extends: [.ctest]
+  variables:
+    # GPU tests need to run on nodes with GPUs.
+    bb5_constraint: volta
+  needs: ["build:neuron:gpu"]