@@ -22,7 +22,11 @@ class PyMelissaCore(PythonPackage, CudaPackage):
2222
2323 license ("BSD-3-Clause" )
2424
25- version ("develop" , branch = "develop" )
25+ version ("develop" , branch = "develop" , preferred = True )
26+ version ("2.2.0" , sha256 = "e805c9ac08de5aa666768d5d92bfc680f064bd9108415a911dfd08ad7b0a3cf3" )
27+ version ("2.1.1" , sha256 = "6b92852429f13b144860edc37c7914723addabb0ec0bd108929ff567334d3f71" )
28+ version ("2.1.0" , sha256 = "cf0f105ed5b1da260cc7476aec23df084470b50a61df997c0e457c38948bed93" )
29+ version ("2.0.1" , sha256 = "a7ff4df75ea09af435b0c28c3fa3cab9335c1c76e1c48757facce36786b4962c" )
2630 version ("2.0.0" , sha256 = "75957d1933cd9c228a6e8643bc855587162c31f3b0ca94c3f5e0e380d01775dd" )
2731
2832 # define variants for the deep learning server (torch, tf)
@@ -35,67 +39,63 @@ class PyMelissaCore(PythonPackage, CudaPackage):
3539 when = "~torch" ,
3640 description = "Install Deep Learning requirements with TensorFlow only" ,
3741 )
38- # ==============================
39- # Base dependencies
40- # ==============================
42+
43+ # ============================================================
44+ # Base dependencies
45+ # ============================================================
46+
4147 depends_on ("c" , type = "build" ) # generated
48+ depends_on ("cxx" , type = "build" )
4249 depends_on ("fortran" , type = "build" ) # generated
43-
44-    depends_on("[email protected]:3.12", type=("build", "run"))
4550    depends_on("[email protected]:", type="build")
46-    depends_on("[email protected]:", type="run")
47-    depends_on("[email protected]:3", type="run")
48-    depends_on("[email protected]:1", type="run")
49-    depends_on("[email protected]:", type="run")
50-    depends_on("[email protected]:", type="run")
51-    depends_on("[email protected]:1", type="run")
52-    depends_on("[email protected]:", type="run")
53-    depends_on("[email protected]:", type="run")
54-    depends_on("[email protected]:", type="run")
55-    depends_on("py-psutil@5:", type="run")
56- # ==============================
57- # DL dependencies
58- # ==============================
51+
52+    with default_args(type=("build", "run")):
53+        depends_on("[email protected]:3.12", when="@:2.1.0")
54+        depends_on("[email protected]:3.12", when="@2.1.1:")
55+
56+    with default_args(type="run"):
57+        depends_on("[email protected]:")
58+        depends_on("[email protected]:3")
59+        depends_on("[email protected]:1")
60+        depends_on("[email protected]:")
61+        depends_on("[email protected]:")
62+        depends_on("[email protected]:1")
63+        depends_on("[email protected]:")
64+        depends_on("[email protected]:")
65+        depends_on("[email protected]:")
66+        depends_on("py-psutil@5:")
67+
68+ # ============================================================
69+ # DL dependencies
70+ # ============================================================
71+
5972 for framework in ["+tf" , "+torch" ]:
60- conflicts (
61- "%gcc@:9" ,
62- when = framework ,
63- msg = f"GCC must be greater than version 9 when using { framework } " ,
64- )
65-        depends_on("[email protected]:2", type="run", when=framework)
66-        depends_on("py-matplotlib", type="run", when=framework)
67-        depends_on("py-pandas", type="run", when=framework)
68-        # WARNING: If using a gcc compiler, then support with AVX512-VNNI is
69-        # expected for bazel source builds.
70-        # The instruction set comes with binutils. If you are installing a gcc
71-        # through spack then do spack install `gcc+binutils`
72-        depends_on("[email protected]:", type="build", when=f"{framework} %gcc")
73-
74-    # WARNING: do not change the upper limit for tensorflow beyond 2.17, which requires
75-    # AVX-VNNI-INT8 support.
76-    # Check cpu flags to ensure if avxvnniint8 is available on your machine,
77-    # if you want to increase the upper limit.
78-    depends_on("[email protected]:2.17 ~cuda", type="run", when="+tf ~cuda")
79-    depends_on("[email protected]:2.6 ~cuda", type="run", when="+torch ~cuda")
80-
81- # ==============================
82- # CUDA dependencies
83- # ==============================
84- for arch in CudaPackage .cuda_arch_values :
85- # Support beyond ampere (A100) GPUs hasn't been tested yet.
86- # FIXME: free to modify and test
87- if arch .isdigit () and 60 <= int (arch ) <= 80 :
73+ with when (framework ):
74+ conflicts ("%gcc@:9" , msg = f"GCC must be greater than version 9 when using { framework } " )
75+
76+            with default_args(type="run"):
77+                depends_on("[email protected]:2")
78+                depends_on("py-matplotlib")
79+                depends_on("py-pandas")
80+
81+            depends_on("[email protected]:", type="build", when="%gcc")
82+
83+ # ============================================================
84+ # Frameworks with/out CUDA
85+ # ============================================================
86+
87+ with default_args (type = "run" ):
88+ # Without CUDA
89+ with when ("~cuda" ):
90+ # WARNING: Do not set tensorflow upper limit above 2.17.
91+ # Versions >2.17 require AVX-VNNI-INT8 CPU support.
92+ # Check your CPU flags for 'avxvnniint8' before increasing.
93+            depends_on("[email protected]:2.17 ~cuda", when="+tf")
94+            depends_on("[email protected]:2 ~cuda", when="+torch")
95+
96+ # With CUDA
97+ for arch in CudaPackage .cuda_arch_values :
8898 cuda_specs = f"+cuda cuda_arch={ arch } "
89-            depends_on(f"nccl {cuda_specs}", when=cuda_specs)  # it is set by default
90-            depends_on(
91-                f"[email protected]:2.17 {cuda_specs}", type="run", when=f"+tf {cuda_specs}"
92-            )
93-            depends_on(
94-                f"[email protected]:2.6 {cuda_specs}", type="run", when=f"+torch {cuda_specs}"
95-            )
96- else :
97- conflicts (
98- f"+cuda cuda_arch={ arch } " ,
99- msg = "Support beyond Ampere GPUs has not been tested yet. "
100- "Accepted values are between 60 and 80 inclusive." ,
101- )
99+            with when(cuda_specs):
100+                depends_on(f"[email protected]:2.17+nccl{cuda_specs}", when="+tf")
101+                depends_on(f"[email protected]:2+cudnn+nccl{cuda_specs}", when="+torch")
0 commit comments