66import typing
77from collections .abc import Mapping
88
9+ import psutil
910import pydantic
1011import yaml
1112from packaging .utils import BuildTag , NormalizedName , canonicalize_name
@@ -142,6 +143,27 @@ def validate_destination_filename(cls, v):
142143 return v
143144
144145
class BuildOptions(pydantic.BaseModel):
    """Build system options

    Controls how parallel build jobs are scaled from the machine's
    available CPU cores and memory (consumed by
    ``PackageBuildInfo.parallel_jobs``).
    """

    model_config = MODEL_CONFIG

    cpu_cores_per_job: int = Field(default=1, ge=1)
    """Scale parallel jobs by available CPU cores

    Number of CPU cores to allocate per parallel job (minimum 1).

    Examples:
        1: as many parallel jobs as CPU logical cores
        2: allocate 2 cores per job
    """

    memory_per_job_gb: float = Field(default=1.0, ge=0.1)
    """Scale parallel jobs by available virtual memory (without swap)

    Memory to allocate per parallel job, minimum 0.1.  NOTE(review):
    despite the ``_gb`` suffix, the consumer divides available memory
    measured in GiB by this value, so it is effectively GiB — e.g.
    0.5 assumes each parallel job requires 512 MiB of virtual memory.

    Examples:
        0.5: assume each parallel job requires 512 MB virtual memory
    """
166+
145167class VariantInfo (pydantic .BaseModel ):
146168 """Variant information for a package"""
147169
@@ -209,6 +231,9 @@ class PackageSettings(pydantic.BaseModel):
209231 resolve_source : ResolveSource = Field (default_factory = ResolveSource )
210232 """Resolve distribution version"""
211233
234+ build_options : BuildOptions = Field (default_factory = BuildOptions )
235+ """Build system options"""
236+
212237 variants : Mapping [Variant , VariantInfo ] = Field (default_factory = dict )
213238 """Variant configuration"""
214239
@@ -320,16 +345,30 @@ def _resolve_template(
320345 raise
321346
322347
def get_cpu_count() -> int:
    """Number of CPUs usable by this process.

    Prefers the scheduler affinity mask (Linux), which respects cgroup /
    taskset restrictions; falls back to the raw CPU count elsewhere.
    Always returns at least 1.
    """
    affinity = getattr(os, "sched_getaffinity", None)
    if affinity is not None:
        return len(affinity(0))
    # os.cpu_count() may return None on exotic platforms
    return os.cpu_count() or 1
354+
355+
def get_available_memory_gib() -> float:
    """Available virtual memory in GiB (excludes swap)."""
    available_bytes = psutil.virtual_memory().available
    return available_bytes / 2**30
359+
360+
323361class PackageBuildInfo :
324362 """Package build information
325363
326364 Public API for PackageSettings with i
327365 """
328366
329- def __init__ (self , ctx : "Settings" , ps : PackageSettings ) -> None :
330- self ._variant = typing .cast (Variant , ctx .variant )
331- self ._patches_dir = ctx .patches_dir
332- self ._variant_changelog = ctx .variant_changelog ()
367+ def __init__ (self , settings : "Settings" , ps : PackageSettings ) -> None :
368+ self ._variant = typing .cast (Variant , settings .variant )
369+ self ._patches_dir = settings .patches_dir
370+ self ._variant_changelog = settings .variant_changelog ()
371+ self ._max_jobs : int | None = settings .max_jobs
333372 self ._ps = ps
334373 self ._plugin_module : types .ModuleType | None | typing .Literal [False ] = False
335374 self ._patches : PatchMap | None = None
@@ -497,6 +536,35 @@ def get_extra_environ(
497536
498537 return extra_environ
499538
    def parallel_jobs(self) -> int:
        """How many parallel jobs?

        Computes the parallel job count for this package's build as the
        minimum of three limits: CPU cores divided by the package's
        ``cpu_cores_per_job``, currently-available memory divided by
        ``memory_per_job_gb``, and the global ``--jobs`` setting
        (``self._max_jobs``; CPU count when unset).  Always at least 1.

        NOTE(review): the memory limit is a point-in-time snapshot of
        available memory; concurrent builds may change it between calls.
        """
        # adjust by CPU cores, at least 1
        cpu_cores_per_job: int = self._ps.build_options.cpu_cores_per_job
        cpu_count: int = get_cpu_count()
        # // on ints already floors; max(1, ...) guards cpu_cores_per_job > cpu_count
        max_num_job_cores = int(max(1, cpu_count // cpu_cores_per_job))
        logger.debug(
            f"{self.package}: {max_num_job_cores=}, {cpu_cores_per_job=}, {cpu_count=}"
        )

        # adjust by memory consumption per job, at least 1
        memory_per_job_gb: float = self._ps.build_options.memory_per_job_gb
        free_memory: float = get_available_memory_gib()
        # float floor division, clamped to 1.0 before truncating to int
        max_num_jobs_memory = int(max(1.0, free_memory // memory_per_job_gb))
        logger.debug(
            f"{self.package}: {max_num_jobs_memory=}, {memory_per_job_gb=}, {free_memory=:0.1f} GiB"
        )

        # limit by smallest amount of CPU, memory, and --jobs parameter
        max_jobs: int = cpu_count if self._max_jobs is None else self._max_jobs
        parallel_builds = min(max_num_job_cores, max_num_jobs_memory, max_jobs)

        logger.debug(
            f"{self.package}: parallel builds {parallel_builds=} "
            f"({free_memory=:0.1f} GiB, {cpu_count=}, {max_jobs=})"
        )

        return parallel_builds
567+
500568 def serialize (self , ** kwargs ) -> dict [str , typing .Any ]:
501569 return self ._ps .serialize (** kwargs )
502570
@@ -556,13 +624,15 @@ def __init__(
556624 package_settings : typing .Iterable [PackageSettings ],
557625 variant : Variant | str ,
558626 patches_dir : pathlib .Path ,
627+ max_jobs : int | None ,
559628 ) -> None :
560629 self ._settings = settings
561630 self ._package_settings : dict [Package , PackageSettings ] = {
562631 p .name : p for p in package_settings
563632 }
564633 self ._variant = typing .cast (Variant , variant )
565634 self ._patches_dir = patches_dir
635+ self ._max_jobs = max_jobs
566636 self ._pbi_cache : dict [Package , PackageBuildInfo ] = {}
567637
568638 @classmethod
@@ -573,6 +643,7 @@ def from_files(
573643 settings_dir : pathlib .Path ,
574644 variant : Variant | str ,
575645 patches_dir : pathlib .Path ,
646+ max_jobs : int | None ,
576647 ) -> "Settings" :
577648 """Create Settings from settings.yaml and directory"""
578649 if settings_file .is_file ():
@@ -591,6 +662,7 @@ def from_files(
591662 package_settings = package_settings ,
592663 variant = variant ,
593664 patches_dir = patches_dir ,
665+ max_jobs = max_jobs ,
594666 )
595667
596668 @property
@@ -612,10 +684,21 @@ def patches_dir(self) -> pathlib.Path:
612684
    @patches_dir.setter
    def patches_dir(self, path: pathlib.Path) -> None:
        """Change patches_dir (for testing)

        Clears the PackageBuildInfo cache first, because cached entries
        capture patches_dir at construction time.
        """
        self._pbi_cache.clear()
        self._patches_dir = path
618690
    @property
    def max_jobs(self) -> int | None:
        """Get max parallel jobs (``None`` means no explicit --jobs limit)"""
        return self._max_jobs
695+
    @max_jobs.setter
    def max_jobs(self, jobs: int | None) -> None:
        """Change max jobs (for testing)

        Clears the PackageBuildInfo cache first, because cached entries
        capture max_jobs at construction time.
        """
        self._pbi_cache.clear()
        self._max_jobs = jobs
701+
619702 def variant_changelog (self ) -> list [str ]:
620703 """Get global changelog for current variant"""
621704 return list (self ._settings .changelog .get (self .variant , []))
0 commit comments