@@ -1,3 +1,4 @@
+import atexit
 import copy
 import dataclasses
 import graphlib
@@ -40,7 +41,8 @@
 from twisted.logger import Logger
 from twisted.python.failure import Failure
 
-from . import failed_builds, models
+from . import models
+from .failed_builds import FailedBuild, FailedBuildDB
 from .gitea_projects import GiteaBackend
 from .github_projects import (
     GithubBackend,
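
The new imports replace the module-level failed_builds helpers with two names from .failed_builds: a FailedBuild record and a FailedBuildDB handle. That module is not shown in this diff; judging only from the calls made below (check_build, add_build, remove_build, close) and the failed_builds.dbm filename, a minimal sketch of the interface this change assumes could look like the following. Everything here beyond the time and url fields is hypothetical.

# Hypothetical sketch, not part of this diff: a dbm-backed store keyed by
# derivation path, with JSON-encoded values. Only the methods the diff
# exercises are shown; dbm converts str keys and values to bytes implicitly.
import dbm
import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path


@dataclass
class FailedBuild:
    time: datetime  # when the derivation first failed
    url: str        # build URL of that first failure


class FailedBuildDB:
    def __init__(self, db_path: Path) -> None:
        # "c" opens the database for reading and writing, creating it if missing.
        self.db = dbm.open(str(db_path), "c")

    def check_build(self, drv_path: str) -> FailedBuild | None:
        raw = self.db.get(drv_path)
        if raw is None:
            return None
        data = json.loads(raw)
        return FailedBuild(time=datetime.fromisoformat(data["time"]), url=data["url"])

    def add_build(self, drv_path: str, time: datetime, url: str) -> None:
        self.db[drv_path] = json.dumps({"time": time.isoformat(), "url": url})

    def remove_build(self, drv_path: str) -> None:
        if drv_path in self.db:
            del self.db[drv_path]

    def close(self) -> None:
        self.db.close()

check_build returning None doubles as the "no cached failure yet" signal that run() relies on further down.
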
@@ -86,6 +88,7 @@ class BuildTrigger(buildstep.ShellMixin, steps.BuildStep):
     wait_for_finish_deferred: defer.Deferred[tuple[list[int], int]] | None
     brids: list[int]
     consumers: dict[int, Any]
+    failed_builds_db: FailedBuildDB
 
     @dataclass
     class ScheduledJob:
@@ -118,6 +121,7 @@ def __init__(
         successful_jobs: list[NixEvalJobSuccess],
         failed_jobs: list[NixEvalJobError],
         combine_builds: bool,
+        failed_builds_db: FailedBuildDB,
         **kwargs: Any,
     ) -> None:
         self.project = project
@@ -130,6 +134,7 @@ def __init__(
         self.failed_eval_scheduler = failed_eval_scheduler
         self.cached_failure_scheduler = cached_failure_scheduler
         self.dependency_failed_scheduler = dependency_failed_scheduler
+        self.failed_builds_db = failed_builds_db
         self._result_list: list[int] = []
         self.ended = False
         self.running = False
@@ -193,7 +198,9 @@ def schedule_eval_failure(self, job: NixEvalJobError) -> tuple[str, Properties]:
         return (self.failed_eval_scheduler, props)
 
     def schedule_cached_failure(
-        self, job: NixEvalJobSuccess, first_failure: datetime, first_failure_url: str,
+        self,
+        job: NixEvalJobSuccess,
+        first_failure: FailedBuild,
     ) -> tuple[str, Properties]:
         source = "nix-eval-nix"
 
@@ -470,7 +477,7 @@ def run(self) -> Generator[Any, Any, None]:
             # check which jobs should be scheduled now
             schedule_now = []
             for build in list(build_schedule_order):
-                failed_build = failed_builds.check_build(build.drvPath)
+                failed_build = self.failed_builds_db.check_build(build.drvPath)
                 if job_closures.get(build.drvPath):
                     pass
                 elif failed_build is not None and self.build.reason != "rebuild":
@@ -482,14 +489,14 @@ def run(self) -> Generator[Any, Any, None]:
 
                     brids, results_deferred = yield self.schedule(
                         ss_for_trigger,
-                        *self.schedule_cached_failure(build, failed_build.time, failed_build.url),
+                        *self.schedule_cached_failure(build, failed_build),
                     )
                     scheduled.append(
                         BuildTrigger.ScheduledJob(build, brids, results_deferred)
                     )
                     self.brids.extend(brids.values())
                 elif failed_build is not None and self.build.reason == "rebuild":
-                    failed_builds.remove_build(build.drvPath)
+                    self.failed_builds_db.remove_build(build.drvPath)
                     scheduler_log.addStdout(
                         f"\t- not skipping {build.attr} with cached failure due to rebuild, first failed at {failed_build.time}\n"
                     )
@@ -545,9 +552,14 @@ def run(self) -> Generator[Any, Any, None]:
             # if it failed, remove all dependent jobs, schedule placeholders and add them to the list of scheduled jobs
             if isinstance(job, NixEvalJobSuccess):
                 if result != SUCCESS:
-                    if self.build.reason == "rebuild" or not failed_builds.check_build(job.drvPath):
-                        url = yield self.build.getUrl()
-                        failed_builds.add_build(job.drvPath, datetime.now(tz=UTC), url)
+                    if (
+                        self.build.reason == "rebuild"
+                        or not self.failed_builds_db.check_build(job.drvPath)
+                    ):
+                        url = yield self.build.getUrl()
+                        self.failed_builds_db.add_build(
+                            job.drvPath, datetime.now(tz=UTC), url
+                        )
 
                     removed = self.get_failed_dependents(
                         job, build_schedule_order, job_closures
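
Written out, the reworked condition keeps the earliest recorded failure across ordinary retries, but refreshes the cache entry when the build was explicitly retriggered as a rebuild (or when the derivation has no entry yet). Condensed into a standalone helper, purely illustrative and reusing the FailedBuildDB sketch from the import section above:

from datetime import UTC, datetime

def record_failure(db: FailedBuildDB, drv_path: str, reason: str, url: str) -> None:
    # A plain retry leaves the stored first-failure timestamp and URL untouched;
    # an explicit "rebuild", or a derivation never seen before, gets a fresh entry.
    if reason == "rebuild" or db.check_build(drv_path) is None:
        db.add_build(drv_path, datetime.now(tz=UTC), url)
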
@@ -611,6 +623,7 @@ def __init__(
         project: GitProject,
         supported_systems: list[str],
         job_report_limit: int | None,
+        failed_builds_db: FailedBuildDB,
         **kwargs: Any,
     ) -> None:
         kwargs = self.setupShellMixin(kwargs)
@@ -620,6 +633,7 @@ def __init__(
         self.addLogObserver("stdio", self.observer)
         self.supported_systems = supported_systems
         self.job_report_limit = job_report_limit
+        self.failed_builds_db = failed_builds_db
 
     @defer.inlineCallbacks
     def produce_event(
@@ -687,6 +701,7 @@ def run(self) -> Generator[Any, object, Any]:
                         self.job_report_limit is not None
                         and self.number_of_jobs > self.job_report_limit
                     ),
+                    failed_builds_db=self.failed_builds_db,
                 ),
             ]
         )
@@ -861,6 +876,7 @@ def nix_eval_config(
     worker_count: int,
     max_memory_size: int,
     job_report_limit: int | None,
+    failed_builds_db: FailedBuildDB,
 ) -> BuilderConfig:
     """Uses nix-eval-jobs to evaluate hydraJobs from flake.nix in parallel.
     For each evaluated attribute a new build pipeline is started.
@@ -887,6 +903,7 @@ def nix_eval_config(
             name="evaluate flake",
             supported_systems=supported_systems,
             job_report_limit=job_report_limit,
+            failed_builds_db=failed_builds_db,
             command=[
                 "nix-eval-jobs",
                 "--workers",
@@ -1170,6 +1187,7 @@ def config_for_project(
     eval_lock: MasterLock,
     post_build_steps: list[steps.BuildStep],
     job_report_limit: int | None,
+    failed_builds_db: FailedBuildDB,
     outputs_path: Path | None = None,
 ) -> None:
     config["projects"].append(Project(project.name))
@@ -1258,6 +1276,7 @@ def config_for_project(
                 worker_count=nix_eval_worker_count,
                 max_memory_size=nix_eval_max_memory_size,
                 eval_lock=eval_lock,
+                failed_builds_db=failed_builds_db,
             ),
             nix_build_config(
                 project,
@@ -1464,6 +1483,10 @@ def configure(self, config: dict[str, Any]) -> None:
                 )
             )
 
+        db = FailedBuildDB(Path("failed_builds.dbm"))
+        # Hacky, but we have no other hook right now to run code on shutdown.
+        atexit.register(lambda: db.close())
+
         for project in projects:
             config_for_project(
                 config,
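
The store is opened once per configure() call and closed through an atexit hook; registering the bound method directly, atexit.register(db.close), would behave the same as the lambda. Since the Buildbot master runs inside a Twisted reactor, a reactor shutdown trigger is one possible, less hacky alternative. A sketch, assuming the global reactor is the one driving the master:

from twisted.internet import reactor

# Close the failed-builds database when the reactor shuts down instead of
# relying on atexit; "during" triggers fire after all "before" triggers finish.
reactor.addSystemEventTrigger("during", "shutdown", db.close)
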
@@ -1475,7 +1498,8 @@ def configure(self, config: dict[str, Any]) -> None:
                 eval_lock,
                 [x.to_buildstep() for x in self.config.post_build_steps],
                 self.config.job_report_limit,
-                self.config.outputs_path,
+                failed_builds_db=db,
+                outputs_path=self.config.outputs_path,
             )
 
         config["workers"].append(worker.LocalWorker(SKIPPED_BUILDER_NAME))
@@ -1533,5 +1557,3 @@ def configure(self, config: dict[str, Any]) -> None:
             backends=list(backends.values()),
             projects=projects,
         )
-
-        failed_builds.initialize_database(Path("failed_builds.dbm"))