__author__ = "David Lähnemann, Johannes Köster, Christian Meesters"
__copyright__ = "Copyright 2023, David Lähnemann, Johannes Köster, Christian Meesters"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import atexit
import asyncio
import base64
import errno
from concurrent.futures import ThreadPoolExecutor
import json
import os
from pathlib import Path
import re
import shlex
import shutil
import subprocess
import time
from dataclasses import dataclass, field
from typing import List, Generator, Optional
import uuid
import zlib
from snakemake_interface_executor_plugins.executors.base import (
SubmittedJobInfo,
)
from snakemake_interface_executor_plugins.executors.remote import (
RemoteExecutor,
)
from snakemake_interface_executor_plugins.settings import (
ExecutorSettingsBase,
CommonSettings,
)
from snakemake_interface_executor_plugins.jobs import (
JobExecutorInterface,
)
from snakemake_interface_common.exceptions import WorkflowError
from .accounts import (
test_account,
get_account,
)
from .utils import (
get_max_array_size,
get_job_wildcards,
pending_jobs_for_rule,
delete_slurm_environment,
delete_empty_dirs,
set_gres_string,
)
from .job_status_query import (
get_min_job_age,
is_query_tool_available,
should_recommend_squeue_status_command,
query_job_status_squeue,
query_job_status_sacct,
query_job_status,
)
from .job_cancellation import cancel_slurm_jobs
from .efficiency_report import create_efficiency_report
from .submit_string import get_submit_command
from .partitions import (
get_default_partition,
read_partition_file,
get_best_partition,
)
from .validation import (
validate_or_get_slurm_job_id,
validate_slurm_extra,
validate_executor_settings,
validate_status_command_settings,
)
def _get_status_command_default():
"""Get smart default for status_command based on cluster configuration."""
sacct_available = is_query_tool_available("sacct")
squeue_available = is_query_tool_available("squeue")
    # use the presence of 'sinfo' as a proxy for whether SLURM is available at all
    is_slurm_available = shutil.which("sinfo") is not None
if not is_slurm_available:
return None
if not squeue_available and not sacct_available:
raise WorkflowError(
"Neither 'sacct' nor 'squeue' commands are available on this "
"system. At least one of these commands is required for job "
"status queries."
)
if sacct_available:
return "sacct"
else:
return "squeue"
def _get_status_command_help():
"""Get help text with computed default."""
default_cmd = _get_status_command_default()
# if SLURM is not available (should not occur, only
# in 3rd party CI tests)
if default_cmd is None:
return (
"Command to query job status. Options: 'sacct', 'squeue'. "
"SLURM not detected on this system, so no status command can be used."
)
sacct_available = is_query_tool_available("sacct")
squeue_recommended = should_recommend_squeue_status_command()
base_help = "Command to query job status. Options: 'sacct', 'squeue'. "
if default_cmd == "sacct":
if sacct_available and not squeue_recommended:
info = (
"'sacct' detected and will be used "
"(MinJobAge may be too low for reliable 'squeue' usage)"
)
else:
info = "'sacct' detected and will be used"
else: # default_cmd == "squeue"
if squeue_recommended:
# cumbersome, due to black and the need to stay below 80 chars
msg_part1 = "'squeue' recommended (MinJobAge is sufficient )"
msg_part2 = " for reliable usage"
info = msg_part1 + msg_part2
elif not sacct_available:
info = (
"'sacct' not available, falling back to 'squeue'. "
"WARNING: 'squeue' may not work reliably if MinJobAge is "
"too low"
)
else:
info = (
"'squeue' will be used. "
"WARNING: MinJobAge may be too low for reliable 'squeue' usage"
)
return (
f"{base_help}Default: '{default_cmd}' ({info}). "
f"Set explicitly to override auto-detection."
)
def _status_lookup_ids(external_jobid: str) -> List[str]:
"""Return candidate IDs for status lookup.
For array jobs, Snakemake tracks task IDs as ``<jobid>_<taskid>``.
Depending on SLURM command/options (e.g. ``sacct -X``), status output may
only contain the parent array ID ``<jobid>``. This helper returns IDs in
lookup order so callers can transparently fall back from task ID to parent
ID.
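
    Example (hypothetical IDs): ``_status_lookup_ids("123456_7")`` yields
    ``["123456_7", "123456"]``, whereas ``_status_lookup_ids("123456")``
    yields just ``["123456"]``.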
"""
candidates = [external_jobid]
if "_" in external_jobid:
parent_id, task_id = external_jobid.split("_", 1)
if parent_id.isdigit() and task_id.isdigit():
candidates.append(parent_id)
return candidates
@dataclass
class ExecutorSettings(ExecutorSettingsBase):
"""Settings for the SLURM executor plugin."""
array_jobs: Optional[str] = field(
default=None,
metadata={
"help": "Will submit jobs as SLURM job arrays, if possible. "
"Use as: --slurm-array-jobs='rule1, rule2' to submit jobs of "
"rule1 and rule2 as array jobs. If a DAG contains only one job for "
"a rule, it cannot be submitted as an array job. Selecting "
"--slurm-array-jobs=all will submit all eligiblejobs as array jobs. "
"Note: When choosing array job submission, the required jobs are "
"subject to a synchronization overhead.",
"env_var": False,
"required": False,
},
)
array_limit: Optional[int] = field(
default=1000,
metadata={
"help": "When submitting array jobs, this flag defines the maximum "
"number of array tasks to be submitted in one sbatch call. If the "
"number of tasks exceeds this limit, multiple array job submissions "
"will be performed. This is useful to avoid hitting cluster limits on "
"the maximum number of array tasks per job. "
"Please obey your cluster limits and set this flag accordingly.",
"env_var": False,
"required": False,
},
)
logdir: Optional[Path] = field(
default=None,
metadata={
"help": "Per default the SLURM log directory is relative to "
"the working directory. "
"This flag allows to set an alternative directory.",
"env_var": False,
"required": False,
},
)
keep_successful_logs: bool = field(
default=False,
metadata={
"help": "Per default SLURM log files will be deleted upon "
"successful completion of a job. Whenever a SLURM job fails, "
"its log file will be preserved. "
"This flag allows to keep all SLURM log files, even those "
"of successful jobs.",
"env_var": False,
"required": False,
},
)
delete_logfiles_older_than: Optional[int] = field(
default=10,
metadata={
"help": "Per default SLURM log files in the SLURM log directory "
"of a workflow will be deleted after 10 days. For this, "
"best leave the default log directory unaltered. "
"Setting this flag allows to change this behaviour. "
"If set to <=0, no old files will be deleted.",
},
)
init_seconds_before_status_checks: Optional[int] = field(
default=40,
metadata={
"help": "Defines the time in seconds before the first status "
"check is performed on submitted jobs. Must be a positive "
"integer",
},
)
requeue: bool = field(
default=False,
metadata={
"help": "Requeue jobs if they fail with exit code != 0, "
"if no cluster default. Results in "
"`sbatch ... --requeue ...` "
"This flag has no effect, if not set.",
"env_var": False,
"required": False,
},
)
exclude_failed_nodes: Optional[str] = field(
default=None,
metadata={
"help": "Comma-separated list of nodes to exclude from job "
"submission. This is useful to exclude known problematic nodes. "
"This flag has no effect, if not set.",
"env_var": False,
"required": False,
},
)
no_account: bool = field(
default=False,
metadata={
"help": "Do not use any account for submission. "
"This flag has no effect, if not set.",
"env_var": False,
"required": False,
},
)
partition_config: Optional[Path] = field(
default=None,
metadata={
"help": "Path to YAML file defining partition limits for dynamic "
"partition selection. When provided, jobs will be dynamically "
"assigned to the best-fitting partition based on their resource "
"requirements. See documentation for complete list of available limits. "
"Alternatively, the environment variable SNAKEMAKE_SLURM_PARTITIONS "
"can be set to point to such a file. "
"If both are set, this flag takes precedence.",
"env_var": False,
"required": False,
},
)
efficiency_report: bool = field(
default=False,
metadata={
"help": (
"Generate an efficiency report at the end of the workflow. "
"This flag has no effect, if not set."
),
"env_var": False,
"required": False,
},
)
efficiency_report_path: Optional[Path] = field(
default=None,
metadata={
"help": "Path to the efficiency report file. "
"If not set, the report will be written to "
"the current working directory with the name "
"'efficiency_report_<run_uuid>.csv'. "
"This flag has no effect, if not set.",
"env_var": False,
"required": False,
},
)
efficiency_threshold: Optional[float] = field(
default=0.8,
metadata={
"help": "Threshold for efficiency report. "
"Jobs with efficiency below this threshold will be reported.",
"env_var": False,
"required": False,
},
)
jobname_prefix: Optional[str] = field(
default="",
metadata={
"help": "Prefix that is added to the job names. "
"Must contain only alphanumeric characters, "
"underscores or hyphens. Maximum length should "
"not exceed 50 characters.",
"env_var": False,
"required": False,
},
)
status_command: Optional[str] = field(
default_factory=_get_status_command_default,
metadata={
"help": _get_status_command_help(),
"env_var": False,
"required": False,
},
)
status_attempts: Optional[int] = field(
default=5,
metadata={
"help": "Defines the number of attempts to query the status of "
"all active jobs. If the status query fails, the next attempt "
"will be performed after the next status check interval. "
"The default is 5 status attempts before giving up. The maximum "
"time between status checks is 180 seconds.",
"env_var": False,
"required": False,
},
)
qos: Optional[str] = field(
default=None,
metadata={
"help": "If set, the given QoS will be used for job submission.",
"env_var": False,
"required": False,
},
)
reservation: Optional[str] = field(
default=None,
metadata={
"help": ("If set, the given reservation will be used for job submission."),
"env_var": False,
"required": False,
},
)
pass_command_as_script: bool = field(
default=False,
metadata={
"help": (
"Pass to sbatch and srun the command to be executed as a shell script"
" (fed through stdin) instead of wrapping it in the command line "
"call. Useful when a limit exists on SLURM command line length (ie. "
"max_submit_line_size)."
),
"env_var": False,
"required": False,
},
)
def __post_init__(self):
"""Validate settings after initialization."""
validate_executor_settings(self)
# Required:
# Specify common settings shared by various executors.
common_settings = CommonSettings(
# define whether your executor plugin executes locally
# or remotely. In virtually all cases, it will be remote execution
# (cluster, cloud, etc.). Only Snakemake's standard execution
# plugins (snakemake-executor-plugin-dryrun,
# snakemake-executor-plugin-local)
# are expected to specify False here.
non_local_exec=True,
# Define whether your executor plugin implies that there is no shared
# filesystem (True) or not (False).
# This is e.g. the case for cloud execution.
implies_no_shared_fs=False,
job_deploy_sources=False,
pass_default_storage_provider_args=True,
pass_default_resources_args=True,
pass_envvar_declarations_to_cmd=False,
auto_deploy_default_storage_provider=False,
)
def _select_logdir(workflow):
"""Selects where slurm_logdir should be created"""
logdir = workflow.executor_settings.logdir
# logdir is defined as absolute, keep "as is"
if logdir and str(logdir).startswith("/"):
return Path(logdir)
    # logdir is relative, so we ensure it stays relative to the workflow's workdir
elif logdir:
return Path(workflow.workdir_init) / workflow.executor_settings.logdir
# logdir is unset
else:
return Path(".snakemake/slurm_logs").resolve()
# Required:
# Implementation of your executor
class Executor(RemoteExecutor):
def __post_init__(self, test_mode: bool = False):
# run check whether we are running in a SLURM job context
self.warn_on_jobcontext()
self.test_mode = test_mode
self.run_uuid = str(uuid.uuid4())
if self.workflow.executor_settings.exclude_failed_nodes:
excluded_nodes = self.workflow.executor_settings.exclude_failed_nodes
self._failed_nodes = set(
node.strip() for node in excluded_nodes.split(",") if node.strip()
)
else:
self._failed_nodes = set()
# validate prefix: only allow alphanumeric, underscore, hyphen
# cap length:
if self.workflow.executor_settings.jobname_prefix:
if not re.match(
r"^[A-Za-z0-9_-]{1,50}$",
self.workflow.executor_settings.jobname_prefix,
):
raise WorkflowError(
"The jobname_prefix may only contain alphanumeric "
"characters, underscores or hyphens, and must not "
"exceed 50 characters in length."
)
self.run_uuid = "_".join(
[self.workflow.executor_settings.jobname_prefix, self.run_uuid]
)
self.logger.info(f"SLURM run ID: {self.run_uuid}")
self._fallback_account_arg = None
self._fallback_partition = None
self._preemption_warning = False # no preemption warning has been issued
self._submitted_job_clusters = set() # track clusters of submitted jobs
self._job_submission_executor = ThreadPoolExecutor(
max_workers=2, thread_name_prefix="slurm_job_submit"
)
self._main_event_loop = None
self._status_query_calls = 0
self._status_query_failures = 0
self._status_query_total_seconds = 0.0
self._status_query_min_seconds = None
self._status_query_max_seconds = 0.0
self._status_query_cycle_rows = []
array_job_setting = self.workflow.executor_settings.array_jobs
if array_job_setting:
normalized_setting = array_job_setting.replace(";", ",")
self.array_jobs = {
rule.strip() for rule in normalized_setting.split(",") if rule.strip()
}
else:
self.array_jobs = set()
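        # cap the effective array size at the smaller of get_max_array_size()
        # (the cluster-side maximum) and the user-provided array_limit setting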
self.max_array_size = min(
get_max_array_size(), int(self.workflow.executor_settings.array_limit)
)
if self.max_array_size <= 10:
self.logger.warning(
"Array limit is set to "
f"{self.max_array_size}, "
"which is very low and may lead to excessive numbers of array "
"job submissions. Please consider increasing this limit if your "
"cluster allows it."
)
if self.max_array_size < 2:
self.logger.error(
"Array job submission is effectively disabled due to "
f"max_array_size={self.max_array_size}. Consider increasing "
"the array_limit setting to enable array job submission."
)
raise WorkflowError(
"Array job submission is effectively disabled due to "
"low array_limit."
)
self.slurm_logdir = _select_logdir(self.workflow)
# Check the environment variable "SNAKEMAKE_SLURM_PARTITIONS",
# if set, read the partitions from the given file. Let the CLI
# option override this behavior.
if (
os.getenv("SNAKEMAKE_SLURM_PARTITIONS")
and not self.workflow.executor_settings.partition_config
):
partition_file = Path(os.getenv("SNAKEMAKE_SLURM_PARTITIONS"))
self.logger.info(
f"Reading SLURM partition configuration from "
f"environment variable file: {partition_file}"
)
self._partitions = read_partition_file(partition_file)
else:
self._partitions = (
read_partition_file(self.workflow.executor_settings.partition_config)
if self.workflow.executor_settings.partition_config
else None
)
atexit.register(self.clean_old_logs)
# moved validation to validation.py
validate_status_command_settings(self.workflow.executor_settings, self.logger)
def get_status_command(self):
"""Get the status command to use, with fallback logic."""
if hasattr(self.workflow.executor_settings, "status_command"):
return self.workflow.executor_settings.status_command
else:
# Fallback: determine the best command based on
# cluster configuration
return _get_status_command_default()
def shutdown(self) -> None:
"""
Shutdown the executor.
This method is overloaded, to include the cleaning of old log files
and to optionally create an efficiency report.
"""
# Ensure background submission tasks are finished before shutting down.
self._job_submission_executor.shutdown(wait=True)
# First, we invoke the original shutdown method
super().shutdown()
# Next, clean up old log files, unconditionally.
self.clean_old_logs()
# If the efficiency report is enabled, create it.
if self.workflow.executor_settings.efficiency_report:
threshold = self.workflow.executor_settings.efficiency_threshold
report_path = self.workflow.executor_settings.efficiency_report_path
create_efficiency_report(
e_threshold=threshold,
run_uuid=self.run_uuid,
e_report_path=report_path,
logger=self.logger,
)
# Finally, create a summary of status query timings and report it.
# (only in debug mode).
cumulative_avg_duration = (
self._status_query_total_seconds / self._status_query_calls
if self._status_query_calls
else 0.0
)
min_duration = (
f"{self._status_query_min_seconds:.3f}s"
if self._status_query_min_seconds is not None
else "n/a"
)
self.logger.debug(
"Status query timing summary at shutdown: "
f"calls={self._status_query_calls}, "
f"failures={self._status_query_failures}, "
f"min={min_duration}, "
f"avg={cumulative_avg_duration:.3f}s, "
f"max={self._status_query_max_seconds:.3f}s, "
f"total={self._status_query_total_seconds:.3f}s"
)
# Report any failed nodes that were tracked during execution
if self._failed_nodes:
failed_nodes_str = ", ".join(sorted(self._failed_nodes))
self.logger.warning(
f"The following nodes failed during job execution and were "
f"excluded from subsequent submissions: {failed_nodes_str}"
)
def clean_old_logs(self) -> None:
"""
Delete files older than specified age from the SLURM log directory.
"""
# shorthands:
age_cutoff = self.workflow.executor_settings.delete_logfiles_older_than
keep_all = self.workflow.executor_settings.keep_successful_logs
if age_cutoff <= 0 or keep_all:
return
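        # delete_logfiles_older_than is given in days; 86400 seconds per day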
cutoff_secs = age_cutoff * 86400
current_time = time.time()
self.logger.info(f"Cleaning up SLURM log files older than {age_cutoff} day(s).")
for path in self.slurm_logdir.rglob("*.log"):
if path.is_file():
try:
file_age = current_time - path.stat().st_mtime
if file_age > cutoff_secs:
path.unlink()
except (OSError, FileNotFoundError) as e:
self.logger.error(f"Could not delete logfile {path}: {e}")
        # a second pass is needed to remove any directories that are now empty
try:
delete_empty_dirs(self.slurm_logdir)
except (OSError, FileNotFoundError) as e:
self.logger.error(
f"Could not delete empty directories in {self.slurm_logdir}: {e}"
)
def warn_on_jobcontext(self, done=None):
if not done:
if "SLURM_JOB_ID" in os.environ:
self.logger.warning(
"You are running snakemake in a SLURM job context. "
"This is not recommended, as it may lead to unexpected "
"behavior. "
"If possible, please run Snakemake directly on the "
"login node."
)
time.sleep(5)
delete_slurm_environment()
done = True
def additional_general_args(self):
"""
This function defines additional arguments to be
passed to `exec_job`.
"""
general_args = "--executor slurm-jobstep --jobs 1"
        # pass the script-submission mode on to the jobstep executor, if enabled
if self.workflow.executor_settings.pass_command_as_script:
general_args += " --slurm-jobstep-pass-command-as-script"
return general_args
def run_jobs(self, jobs: List[JobExecutorInterface]):
"""
This is a meta rule to delegate the job execution to either
- `run_job` for individual job submission, or
- `run_array_jobs` for array job submission, or
- `run_pool_jobs` for pool job submission (to be implemented in the future).
"""
if self._main_event_loop is None:
try:
self._main_event_loop = asyncio.get_running_loop()
except RuntimeError:
self._main_event_loop = None
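        # if no event loop is running here, the *_threadsafe report helpers
        # below fall back to invoking the report callbacks directly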
ready_jobs_by_rule = {}
        # check whether any job is a group job, as group jobs cannot be
        # submitted as array jobs and require special handling
for job in jobs:
if job.is_group():
if job.name in self.array_jobs or "all" in self.array_jobs:
self.logger.warning(
f"Job '{job.name}' is a group job and cannot be "
"submitted as an array job. "
"Submitting it as a regular job instead."
)
self._job_submission_executor.submit(self.run_job, job)
else:
ready_jobs_by_rule.setdefault(job.rule.name, []).append(job)
for rule_name, same_rule_jobs in ready_jobs_by_rule.items():
array_selected_for_rule = (
"all" in self.array_jobs or rule_name in self.array_jobs
)
# TODO: use more sensible logging information, once finished
self.logger.info(
f"Running jobs for rule: {rule_name}, " f"{same_rule_jobs}"
)
self.logger.info("Current array job settings: " f"{self.array_jobs}")
if array_selected_for_rule:
dag = getattr(self.workflow, "dag", None)
if dag is not None:
eligible_jobs = pending_jobs_for_rule(dag, rule_name)
else:
eligible_jobs = len(same_rule_jobs)
self.logger.debug(
"workflow.dag unavailable in run_jobs(); "
"falling back to ready-job count for eligibility "
f"({rule_name}: {eligible_jobs})."
)
# Keep synchronization against DAG eligibility, but do not block
# once at least one full array chunk is ready.
chunk_size = self.max_array_size
if len(same_rule_jobs) == 1:
if eligible_jobs <= 1:
self.logger.debug(
f"Array submission requested for rule {rule_name}, "
"but only one pending job is available; submitting "
"as a regular job."
)
self._job_submission_executor.submit(
self.run_job, same_rule_jobs[0]
)
else:
self.logger.debug(
"Array job collection incomplete for rule "
f"{rule_name}: 1/{eligible_jobs} arrived. Waiting "
"for at least one full chunk."
)
else:
if (
len(same_rule_jobs) < eligible_jobs
and len(same_rule_jobs) < chunk_size
):
self.logger.debug(
"Array job collection incomplete for rule "
f"{rule_name}: {len(same_rule_jobs)}/{eligible_jobs} "
"arrived (< chunk size), waiting for more jobs."
)
continue
self.logger.debug(
"Submitting array-selected jobs for rule "
f"{rule_name}: {len(same_rule_jobs)} ready, "
f"{eligible_jobs} eligible, chunk_size={chunk_size}."
)
self._job_submission_executor.submit(
self.run_array_jobs, same_rule_jobs
)
continue
# Non-array mode: submit all ready jobs individually.
elif len(same_rule_jobs) == 1:
self.logger.debug(
f"Submitting single job for rule {rule_name} as "
"array mode is disabled."
)
self._job_submission_executor.submit(self.run_job, same_rule_jobs[0])
continue
else:
self.logger.debug(
f"Submitting {len(same_rule_jobs)} ready jobs for rule "
f"{rule_name} individually (array mode disabled)."
)
for job in same_rule_jobs:
self._job_submission_executor.submit(self.run_job, job)
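    # Submissions run on a worker thread (see _job_submission_executor), so the
    # report_* callbacks are marshalled back onto the main asyncio event loop
    # whenever one was captured in run_jobs().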
def _report_job_submission_threadsafe(self, job_info: SubmittedJobInfo):
if self._main_event_loop is not None:
self._main_event_loop.call_soon_threadsafe(
self.report_job_submission,
job_info,
)
else:
self.report_job_submission(job_info)
def _report_job_error_threadsafe(self, job_info: SubmittedJobInfo, msg: str):
if self._main_event_loop is not None:
self._main_event_loop.call_soon_threadsafe(
self.report_job_error,
job_info,
msg,
)
else:
self.report_job_error(job_info, msg=msg)
def run_array_jobs(self, jobs: List[JobExecutorInterface]):
try:
self.logger.debug(
f"Preparing to submit array job for rule {jobs[0].rule.name} "
f"with {len(jobs)} tasks."
)
group_or_rule = (
f"group_{jobs[0].name}"
if jobs[0].is_group()
else f"rule_{jobs[0].name}"
)
            # in an array job, `sbatch --output` gets a single value;
            # hence, we can only consider the first wildcard string
            # when creating the SLURM log file path.
wildcard_strs = [get_job_wildcards(job) for job in jobs]
wildcard_str = wildcard_strs[0]
# the wildcard string shall be ignored for the SLURM log
# file path.
slurm_logfile = self.slurm_logdir / group_or_rule / r"%A_%a.log"
slurm_logfile.parent.mkdir(parents=True, exist_ok=True)
# this behavior has been fixed in slurm 23.02, but there might be
# plenty of older versions around, hence we should rather be
# conservative here.
assert "%A" not in str(self.slurm_logdir), (
"bug: jobid placeholder in parent dir of logfile. This does not "
"work as we have to create that dir before submission in order to "
"make sbatch happy. Otherwise we get silent fails without "
"logfiles being created."
)
assert r"%a" not in str(self.slurm_logdir), (
"bug: jobid placeholder in parent dir of logfile. This does not "
"work as we have to create that dir before submission in order to "
"make sbatch happy. Otherwise we get silent fails without "
"logfiles being created."
)
# generic part of a submission string:
# we use a run_uuid as the job-name, to allow `--name`-based
# filtering in the job status checks (`sacct --name` and
# `squeue --name`)
if wildcard_str == "":
comment_str = f"rule_{jobs[0].name}"
else:
self.logger.warning(
"Array job submission does not allow for multiple different "
"wildcard combinations in the comment string. Only the first "
"one will be used."
)
comment_str = f"rule_{jobs[0].name}_wildcards_{wildcard_strs[0]}"
for job in jobs:
                # check whether the 'slurm_extra' parameter is used correctly
                # before it potentially gets added to the sbatch call
validate_slurm_extra(job)
self.logger.debug("Building job params for array job")
# Note: all jobs have the same resource requirement.
# Thus, we can simply take the first job to extract
# the relevant parameters for the sbatch call.
job_params = {
"run_uuid": self.run_uuid,
"slurm_logfile": slurm_logfile,
"comment_str": comment_str,
"account": next(self.get_account_arg(jobs[0])),
"partition": self.get_partition_arg(jobs[0]),
"workdir": self.workflow.workdir_init,
}
call = get_submit_command(
jobs[0],
job_params,
settings=self.workflow.executor_settings,
failed_nodes=self._failed_nodes,
)
if self._failed_nodes:
self.logger.debug(
"Excluding failed nodes from array job submission: "
f"{','.join(self._failed_nodes)}"
)
call += set_gres_string(jobs[0])
if not jobs[0].resources.get("runtime"):
self.logger.warning(
"No wall time information given. This might or might not "
"work on your cluster. "
"If not, specify the resource runtime in your rule or as "
"a reasonable default via --default-resources."
)
if not jobs[0].resources.get("mem_mb_per_cpu") and not jobs[
0
].resources.get("mem_mb"):
self.logger.warning(
"No job memory information ('mem_mb' or 'mem_mb_per_cpu') is "
"given - submitting without. This might or might not work on "
"your cluster."
)
# Build a compressed map of array task id -> full execution string
# for all jobs.
array_execs = {
index: zlib.compress(
self.format_job_exec(job).encode("utf-8"), level=9
).hex()
for index, job in enumerate(jobs, start=1)
}
# the actual array job call:
            # we need to cycle over all jobs and submit up to `array_limit` tasks per
            # submission, to avoid hitting cluster limits or exceeding the
            # command-line length limit
array_limit = min(self.max_array_size, len(jobs))
for start_index in range(1, len(jobs) + 1, array_limit):
end_index = min(start_index + array_limit - 1, len(jobs))
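                # array task indices are 1-based (cf. enumerate(..., start=1) above);
                # each chunk covers tasks start_index..end_index inclusive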
# The first task of each chunk runs via the plain base command.
# Remaining tasks are dispatched from --slurm-jobstep-array-execs.
exec_job = self.format_job_exec(jobs[start_index - 1])
sub_array_execs = {
str(i): array_execs[i]
for i in range(start_index + 1, end_index + 1)
}
array_execs_payload = base64.b64encode(
json.dumps(sub_array_execs).encode("utf-8")
).decode()
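                # payload layout: base64-encoded JSON mapping array task index ->
                # zlib-compressed, hex-encoded execution command; the jobstep side
                # is expected to reverse this encoding (assumption, based on the
                # --slurm-jobstep-array-execs flag passed below)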
use_script_submission = (
self.workflow.executor_settings.pass_command_as_script
)
submission_failed = False
while True:
call_with_array = call + f" --array={start_index}-{end_index}"
if not use_script_submission:
# Use --wrap for the base execution command.
call_with_array += (
f' --wrap="{exec_job}'
f" --slurm-jobstep-array-execs="
f"{shlex.quote(array_execs_payload)}"
'"'
)
subprocess_stdin = None
self.logger.debug(f"call with array: {call_with_array}")
else:
# Use /dev/stdin to pass the base execution command as a script.
sbatch_script = "\n".join(
[
"#!/bin/sh",
f"{exec_job}",
"--slurm-jobstep-array-execs",
shlex.quote(array_execs_payload),
]
)
call_with_array += " /dev/stdin"
subprocess_stdin = sbatch_script
self.logger.debug(
f"Submitting array job with sbatch call: {call_with_array}"
)
try:
process = subprocess.Popen(
call_with_array,
shell=True,
text=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = process.communicate(input=subprocess_stdin)
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode, call_with_array, output=err
)
break
except OSError as e:
if e.errno == errno.E2BIG and not use_script_submission:
self.logger.warning(
"Array sbatch command exceeds argument-length "
"limits; retrying via /dev/stdin script mode "
f"for tasks {start_index}-{end_index}."
)
use_script_submission = True
continue
raise
except subprocess.CalledProcessError as e:
error_msg = (
"SLURM sbatch failed for array job submission "
f"(tasks {start_index}-{end_index}). "
f"The error message was '{e.output.strip()}'.\n"
f" sbatch call:\n {call_with_array}\n"
)
self.logger.error(error_msg)
for job in jobs[start_index - 1 : end_index]:
self._report_job_error_threadsafe(
SubmittedJobInfo(job),
(
f"Part of failed array sbatch submission "
f"(tasks {start_index}-{end_index}); "
"see log for details."
),
)
submission_failed = True
break
if submission_failed:
continue
# To extract the job id we split by semicolon and take the first
# element (this also works if no cluster name was provided)
slurm_jobid = out.strip().split(";")[0]
            # this slurm_jobid might be wrong: some cluster admins configure
            # sbatch to emit convoluted output. So we need to validate it
            # properly (and replace it if necessary).
slurm_jobid = validate_or_get_slurm_job_id(slurm_jobid, out)
            # here, however, we are dealing with array jobs and the job id is of
            # the form <jobid>_<array_task_id>, so we need to add the task ids
            job_ids = list()  # Snakemake internal ids
for index in range(start_index, end_index + 1):
# Calculate the actual logfile path for this array task
job = jobs[index - 1]
job_ids.append(job.jobid)
job_wildcard_str = get_job_wildcards(job)
job_logfile = (
self.slurm_logdir
/ group_or_rule
/ job_wildcard_str
/ f"{slurm_jobid}_{index}.log"
)