|
| 1 | +# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) |
| 2 | +# ReFrame Project Developers. See the top-level LICENSE file for details. |
| 3 | +# |
| 4 | +# SPDX-License-Identifier: BSD-3-Clause |
| 5 | + |
| 6 | +# |
| 7 | +# OAR backend |
| 8 | +# |
| 9 | +# - Initial version submitted by Mahendra Paipuri, INRIA |
| 10 | +# |
| 11 | + |
| 12 | +import functools |
| 13 | +import os |
| 14 | +import re |
| 15 | +import time |
| 16 | + |
| 17 | +import reframe.core.runtime as rt |
| 18 | +import reframe.utility.osext as osext |
| 19 | +from reframe.core.backends import register_scheduler |
| 20 | +from reframe.core.exceptions import JobError, JobSchedulerError |
| 21 | +from reframe.core.schedulers.pbs import PbsJobScheduler |
| 22 | +from reframe.utility import seconds_to_hms |
| 23 | + |
| 24 | + |
| 25 | +# States can be found here: |
| 26 | +# https://github.com/oar-team/oar/blob/0fccc4fc3bb86ee935ce58effc5aec514a3e155d/sources/core/qfunctions/oarstat#L293 |
def oar_state_completed(state):
    '''Check whether an OAR job state string denotes a finished job.

    ``state`` may be a comma-separated list of states; the job counts as
    completed only if every listed state is a terminal one.
    '''
    if not state:
        return False

    terminal = ('Error', 'Terminated')
    return all(part in terminal for part in state.split(','))
| 36 | + |
| 37 | + |
def oar_state_pending(state):
    '''Check whether an OAR job state string denotes a still-active job.

    ``state`` may be a comma-separated list of states; the job counts as
    pending/active if any listed state is a non-terminal one.
    '''
    if not state:
        return False

    active = {
        'Waiting', 'toLaunch', 'Launching', 'Hold', 'Running',
        'toError', 'Finishing', 'Suspended', 'Resuming',
    }
    return any(part in active for part in state.split(','))
| 54 | + |
| 55 | + |
| 56 | +_run_strict = functools.partial(osext.run_command, check=True) |
| 57 | + |
| 58 | + |
@register_scheduler('oar')
class OarJobScheduler(PbsJobScheduler):
    '''Job scheduler backend for the OAR batch scheduler.

    OAR uses a hierarchical resource request: ``host`` is the de-facto
    node and ``core`` is the number of cores requested per node.  The
    number of sockets can also be specified using ``cpu={num_sockets}``.
    '''

    # Template of the main resource reservation option; formatted per job
    # in emit_preamble().
    TASKS_OPT = '-l /host={num_nodes}/core={num_tasks_per_node}'

    def __init__(self):
        self._prefix = '#OAR'
        self._submit_timeout = rt.runtime().get_option(
            f'schedulers/@{self.registered_name}/job_submit_timeout'
        )

    def emit_preamble(self, job):
        '''Return the scheduler directives for ``job``'s submission script.'''

        # Same reason as oarsub: give the full path to the output and error
        # files so that they are not written in the working dir
        preamble = [
            self._format_option(f'-n "{job.name}"'),
            self._format_option(f'-O {os.path.join(job.workdir, job.stdout)}'),
            self._format_option(f'-E {os.path.join(job.workdir, job.stderr)}'),
        ]

        # Build the resource request in a local variable.  The previous
        # implementation appended the walltime to `self.TASKS_OPT`, which
        # shadowed the class attribute and accumulated a duplicate
        # `,walltime=...` clause on every subsequent submission through the
        # same scheduler instance.
        tasks_opt = self.TASKS_OPT
        if job.time_limit is not None:
            h, m, s = seconds_to_hms(job.time_limit)
            tasks_opt += ',walltime=%d:%d:%d' % (h, m, s)

        # Get the number of nodes in the reservation
        num_tasks_per_node = job.num_tasks_per_node or 1
        num_nodes = job.num_tasks // num_tasks_per_node

        # Emit the main resource reservation option
        options = [tasks_opt.format(
            num_nodes=num_nodes, num_tasks_per_node=num_tasks_per_node,
        )]

        # Emit the rest of the options
        options += job.sched_access + job.options + job.cli_options
        for opt in options:
            if opt.startswith('#'):
                preamble.append(opt)
            else:
                preamble.append(self._format_option(opt))

        # OAR starts the job in the home directory by default
        preamble.append(f'cd {job.workdir}')
        return preamble

    def submit(self, job):
        '''Submit ``job`` with ``oarsub`` and record its job id.

        :raises JobSchedulerError: if the job id cannot be parsed from the
            ``oarsub`` output.
        '''

        # For some reason the OAR job manager says that the job launching
        # dir is the working dir of the repo and not the stage dir.  A
        # workaround is to give the full path of the script to oarsub.
        job_script_fullpath = os.path.join(job.workdir, job.script_filename)

        # OAR needs -S to submit the job in batch mode
        cmd = f'oarsub -S {job_script_fullpath}'
        completed = _run_strict(cmd, timeout=self._submit_timeout)
        jobid_match = re.search(r'.*OAR_JOB_ID=(?P<jobid>\S+)',
                                completed.stdout)
        if not jobid_match:
            raise JobSchedulerError('could not retrieve the job id '
                                    'of the submitted job')

        job._jobid = jobid_match.group('jobid')
        job._submit_time = time.time()

    def cancel(self, job):
        '''Cancel ``job`` with ``oardel`` and mark it as cancelled.'''
        _run_strict(f'oardel {job.jobid}', timeout=self._submit_timeout)
        job._cancelled = True

    def poll(self, *jobs):
        '''Poll the scheduler and update the state of each job in ``jobs``.'''
        if jobs:
            # Filter out non-jobs
            jobs = [job for job in jobs if job is not None]

        if not jobs:
            return

        for job in jobs:
            # NOTE(review): _run_strict raises on a non-zero exit status, so
            # an unknown job id may raise here instead of reaching the
            # "assume completed" fallback below — confirm oarstat's exit
            # code behaviour for unknown jobs.
            completed = _run_strict(
                f'oarstat -fj {job.jobid}'
            )

            # Store information for each job separately
            jobinfo = {}

            # Typical oarstat -fj <job_id> output:
            # https://github.com/oar-team/oar/blob/0fccc4fc3bb86ee935ce58effc5aec514a3e155d/sources/core/qfunctions/oarstat#L310
            job_raw_info = completed.stdout
            jobid_match = re.search(
                r'^Job_Id:\s*(?P<jobid>\S+)', completed.stdout, re.MULTILINE
            )
            if jobid_match:
                jobid = jobid_match.group('jobid')
                jobinfo[jobid] = job_raw_info

            if job.jobid not in jobinfo:
                self.log(f'Job {job.jobid} not known to scheduler, '
                         f'assuming job completed')
                job._state = 'Terminated'
                job._completed = True
                continue

            info = jobinfo[job.jobid]
            state_match = re.search(
                r'^\s*state = (?P<state>[A-Z]\S+)', info, re.MULTILINE
            )
            if not state_match:
                self.log(f'Job state not found (job info follows):\n{info}')
                continue

            job._state = state_match.group('state')
            if oar_state_completed(job.state):
                exitcode_match = re.search(
                    r'^\s*exit_code = (?P<code>\d+)',
                    info, re.MULTILINE,
                )

                if exitcode_match:
                    job._exitcode = int(exitcode_match.group('code'))

                # We report a job as finished only when its stdout/stderr
                # are written back to the working directory
                stdout = os.path.join(job.workdir, job.stdout)
                stderr = os.path.join(job.workdir, job.stderr)
                out_ready = os.path.exists(stdout) and os.path.exists(stderr)
                done = job.cancelled or out_ready
                if done:
                    job._completed = True
            elif oar_state_pending(job.state) and job.max_pending_time:
                # Cancel jobs that have exceeded their maximum allowed
                # pending time and record the failure on the job object
                if time.time() - job.submit_time >= job.max_pending_time:
                    self.cancel(job)
                    job._exception = JobError('maximum pending time exceeded',
                                              job.jobid)
0 commit comments