+# -*- coding: utf-8 -*-
+""" Single and multi-threaded executors."""
 import os
 import tempfile
 import threading
@@ -46,33 +48,33 @@ def run_jobs(self,
                  process,           # type: Process
                  job_order_object,  # type: Dict[Text, Any]
                  logger,
-                 runtimeContext   # type: RuntimeContext
+                 runtime_context  # type: RuntimeContext
                 ):  # type: (...) -> None
         """ Execute the jobs for the given Process. """
         pass
 
     def execute(self,
                 process,           # type: Process
                 job_order_object,  # type: Dict[Text, Any]
-                runtimeContext,   # type: RuntimeContext
+                runtime_context,  # type: RuntimeContext
                 logger=_logger,
                ):  # type: (...) -> Tuple[Optional[Dict[Text, Any]], Text]
         """ Execute the process. """
 
-        if not runtimeContext.basedir:
+        if not runtime_context.basedir:
             raise WorkflowException("Must provide 'basedir' in runtimeContext")
 
         finaloutdir = None  # Type: Optional[Text]
-        original_outdir = runtimeContext.outdir
+        original_outdir = runtime_context.outdir
         if isinstance(original_outdir, string_types):
             finaloutdir = os.path.abspath(original_outdir)
-        runtimeContext = runtimeContext.copy()
-        runtimeContext.outdir = tempfile.mkdtemp(
-            prefix=getdefault(runtimeContext.tmp_outdir_prefix, DEFAULT_TMP_PREFIX))
-        self.output_dirs.add(runtimeContext.outdir)
-        runtimeContext.mutation_manager = MutationManager()
-        runtimeContext.toplevel = True
-        runtimeContext.workflow_eval_lock = threading.Condition(threading.RLock())
+        runtime_context = runtime_context.copy()
+        runtime_context.outdir = tempfile.mkdtemp(
+            prefix=getdefault(runtime_context.tmp_outdir_prefix, DEFAULT_TMP_PREFIX))
+        self.output_dirs.add(runtime_context.outdir)
+        runtime_context.mutation_manager = MutationManager()
+        runtime_context.toplevel = True
+        runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
 
         job_reqs = None
         if "cwl:requirements" in job_order_object:
@@ -84,20 +86,20 @@ def execute(self,
             for req in job_reqs:
                 process.requirements.append(req)
 
-        self.run_jobs(process, job_order_object, logger, runtimeContext)
+        self.run_jobs(process, job_order_object, logger, runtime_context)
 
         if self.final_output and self.final_output[0] and finaloutdir:
             self.final_output[0] = relocateOutputs(
                 self.final_output[0], finaloutdir, self.output_dirs,
-                runtimeContext.move_outputs, runtimeContext.make_fs_access(""),
-                getdefault(runtimeContext.compute_checksum, True))
+                runtime_context.move_outputs, runtime_context.make_fs_access(""),
+                getdefault(runtime_context.compute_checksum, True))
 
-        if runtimeContext.rm_tmpdir:
+        if runtime_context.rm_tmpdir:
             cleanIntermediate(self.output_dirs)
 
         if self.final_output and self.final_status:
 
-            if runtimeContext.research_obj is not None and \
+            if runtime_context.research_obj is not None and \
                     isinstance(process, (JobBase, Process, WorkflowJobStep,
                                          WorkflowJob)) and process.parent_wf:
                 process_run_id = None
@@ -118,45 +120,46 @@ def run_jobs(self,
                  process,           # type: Process
                  job_order_object,  # type: Dict[Text, Any]
                  logger,
-                 runtimeContext   # type: RuntimeContext
+                 runtime_context  # type: RuntimeContext
                 ):  # type: (...) -> None
 
         process_run_id = None  # type: Optional[str]
         reference_locations = {}  # type: Dict[Text,Text]
 
         # define provenance profile for single commandline tool
         if not isinstance(process, Workflow) \
-                and runtimeContext.research_obj is not None:
-            orcid = runtimeContext.orcid
-            full_name = runtimeContext.cwl_full_name
+                and runtime_context.research_obj is not None:
+            orcid = runtime_context.orcid
+            full_name = runtime_context.cwl_full_name
             process.provenance_object = CreateProvProfile(
-                runtimeContext.research_obj, orcid, full_name)
+                runtime_context.research_obj, orcid, full_name)
             process.parent_wf = process.provenance_object
-        jobiter = process.job(job_order_object, self.output_callback, runtimeContext)
+        jobiter = process.job(job_order_object, self.output_callback,
+                              runtime_context)
 
         try:
             for job in jobiter:
                 if job:
-                    if runtimeContext.builder is not None:
-                        job.builder = runtimeContext.builder
+                    if runtime_context.builder is not None:
+                        job.builder = runtime_context.builder
                     if job.outdir:
                         self.output_dirs.add(job.outdir)
-                    if runtimeContext.research_obj is not None:
+                    if runtime_context.research_obj is not None:
                         if not isinstance(process, Workflow):
-                            runtimeContext.prov_obj = process.provenance_object
+                            runtime_context.prov_obj = process.provenance_object
                         else:
-                            runtimeContext.prov_obj = job.prov_obj
-                        assert runtimeContext.prov_obj
+                            runtime_context.prov_obj = job.prov_obj
+                        assert runtime_context.prov_obj
                         process_run_id, reference_locations = \
-                            runtimeContext.prov_obj.evaluate(
-                                process, job, job_order_object,
-                                runtimeContext.make_fs_access,
-                                runtimeContext)
-                        runtimeContext = runtimeContext.copy()
-                        runtimeContext.process_run_id = process_run_id
-                        runtimeContext.reference_locations = \
+                            runtime_context.prov_obj.evaluate(
+                                process, job, job_order_object,
+                                runtime_context.make_fs_access,
+                                runtime_context)
+                        runtime_context = runtime_context.copy()
+                        runtime_context.process_run_id = process_run_id
+                        runtime_context.reference_locations = \
                             reference_locations
-                    job.run(runtimeContext)
+                    job.run(runtime_context)
                 else:
                     logger.error("Workflow cannot make any more progress.")
                     break
@@ -182,20 +185,24 @@ def __init__(self):  # type: () -> None
         self.exceptions = []  # type: List[WorkflowException]
         self.pending_jobs = []  # type: List[JobBase]
 
-        self.max_ram = psutil.virtual_memory().total / 2**20
+        self.max_ram = psutil.virtual_memory().available / 2**20
         self.max_cores = psutil.cpu_count()
         self.allocated_ram = 0
         self.allocated_cores = 0
 
-    def select_resources(self, request, runtimeContext):
-        result = {}
+    def select_resources(self, request, runtime_context):  # pylint: disable=unused-argument
+        # type: (Dict[str, int], RuntimeContext) -> Dict[str, int]
+        """ Naïve check for available cpu cores and memory. """
+        result = {}  # type: Dict[str, int]
         maxrsc = {
             "cores": self.max_cores,
             "ram": self.max_ram
         }
         for rsc in ("cores", "ram"):
             if request[rsc + "Min"] > maxrsc[rsc]:
-                raise WorkflowException("Requested at least %d %s but only %d available", request[rsc + "Min"], rsc, maxrsc[rsc])
+                raise WorkflowException(
+                    "Requested at least %d %s but only %d available" %
+                    (request[rsc + "Min"], rsc, maxrsc[rsc]))
             if request[rsc + "Max"] < maxrsc[rsc]:
                 result[rsc] = request[rsc + "Max"]
             else:
@@ -204,8 +211,8 @@ def select_resources(self, request, runtimeContext):
             return result
 
     def run_job(self,
-                job,            # type: JobBase
-                runtimeContext  # type: RuntimeContext
+                job,             # type: JobBase
+                runtime_context  # type: RuntimeContext
               ):  # type: (...) -> None
         """ Execute a single Job in a separate thread. """
 
@@ -216,28 +223,28 @@ def run_job(self,
             job = self.pending_jobs[0]
             if isinstance(job, JobBase):
                 if ((self.allocated_ram + job.builder.resources["ram"]) > self.max_ram or
-                    (self.allocated_cores + job.builder.resources["cores"]) > self.max_cores):
+                        (self.allocated_cores + job.builder.resources["cores"]) > self.max_cores):
                     return
 
             self.pending_jobs.pop(0)
 
             def runner():
                 """ Job running thread. """
                 try:
-                    job.run(runtimeContext)
+                    job.run(runtime_context)
                 except WorkflowException as err:
                     _logger.exception("Got workflow error")
                     self.exceptions.append(err)
-                except Exception as err:
+                except Exception as err:  # pylint: disable=broad-except
                     _logger.exception("Got workflow error")
                     self.exceptions.append(WorkflowException(Text(err)))
                 finally:
-                    with runtimeContext.workflow_eval_lock:
+                    with runtime_context.workflow_eval_lock:
                         self.threads.remove(thread)
                         if isinstance(job, JobBase):
                             self.allocated_ram -= job.builder.resources["ram"]
                             self.allocated_cores -= job.builder.resources["cores"]
-                        runtimeContext.workflow_eval_lock.notifyAll()
+                        runtime_context.workflow_eval_lock.notifyAll()
 
             thread = threading.Thread(target=runner)
             thread.daemon = True
@@ -258,30 +265,32 @@ def run_jobs(self,
                  process,           # type: Process
                  job_order_object,  # type: Dict[Text, Any]
                  logger,
-                 runtimeContext   # type: RuntimeContext
+                 runtime_context  # type: RuntimeContext
                 ):  # type: (...) -> None
 
-        jobiter = process.job(job_order_object, self.output_callback, runtimeContext)
+        jobiter = process.job(job_order_object, self.output_callback,
+                              runtime_context)
 
-        if runtimeContext.workflow_eval_lock is None:
-            raise WorkflowException("runtimeContext.workflow_eval_lock must not be None")
+        if runtime_context.workflow_eval_lock is None:
+            raise WorkflowException(
+                "runtimeContext.workflow_eval_lock must not be None")
 
-        runtimeContext.workflow_eval_lock.acquire()
+        runtime_context.workflow_eval_lock.acquire()
         for job in jobiter:
             if job is not None:
-                if runtimeContext.builder is not None:
-                    job.builder = runtimeContext.builder
+                if runtime_context.builder is not None:
+                    job.builder = runtime_context.builder
                 if job.outdir:
                     self.output_dirs.add(job.outdir)
 
-            self.run_job(job, runtimeContext)
+            self.run_job(job, runtime_context)
 
             if job is None:
                 if self.threads:
-                    self.wait_for_next_completion(runtimeContext)
+                    self.wait_for_next_completion(runtime_context)
                 else:
                     logger.error("Workflow cannot make any more progress.")
                     break
 
         while self.threads:
-            self.wait_for_next_completion(runtimeContext)
+            self.wait_for_next_completion(runtime_context)
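For context, a minimal usage sketch of the renamed entry point, showing how a caller would hand the snake_case runtime_context to SingleJobExecutor.execute(). The my_process placeholder and the basedir path are illustrative assumptions, not part of this commit; only the RuntimeContext and SingleJobExecutor names come from the code above.

    import logging

    from cwltool.context import RuntimeContext
    from cwltool.executors import SingleJobExecutor

    # execute() raises WorkflowException unless basedir is set (see the check above).
    runtime_context = RuntimeContext()
    runtime_context.basedir = "/tmp/cwl-run"  # illustrative path

    executor = SingleJobExecutor()
    # my_process is a hypothetical, already-loaded Process object:
    # output, status = executor.execute(my_process, {"message": "hello"},
    #                                   runtime_context, logging.getLogger("cwltool"))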