
Commit 250b6d3

Added runtime_profile to run by default unless the necessary packages aren't available
1 parent a170644 commit 250b6d3

File tree

nipype/interfaces/base.py
nipype/interfaces/tests/test_runtime_profiler.py
nipype/pipeline/plugins/multiproc.py

3 files changed, +34 -39 lines changed

nipype/interfaces/base.py

Lines changed: 15 additions & 9 deletions
@@ -1032,6 +1032,7 @@ def run(self, **inputs):
         self._check_mandatory_inputs()
         self._check_version_requirements(self.inputs)
         interface = self.__class__
+
         # initialize provenance tracking
         env = deepcopy(dict(os.environ))
         runtime = Bunch(cwd=os.getcwd(),
@@ -1211,7 +1212,6 @@ def _get_num_threads(proc):
 
     # Import packages
     import psutil
-    import logging as lg
 
     # Init variables
     num_threads = proc.num_threads()
@@ -1255,13 +1255,19 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False):
     The returned runtime contains a merged stdout+stderr log with timestamps
     """
 
-    # Import packages
+    # Init logger
+    logger = logging.getLogger('workflow')
+
+    # Default to profiling the runtime
     try:
         import memory_profiler
         import psutil
-        mem_prof = True
-    except:
-        mem_prof = False
+        runtime_profile = True
+    except ImportError as exc:
+        logger.info('Unable to import packages needed for runtime '\
+                    'profiling. Turning off runtime profiler.\n'\
+                    'Error: %s' % exc)
+        runtime_profile = False
 
     # Init variables
     PIPE = subprocess.PIPE
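
This guard makes profiling opt-out rather than opt-in: run_command now profiles every command unless memory_profiler or psutil cannot be imported, in which case it logs the reason and continues without profiling. A minimal standalone sketch of the same pattern (illustrative only; it assumes nothing beyond the standard logging module and the two optional packages):

import logging

logger = logging.getLogger('workflow')

# Default to profiling the runtime; fall back cleanly when the optional
# dependencies are missing.
try:
    import memory_profiler  # noqa: F401 -- imported only to confirm availability
    import psutil           # noqa: F401
    runtime_profile = True
except ImportError as exc:
    logger.info('Unable to import packages needed for runtime profiling. '
                'Turning off runtime profiler.\nError: %s', exc)
    runtime_profile = False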
@@ -1317,7 +1323,7 @@ def _process(drain=0):
                 for stream in res[0]:
                     stream.read(drain)
         while proc.returncode is None:
-            if mem_prof:
+            if runtime_profile:
                 mem_mb, num_threads = \
                     _get_max_resources_used(proc, mem_mb, num_threads)
             proc.poll()
@@ -1335,7 +1341,7 @@ def _process(drain=0):
         result['merged'] = [r[1] for r in temp]
 
     if output == 'allatonce':
-        if mem_prof:
+        if runtime_profile:
             while proc.returncode is None:
                 mem_mb, num_threads = \
                     _get_max_resources_used(proc, mem_mb, num_threads, poll=True)
@@ -1355,7 +1361,7 @@ def _process(drain=0):
         result['stderr'] = str(stderr).split('\n')
         result['merged'] = ''
     if output == 'file':
-        if mem_prof:
+        if runtime_profile:
            while proc.returncode is None:
                mem_mb, num_threads = \
                    _get_max_resources_used(proc, mem_mb, num_threads, poll=True)
@@ -1366,7 +1372,7 @@ def _process(drain=0):
         result['stderr'] = [line.strip() for line in open(errfile).readlines()]
         result['merged'] = ''
     if output == 'none':
-        if mem_prof:
+        if runtime_profile:
             while proc.returncode is None:
                 mem_mb, num_threads = \
                     _get_max_resources_used(proc, mem_mb, num_threads, poll=True)
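
Every output mode now polls the running process through _get_max_resources_used to track peak memory and thread usage. That helper is defined earlier in base.py and is not shown in this diff; a hedged sketch of what such a helper can look like (the real implementation may differ, e.g. by aggregating child processes):

import psutil

def _get_max_resources_used(proc, mem_mb, num_threads, poll=False):
    # proc: handle of the running command (exposes .pid and .poll());
    # mem_mb / num_threads: running maxima carried between calls.
    try:
        info = psutil.Process(proc.pid)
        mem_mb = max(mem_mb, info.memory_info().rss / (1024.0 ** 2))
        num_threads = max(num_threads, info.num_threads())
    except psutil.NoSuchProcess:
        pass
    if poll:
        proc.poll()
    return mem_mb, num_threads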

nipype/interfaces/tests/test_runtime_profiler.py

Lines changed: 0 additions & 1 deletion
@@ -137,7 +137,6 @@ def _run_workflow(self):
         # Run workflow
         plugin_args = {'n_procs' : num_procs,
                        'memory' : num_gb,
-                       'runtime_profile' : True,
                        'status_callback' : log_nodes_cb}
         wf.run(plugin='ResourceMultiProc', plugin_args=plugin_args)
 

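With profiling on by default, the test no longer passes a 'runtime_profile' flag; the remaining plugin_args cover only scheduling limits and the status callback. As a usage sketch (num_procs, num_gb and log_nodes_cb are the test's own variables):

plugin_args = {'n_procs': num_procs,             # number of worker processes
               'memory': num_gb,                 # memory budget in GB
               'status_callback': log_nodes_cb}  # per-node logging hook
wf.run(plugin='ResourceMultiProc', plugin_args=plugin_args)
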
nipype/pipeline/plugins/multiproc.py

Lines changed: 19 additions & 29 deletions
@@ -23,7 +23,7 @@
 logger = logging.getLogger('workflow')
 
 # Run node
-def run_node(node, updatehash, runtime_profile=False):
+def run_node(node, updatehash):
     """docstring
     """
 
@@ -33,29 +33,22 @@ def run_node(node, updatehash, runtime_profile=False):
     # Init variables
     result = dict(result=None, traceback=None)
 
-    # If we're profiling the run
-    if runtime_profile:
-        try:
-            start = datetime.datetime.now()
-            retval = node.run(updatehash=updatehash)
-            run_secs = (datetime.datetime.now() - start).total_seconds()
-            result['result'] = retval
-            result['runtime_seconds'] = run_secs
-            if hasattr(retval.runtime, 'get'):
-                result['runtime_memory'] = retval.runtime.get('runtime_memory')
-                result['runtime_threads'] = retval.runtime.get('runtime_threads')
-        except:
-            etype, eval, etr = sys.exc_info()
-            result['traceback'] = format_exception(etype,eval,etr)
-            result['result'] = node.result
-    # Otherwise, execute node.run as normal
-    else:
-        try:
-            result['result'] = node.run(updatehash=updatehash)
-        except:
-            etype, eval, etr = sys.exc_info()
-            result['traceback'] = format_exception(etype,eval,etr)
-            result['result'] = node.result
+    # Run the node and profile its runtime
+    try:
+        start = datetime.datetime.now()
+        retval = node.run(updatehash=updatehash)
+        run_secs = (datetime.datetime.now() - start).total_seconds()
+        result['result'] = retval
+        result['runtime_seconds'] = run_secs
+        if hasattr(retval.runtime, 'get'):
+            result['runtime_memory'] = retval.runtime.get('runtime_memory')
+            result['runtime_threads'] = retval.runtime.get('runtime_threads')
+    except:
+        etype, eval, etr = sys.exc_info()
+        result['traceback'] = format_exception(etype,eval,etr)
+        result['result'] = node.result
+
+    # Return the result dictionary
     return result
 
 
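run_node now always times the node and, when the interface's runtime object exposes them, records peak memory and thread counts alongside the result. A hedged sketch of how a caller could read those fields from the returned dictionary (node stands for any already-built nipype Node):

res = run_node(node, updatehash=False)
if res['traceback'] is None:
    print('runtime (s): %s' % res.get('runtime_seconds'))
    print('peak memory (MB): %s' % res.get('runtime_memory'))
    print('threads used: %s' % res.get('runtime_threads'))
else:
    print(''.join(res['traceback']))
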
@@ -160,13 +153,10 @@ def _submit_job(self, node, updatehash=False):
             node.inputs.terminal_output = 'allatonce'
         except:
             pass
-        try:
-            runtime_profile = self.plugin_args['runtime_profile']
-        except:
-            runtime_profile = False
+
         self._taskresult[self._taskid] = \
             self.pool.apply_async(run_node,
-                                  (node, updatehash, runtime_profile),
+                                  (node, updatehash),
                                   callback=release_lock)
         return self._taskid
 