 import numpy as np
 
 from ... import logging
-from ...utils.misc import str2bool
 from ...utils.profiler import get_system_total_memory_gb
 from ..engine import MapNode
 from .base import DistributedPluginBase
@@ -44,10 +43,6 @@ def run_node(node, updatehash, taskid):
         dictionary containing the node runtime results and stats
     """
 
-    from nipype import logging
-    logger = logging.getLogger('workflow')
-
-    logger.debug('run_node called on %s', node.name)
     # Init variables
     result = dict(result=None, traceback=None, taskid=taskid)
 
@@ -148,6 +143,9 @@ def _submit_job(self, node, updatehash=False):
         self._task_obj[self._taskid] = self.pool.apply_async(
             run_node, (node, updatehash, self._taskid),
             callback=self._async_callback)
+
+        logger.debug('MultiProc submitted task %s (taskid=%d).',
+                     node.fullname, self._taskid)
         return self._taskid
 
     def _prerun_check(self, graph):
@@ -245,7 +243,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
             free_memory_gb -= next_job_gb
             free_processors -= next_job_th
             logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). Free: %0.2fGB, %d threads.',
-                         self.procs[jobid]._id, jobid, next_job_gb, next_job_th,
+                         self.procs[jobid].fullname, jobid, next_job_gb, next_job_th,
                          free_memory_gb, free_processors)
 
             # change job status in appropriate queues
@@ -274,7 +272,6 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
 
             # Task should be submitted to workers
             # Send job to task manager and add to pending tasks
-            logger.debug('MultiProc submitting job ID %d', jobid)
             if self._status_callback:
                 self._status_callback(self.procs[jobid], 'start')
             tid = self._submit_job(deepcopy(self.procs[jobid]),
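For context on the submission path this diff touches: `_submit_job` hands `run_node` to a worker pool via `apply_async` with a result callback (`self._async_callback`), and the added debug line uses lazy `%`-style logging. The sketch below illustrates that general pattern with a plain `multiprocessing.Pool`; the `work` and `handle_result` names are hypothetical stand-ins, not nipype code.

```python
# Minimal sketch of the apply_async + callback pattern, assuming a plain
# multiprocessing.Pool; `work` and `handle_result` are hypothetical stand-ins.
import logging
from multiprocessing import Pool

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('workflow')

results = {}


def work(taskid):
    # Stand-in for run_node: return a result dict keyed by the task id.
    return dict(result=taskid * 2, traceback=None, taskid=taskid)


def handle_result(result):
    # The callback runs in the parent process once a worker finishes,
    # mirroring the role played by _async_callback in the plugin.
    results[result['taskid']] = result


if __name__ == '__main__':
    pool = Pool(processes=2)
    for taskid in range(4):
        pool.apply_async(work, (taskid,), callback=handle_result)
        # Lazy %-style logging, as in the added debug call: the arguments are
        # only interpolated if DEBUG is enabled on this logger.
        logger.debug('submitted task %d', taskid)
    pool.close()
    pool.join()
    print(results)
```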