"""Callback logger for recording workflow and node run stats
"""
# Log node stats function
def log_nodes_cb(node, status):
    """Record node run statistics to the 'callback' log as a JSON
    dictionary.

    Parameters
    ----------
    node : nipype.pipeline.engine.Node
        the node being logged; its ``result``, ``name``, ``_id`` and
        ``_interface`` attributes are read
    status : string
        acceptable values are 'start', 'end'; any other value is
        considered an error

    Returns
    -------
    None
        this function does not return any values, it logs the node
        status info to the callback logger
    """

    # Import packages
    # NOTE: imports are function-local so the callback stays cheap to
    # import from the plugin that registers it
    import datetime
    import json
    import logging

    # Check runtime profile stats; runtime attributes are only present
    # when resource profiling was enabled for the run
    if node.result is not None:
        try:
            runtime = node.result.runtime
            runtime_memory_gb = runtime.runtime_memory_gb
            runtime_threads = runtime.runtime_threads
        except AttributeError:
            runtime_memory_gb = runtime_threads = 'Unknown'
    else:
        runtime_memory_gb = runtime_threads = 'N/A'

    # Init variables
    logger = logging.getLogger('callback')
    status_dict = {'name': node.name,
                   'id': node._id,
                   'estimated_memory_gb': node._interface.estimated_memory_gb,
                   'num_threads': node._interface.num_threads}

    # Check status and write to log
    # Start
    if status == 'start':
        status_dict['start'] = str(datetime.datetime.now())
    # End
    elif status == 'end':
        status_dict['finish'] = str(datetime.datetime.now())
        status_dict['runtime_threads'] = runtime_threads
        status_dict['runtime_memory_gb'] = runtime_memory_gb
    # Other
    else:
        status_dict['finish'] = str(datetime.datetime.now())
        status_dict['error'] = True

    # Dump string to log
    logger.debug(json.dumps(status_dict))
0 commit comments