Skip to content

Commit 2b0a6e2

Browse files
committed
Remove proc terminology from variable names
1 parent a52395a commit 2b0a6e2

File tree

2 files changed

+85
-81
lines changed

2 files changed

+85
-81
lines changed

nipype/interfaces/tests/test_runtime_profiler.py

Lines changed: 71 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ class UseResourcesInputSpec(CommandLineInputSpec):
2828
# Init attributes
2929
num_gb = traits.Float(desc='Number of GB of RAM to use',
3030
argstr='-g %f')
31-
num_procs = traits.Int(desc='Number of processors to use',
31+
num_threads = traits.Int(desc='Number of threads to use',
3232
argstr='-p %d')
3333

3434

@@ -52,8 +52,8 @@ class UseResources(CommandLine):
5252
_cmd = exec_path
5353

5454

55-
# Spin multiple processors
56-
def use_resources(num_procs, num_gb):
55+
# Spin multiple threads
56+
def use_resources(num_threads, num_gb):
5757
'''
5858
Function to execute multiple use_gb_ram functions in parallel
5959
'''
@@ -82,19 +82,19 @@ def _use_gb_ram(num_gb):
8282
# Init variables
8383
num_gb = float(num_gb)
8484

85-
# Build proc list
86-
proc_list = []
87-
for idx in range(num_procs):
88-
proc = Thread(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx))
89-
proc_list.append(proc)
85+
# Build thread list
86+
thread_list = []
87+
for idx in range(num_threads):
88+
thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), name=str(idx))
89+
thread_list.append(thread)
9090

9191
# Run multi-threaded
92-
print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs)
93-
for idx, proc in enumerate(proc_list):
94-
proc.start()
92+
print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads)
93+
for idx, thread in enumerate(thread_list):
94+
thread.start()
9595

96-
for proc in proc_list:
97-
proc.join()
96+
for thread in thread_list:
97+
thread.join()
9898

9999

100100
# Test case for the run function
@@ -134,9 +134,9 @@ def setUp(self):
134134
self.mem_err_percent = 5
135135

136136
# ! Only used for benchmarking the profiler over a range of
137-
# ! processors and RAM usage
138-
# ! Requires a LOT of RAM and PROCS to be tested
139-
def _collect_range_runtime_stats(self):
137+
# ! RAM usage
138+
# ! Requires a LOT of RAM to be tested
139+
def _collect_range_runtime_stats(self, num_threads):
140140
'''
141141
Function to collect a range of runtime stats
142142
'''
@@ -147,54 +147,58 @@ def _collect_range_runtime_stats(self):
147147
import pandas as pd
148148

149149
# Init variables
150-
num_procs_range = 8
151150
ram_gb_range = 10.0
152151
ram_gb_step = 0.25
153152
dict_list = []
154153

155154
# Iterate through all combos
156-
for num_procs in np.arange(1, num_procs_range+1, 1):
157-
for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step):
158-
# Cmd-level
159-
cmd_fin_str = self._run_cmdline_workflow(num_gb, num_procs)
160-
cmd_node_stats = json.loads(cmd_fin_str)
161-
cmd_runtime_procs = int(cmd_node_stats['runtime_threads'])
162-
cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb'])
163-
164-
# Func-level
165-
func_fin_str = self._run_function_workflow(num_gb, num_procs)
166-
func_node_stats = json.loads(func_fin_str)
167-
func_runtime_procs = int(func_node_stats['runtime_threads'])
168-
func_runtime_gb = float(func_node_stats['runtime_memory_gb'])
169-
170-
# Calc errors
171-
cmd_procs_err = cmd_runtime_procs - num_procs
172-
cmd_gb_err = cmd_runtime_gb - num_gb
173-
func_procs_err = func_runtime_procs - num_procs
174-
func_gb_err = func_runtime_gb - num_gb
175-
176-
# Node dictionary
177-
results_dict = {'input_procs' : num_procs,
178-
'input_gb' : num_gb,
179-
'cmd_runtime_procs' : cmd_runtime_procs,
180-
'cmd_runtime_gb' : cmd_runtime_gb,
181-
'func_runtime_procs' : func_runtime_procs,
182-
'func_runtime_gb' : func_runtime_gb,
183-
'cmd_procs_err' : cmd_procs_err,
184-
'cmd_gb_err' : cmd_gb_err,
185-
'func_procs_err' : func_procs_err,
186-
'func_gb_err' : func_gb_err}
187-
# Append to list
188-
dict_list.append(results_dict)
155+
for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step):
156+
# Cmd-level
157+
cmd_fin_str = self._run_cmdline_workflow(num_gb, num_threads)
158+
cmd_node_stats = json.loads(cmd_fin_str)
159+
cmd_runtime_threads = int(cmd_node_stats['runtime_threads'])
160+
cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb'])
161+
162+
# Func-level
163+
func_fin_str = self._run_function_workflow(num_gb, num_threads)
164+
func_node_stats = json.loads(func_fin_str)
165+
func_runtime_threads = int(func_node_stats['runtime_threads'])
166+
func_runtime_gb = float(func_node_stats['runtime_memory_gb'])
167+
168+
# Calc errors
169+
cmd_threads_err = cmd_runtime_threads - num_threads
170+
cmd_gb_err = cmd_runtime_gb - num_gb
171+
func_threads_err = func_runtime_threads - num_threads
172+
func_gb_err = func_runtime_gb - num_gb
173+
174+
# Node dictionary
175+
results_dict = {'input_threads' : num_threads,
176+
'input_gb' : num_gb,
177+
'cmd_runtime_threads' : cmd_runtime_threads,
178+
'cmd_runtime_gb' : cmd_runtime_gb,
179+
'func_runtime_threads' : func_runtime_threads,
180+
'func_runtime_gb' : func_runtime_gb,
181+
'cmd_threads_err' : cmd_threads_err,
182+
'cmd_gb_err' : cmd_gb_err,
183+
'func_threads_err' : func_threads_err,
184+
'func_gb_err' : func_gb_err}
185+
# Append to list
186+
dict_list.append(results_dict)
189187

190188
# Create dataframe
191189
runtime_results_df = pd.DataFrame(dict_list)
192190

193191
# Return dataframe
194192
return runtime_results_df
195193

194+
def tiest_collect_range(self):
195+
num_threads = 1
196+
df = self._collect_range_runtime_stats(num_threads)
197+
198+
df.to_csv('/root/%d_thread_df.csv' % num_threads)
199+
196200
# Test node
197-
def _run_cmdline_workflow(self, num_gb, num_procs):
201+
def _run_cmdline_workflow(self, num_gb, num_threads):
198202
'''
199203
Function to run the use_resources cmdline script in a nipype workflow
200204
and return the runtime stats recorded by the profiler
@@ -237,22 +241,22 @@ def _run_cmdline_workflow(self, num_gb, num_procs):
237241

238242
# Input node
239243
input_node = pe.Node(util.IdentityInterface(fields=['num_gb',
240-
'num_procs']),
244+
'num_threads']),
241245
name='input_node')
242246
input_node.inputs.num_gb = num_gb
243-
input_node.inputs.num_procs = num_procs
247+
input_node.inputs.num_threads = num_threads
244248

245249
# Resources used node
246250
resource_node = pe.Node(UseResources(), name='resource_node')
247251
resource_node.interface.estimated_memory_gb = num_gb
248-
resource_node.interface.num_threads = num_procs
252+
resource_node.interface.num_threads = num_threads
249253

250254
# Connect workflow
251255
wf.connect(input_node, 'num_gb', resource_node, 'num_gb')
252-
wf.connect(input_node, 'num_procs', resource_node, 'num_procs')
256+
wf.connect(input_node, 'num_threads', resource_node, 'num_threads')
253257

254258
# Run workflow
255-
plugin_args = {'n_procs' : num_procs,
259+
plugin_args = {'n_procs' : num_threads,
256260
'memory' : num_gb,
257261
'status_callback' : log_nodes_cb}
258262
wf.run(plugin='MultiProc', plugin_args=plugin_args)
@@ -267,7 +271,7 @@ def _run_cmdline_workflow(self, num_gb, num_procs):
267271
return finish_str
268272

269273
# Test node
270-
def _run_function_workflow(self, num_gb, num_procs):
274+
def _run_function_workflow(self, num_gb, num_threads):
271275
'''
272276
Function to run the use_resources() function in a nipype workflow
273277
and return the runtime stats recorded by the profiler
@@ -310,26 +314,26 @@ def _run_function_workflow(self, num_gb, num_procs):
310314

311315
# Input node
312316
input_node = pe.Node(util.IdentityInterface(fields=['num_gb',
313-
'num_procs']),
317+
'num_threads']),
314318
name='input_node')
315319
input_node.inputs.num_gb = num_gb
316-
input_node.inputs.num_procs = num_procs
320+
input_node.inputs.num_threads = num_threads
317321

318322
# Resources used node
319-
resource_node = pe.Node(util.Function(input_names=['num_procs',
323+
resource_node = pe.Node(util.Function(input_names=['num_threads',
320324
'num_gb'],
321325
output_names=[],
322326
function=use_resources),
323327
name='resource_node')
324328
resource_node.interface.estimated_memory_gb = num_gb
325-
resource_node.interface.num_threads = num_procs
329+
resource_node.interface.num_threads = num_threads
326330

327331
# Connect workflow
328332
wf.connect(input_node, 'num_gb', resource_node, 'num_gb')
329-
wf.connect(input_node, 'num_procs', resource_node, 'num_procs')
333+
wf.connect(input_node, 'num_threads', resource_node, 'num_threads')
330334

331335
# Run workflow
332-
plugin_args = {'n_procs' : num_procs,
336+
plugin_args = {'n_procs' : num_threads,
333337
'memory' : num_gb,
334338
'status_callback' : log_nodes_cb}
335339
wf.run(plugin='MultiProc', plugin_args=plugin_args)
@@ -378,12 +382,12 @@ def test_cmdline_profiling(self):
378382
# Error message formatting
379383
mem_err = 'Input memory: %f is not within %.1f%% of runtime '\
380384
'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb)
381-
procs_err = 'Input threads: %d is not equal to runtime threads: %d' \
385+
threads_err = 'Input threads: %d is not equal to runtime threads: %d' \
382386
% (expected_runtime_threads, runtime_threads)
383387

384388
# Assert runtime stats are what was input
385389
self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err)
386-
self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err)
390+
self.assertEqual(expected_runtime_threads, runtime_threads, msg=threads_err)
387391

388392
# Test resources were used as expected
389393
@unittest.skipIf(run_profiler == False, skip_profile_msg)
@@ -420,12 +424,12 @@ def test_function_profiling(self):
420424
# Error message formatting
421425
mem_err = 'Input memory: %f is not within %.1f%% of runtime '\
422426
'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb)
423-
procs_err = 'Input procs: %d is not equal to runtime procs: %d' \
427+
threads_err = 'Input threads: %d is not equal to runtime threads: %d' \
424428
% (expected_runtime_threads, runtime_threads)
425429

426430
# Assert runtime stats are what was input
427431
self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err)
428-
self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err)
432+
self.assertEqual(expected_runtime_threads, runtime_threads, msg=threads_err)
429433

430434

431435
# Command-line run-able unittest module

nipype/interfaces/tests/use_resources

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,10 @@
44

55
'''
66
Python script to use a certain amount of RAM on disk and number of
7-
processors
7+
threads
88
99
Usage:
10-
use_resources -g <num_gb> -p <num_procs>
10+
use_resources -g <num_gb> -p <num_threads>
1111
'''
1212

1313
# Function to occupy GB of memory
@@ -42,25 +42,25 @@ if __name__ == '__main__':
4242
# Add arguments
4343
parser.add_argument('-g', '--num_gb', nargs=1, required=True,
4444
help='Number of GB RAM to use, can be float or int')
45-
parser.add_argument('-p', '--num_procs', nargs=1, required=True,
46-
help='Number of processors to run in parallel')
45+
parser.add_argument('-p', '--num_threads', nargs=1, required=True,
46+
help='Number of threads to run in parallel')
4747

4848
# Parse args
4949
args = parser.parse_args()
5050

5151
# Init variables
5252
num_gb = float(args.num_gb[0])
53-
num_procs = int(args.num_procs[0])
53+
num_threads = int(args.num_threads[0])
5454

55-
# Build proc list
56-
proc_list = []
57-
for idx in range(num_procs):
58-
proc_list.append(Thread(target=use_gb_ram, args=(num_gb/num_procs,)))
55+
# Build thread list
56+
thread_list = []
57+
for idx in range(num_threads):
58+
thread_list.append(Thread(target=use_gb_ram, args=(num_gb/num_threads,)))
5959

6060
# Run multi-threaded
61-
print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs)
62-
for proc in proc_list:
63-
proc.start()
61+
print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads)
62+
for thread in thread_list:
63+
thread.start()
6464

65-
for proc in proc_list:
66-
proc.join()
65+
for thread in thread_list:
66+
thread.join()

0 commit comments

Comments
 (0)