17 | 17 | import os |
18 | 18 | import sys |
19 | 19 | import warnings |
20 | | -import yaml |
21 | 20 | from multiprocessing import Process |
22 | 21 | from time import strftime |
| 22 | +import yaml |
23 | 23 | from voluptuous.error import Invalid |
24 | | -from CPAC.utils.configuration import Configuration |
| 24 | +from CPAC.utils.configuration import check_pname, Configuration, set_subject |
25 | 25 | from CPAC.utils.ga import track_run |
26 | 26 | from CPAC.utils.monitoring import failed_to_start, log_nodes_cb |
27 | | -from CPAC.longitudinal_pipeline.longitudinal_workflow import ( |
28 | | - anat_longitudinal_wf, |
29 | | - func_preproc_longitudinal_wf, |
30 | | - func_longitudinal_template_wf |
31 | | -) |
| 27 | +from CPAC.longitudinal_pipeline.longitudinal_workflow import \ |
| 28 | + anat_longitudinal_wf |
32 | 29 | from CPAC.utils.yaml_template import upgrade_pipeline_to_1_8 |
33 | 30 |
34 | 31 |
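Note on the import changes above: `yaml` moves below the stdlib imports, the multi-name `longitudinal_workflow` import narrows to just `anat_longitudinal_wf`, and two helpers, `check_pname` and `set_subject`, come in from `CPAC.utils.configuration`. A minimal sketch of hypothetical stand-ins for those two helpers, inferred only from their call sites later in this diff (`check_pname(p_name, c)` returning a pipeline name, `set_subject(sub, c)[2]` yielding a per-subject log directory); the real implementations in `CPAC.utils.configuration` differ:

```python
# Hypothetical stand-ins inferred from call sites in this diff; the real
# CPAC.utils.configuration implementations differ.
import os

def check_pname(p_name, cfg):
    # Resolve the pipeline name, falling back to the configured one.
    return p_name or cfg['pipeline_setup', 'pipeline_name']

def set_subject(sub, cfg):
    # Return a triple whose index [2] is the per-subject log directory
    # that failed_to_start() consumes later in this diff.
    subject_id = '_'.join(str(sub[key]) for key in ('subject_id', 'unique_id')
                          if key in sub)
    log_dir = os.path.join(cfg['pipeline_setup', 'log_directory', 'path'],
                           subject_id)
    return sub, subject_id, log_dir
```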
@@ -324,10 +321,11 @@ def run(subject_list_file, config_file=None, p_name=None, plugin=None, |
324 | 321 | warnings.warn("We recommend that the working directory full path " |
325 | 322 | "should have less then 70 characters. " |
326 | 323 | "Long paths might not work in your operating system.") |
327 | | - warnings.warn("Current working directory: %s" % c.pipeline_setup['working_directory']['path']) |
| 324 | + warnings.warn("Current working directory: " |
| 325 | + f"{c.pipeline_setup['working_directory']['path']}") |
328 | 326 |
329 | 327 | # Get the pipeline name |
330 | | - p_name = p_name or c.pipeline_setup['pipeline_name'] |
| 328 | + p_name = check_pname(p_name, c) |
331 | 329 |
332 | 330 | # Load in subject list |
333 | 331 | try: |
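The hunk above swaps the inline fallback `p_name or c.pipeline_setup['pipeline_name']` for the imported `check_pname(p_name, c)` and rewraps the long `%`-formatted warning as an f-string. Note that the surrounding code reads the same setting two ways: attribute style (`c.pipeline_setup['working_directory']['path']`) here and tuple-key style (`c['pipeline_setup', 'working_directory', 'path']`) in the next hunk. A quick illustration, assuming CPAC's `Configuration` accepts a plain nested dict and exposes both access styles over the same mapping, as the diff suggests:

```python
# Illustration only; assumes Configuration accepts a nested dict and
# exposes both attribute-style and tuple-key access, as this diff suggests.
from CPAC.utils.configuration import Configuration

c = Configuration({'pipeline_setup':
                   {'working_directory': {'path': '/scratch/work'}}})
assert (c.pipeline_setup['working_directory']['path']
        == c['pipeline_setup', 'working_directory', 'path'])
```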
@@ -577,82 +575,89 @@ def replace_index(target1, target2, file_path): |
577 | 575 | p_name, plugin, plugin_args, test_config) |
578 | 576 | except Exception as exception: # pylint: disable=broad-except |
579 | 577 | exitcode = 1 |
580 | | - failed_to_start(c['pipeline_setup', 'log_directory', |
581 | | - 'path'], exception) |
| 578 | + failed_to_start(set_subject(sub, c)[2], exception) |
582 | 579 | return exitcode |
583 | 580 |
584 | | - pid = open(os.path.join( |
585 | | - c.pipeline_setup['working_directory']['path'], 'pid.txt' |
586 | | - ), 'w') |
587 | | - |
588 | 581 | # Init job queue |
589 | 582 | job_queue = [] |
590 | 583 |
591 | 584 | # Allocate processes |
592 | | - processes = [ |
593 | | - Process(target=run_workflow, |
594 | | - args=(sub, c, True, pipeline_timing_info, |
595 | | - p_name, plugin, plugin_args, test_config)) |
596 | | - for sub in sublist |
597 | | - ] |
598 | | - |
599 | | - # If we're allocating more processes than are subjects, run them all |
600 | | - if len(sublist) <= c.pipeline_setup['system_config']['num_participants_at_once']: |
601 | | - for p in processes: |
602 | | - try: |
603 | | - p.start() |
604 | | - print(p.pid, file=pid) |
605 | | - except Exception: # pylint: disable=broad-except |
606 | | - exitcode = 1 |
607 | | - failed_to_start(c['pipeline_setup', 'log_directory', |
608 | | - 'path']) |
609 | | - |
610 | | - # Otherwise manage resources to run processes incrementally |
611 | | - else: |
612 | | - idx = 0 |
613 | | - while idx < len(sublist): |
614 | | - # If the job queue is empty and we haven't started indexing |
615 | | - if len(job_queue) == 0 and idx == 0: |
616 | | - # Init subject process index |
617 | | - idc = idx |
618 | | - # Launch processes (one for each subject) |
619 | | - for p in processes[idc: idc + c.pipeline_setup[ |
620 | | - 'system_config']['num_participants_at_once']]: |
621 | | - try: |
622 | | - p.start() |
623 | | - print(p.pid, file=pid) |
624 | | - job_queue.append(p) |
625 | | - idx += 1 |
626 | | - except Exception: # pylint: disable=broad-except |
627 | | - exitcode = 1 |
628 | | - failed_to_start(c['pipeline_setup', |
629 | | - 'log_directory', 'path']) |
630 | | - # Otherwise, jobs are running - check them |
631 | | - else: |
632 | | - # Check every job in the queue's status |
633 | | - for job in job_queue: |
634 | | - # If the job is not alive |
635 | | - if not job.is_alive(): |
636 | | - # Find job and delete it from queue |
637 | | - print('found dead job ', job) |
638 | | - loc = job_queue.index(job) |
639 | | - del job_queue[loc] |
640 | | - # ...and start the next available process |
641 | | - # (subject) |
| 585 | + processes = [Process(target=run_workflow, |
| 586 | + args=(sub, c, True, pipeline_timing_info, p_name, |
| 587 | + plugin, plugin_args, test_config)) for |
| 588 | + sub in sublist] |
| 589 | + working_dir = os.path.join(c['pipeline_setup', 'working_directory', |
| 590 | + 'path'], p_name) |
| 591 | + # Create pipeline-specific working dir if not exists |
| 592 | + if not os.path.exists(working_dir): |
| 593 | + os.makedirs(working_dir) |
| 594 | + # Set PID context to pipeline-specific file |
| 595 | + with open(os.path.join(working_dir, 'pid.txt'), 'w', encoding='utf-8' |
| 596 | + ) as pid: |
| 597 | + # If we're allocating more processes than are subjects, run |
| 598 | + # them all |
| 599 | + if len(sublist) <= c.pipeline_setup['system_config'][ |
| 600 | + 'num_participants_at_once']: |
| 601 | + for i, _p in enumerate(processes): |
| 602 | + try: |
| 603 | + _p.start() |
| 604 | + print(_p.pid, file=pid) |
| 605 | + # pylint: disable=broad-except |
| 606 | + except Exception as exception: |
| 607 | + exitcode = 1 |
| 608 | + failed_to_start(set_subject(sublist[i], c)[2], |
| 609 | + exception) |
| 610 | + # Otherwise manage resources to run processes incrementally |
| 611 | + else: |
| 612 | + idx = 0 |
| 613 | + while idx < len(sublist): |
| 614 | + # If the job queue is empty and we haven't started indexing |
| 615 | + if len(job_queue) == 0 and idx == 0: |
| 616 | + # Init subject process index |
| 617 | + idc = idx |
| 618 | + # Launch processes (one for each subject) |
| 619 | + for _p in processes[idc: idc + c.pipeline_setup[ |
| 620 | + 'system_config']['num_participants_at_once']]: |
642 | 621 | try: |
643 | | - processes[idx].start() |
644 | | - # Append this to job queue and increment index |
645 | | - job_queue.append(processes[idx]) |
| 622 | + _p.start() |
| 623 | + print(_p.pid, file=pid) |
| 624 | + job_queue.append(_p) |
646 | 625 | idx += 1 |
647 | | - except Exception: # pylint: disable=broad-except |
| 626 | + # pylint: disable=broad-except |
| 627 | + except Exception as exception: |
648 | 628 | exitcode = 1 |
649 | | - failed_to_start(c['pipeline_setup', |
650 | | - 'log_directory', 'path']) |
651 | | - # Add sleep so while loop isn't consuming 100% of CPU |
652 | | - time.sleep(2) |
653 | | - # set exitcode to 1 if any exception |
654 | | - if hasattr(pid, 'exitcode'): |
655 | | - exitcode = exitcode or pid.exitcode |
656 | | - # Close PID txt file to indicate finish |
657 | | - pid.close() |
| 629 | + failed_to_start(set_subject(sublist[idx], |
| 630 | + c)[2], exception) |
| 631 | + # Otherwise, jobs are running - check them |
| 632 | + else: |
| 633 | + # Check every job in the queue's status |
| 634 | + for job in job_queue: |
| 635 | + # If the job is not alive |
| 636 | + if not job.is_alive(): |
| 637 | + # Find job and delete it from queue |
| 638 | + print('found dead job ', job) |
| 639 | + loc = job_queue.index(job) |
| 640 | + del job_queue[loc] |
| 641 | + # ...and start the next available |
| 642 | + # process (subject) |
| 643 | + try: |
| 644 | + processes[idx].start() |
| 645 | + # Append this to job queue and |
| 646 | + # increment index |
| 647 | + # pylint: disable=modified-iterating-list |
| 648 | + job_queue.append(processes[idx]) |
| 649 | + idx += 1 |
| 650 | + # pylint: disable=broad-except |
| 651 | + except Exception as exception: |
| 652 | + exitcode = 1 |
| 653 | + failed_to_start(set_subject(sublist[idx], |
| 654 | + c)[2], |
| 655 | + exception) |
| 656 | + # Add sleep so while loop isn't consuming 100% of CPU |
| 657 | + time.sleep(2) |
| 658 | + # set exitcode to 1 if any exception |
| 659 | + if hasattr(pid, 'exitcode'): |
| 660 | + exitcode = exitcode or pid.exitcode |
| 661 | + # Close PID txt file to indicate finish |
| 662 | + pid.close() |
658 | 663 | sys.exit(exitcode) |
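The rewritten block above keeps the original scheduling strategy but scopes `pid.txt` to a pipeline-specific working directory, opens it in a `with` context, and reports startup failures to the per-subject log via `set_subject(...)[2]`. The scheduling itself is a bounded-concurrency pattern: launch at most `num_participants_at_once` subject processes, poll for dead jobs, and hand each freed slot to the next pending process. A standalone sketch of that pattern under hypothetical names (`run_bounded` and `limit` are mine, not CPAC's); it drops the vestigial `hasattr(pid, 'exitcode')` check and trailing `pid.close()`, since a file handle has no `exitcode` attribute and the `with` block already closes the file:

```python
# Standalone sketch of the bounded-concurrency scheduling used above;
# run_bounded and limit are hypothetical names, not CPAC API.
import time
from multiprocessing import Process

def run_bounded(processes, limit):
    """Run `processes`, keeping at most `limit` alive at once."""
    job_queue, idx = [], 0
    while idx < len(processes):
        if not job_queue and idx == 0:
            # Launch the first batch, one process per available slot.
            for proc in processes[:limit]:
                proc.start()
                job_queue.append(proc)
                idx += 1
        else:
            # Poll a copy of the queue; replace each dead job with the
            # next pending process so the slot count stays at `limit`.
            for job in list(job_queue):
                if not job.is_alive() and idx < len(processes):
                    job_queue.remove(job)
                    processes[idx].start()
                    job_queue.append(processes[idx])
                    idx += 1
            time.sleep(2)  # avoid a 100% CPU busy-wait while polling
    for job in job_queue:  # wait for the final batch to finish
        job.join()

if __name__ == '__main__':
    # Toy usage: eight trivial jobs, at most three running at once.
    run_bounded([Process(target=print, args=(s,)) for s in range(8)],
                limit=3)
```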