  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
- * Copyright (c) 2011-2012 Los Alamos National Security, LLC. All rights
+ * Copyright (c) 2011-2017 Los Alamos National Security, LLC. All rights
  *                         reserved.
  * Copyright (c) 2014-2017 Intel, Inc. All rights reserved.
  * Copyright (c) 2015      Research Organization for Information Science
@@ -78,33 +78,24 @@ int orte_ras_base_node_insert(opal_list_t* nodes, orte_job_t *jdata)
 
     /* get the hnp node's info */
     hnp_node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, 0);
-#if SLURM_CRAY_ENV
-    /* if we are in a Cray-SLURM environment, then we cannot
-     * launch procs local to the HNP. The problem
-     * is the MPI processes launched on the head node (where the
-     * ORTE_PROC_IS_HNP evalues to true) get launched by a daemon
-     * (mpirun) which is not a child of a slurmd daemon. This
-     * means that any RDMA credentials obtained via the odls/alps
-     * local launcher are incorrect. Test for this condition. If
-     * found, then take steps to ensure we launch a daemon on
-     * the same node as mpirun and that it gets used to fork
-     * local procs instead of mpirun so they get the proper
-     * credential */
-    if (NULL != hnp_node) {
-        OPAL_LIST_FOREACH(node, nodes, orte_node_t) {
-            if (orte_ifislocal(node->name)) {
-                orte_hnp_is_allocated = true;
-                break;
+
+    if ((orte_ras_base.launch_orted_on_hn == true) &&
+        (orte_managed_allocation)) {
+        if (NULL != hnp_node) {
+            OPAL_LIST_FOREACH(node, nodes, orte_node_t) {
+                if (orte_ifislocal(node->name)) {
+                    orte_hnp_is_allocated = true;
+                    break;
+                }
+            }
+            if (orte_hnp_is_allocated && !(ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping) &
+                ORTE_MAPPING_NO_USE_LOCAL)) {
+                hnp_node->name = strdup("mpirun");
+                skiphnp = true;
+                ORTE_SET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping, ORTE_MAPPING_NO_USE_LOCAL);
             }
         }
-        if (orte_hnp_is_allocated && !(ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping) & ORTE_MAPPING_NO_USE_LOCAL)) {
-            hnp_node->name = strdup("mpirun");
-            skiphnp = true;
-            ORTE_SET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping, ORTE_MAPPING_NO_USE_LOCAL);
-        }
     }
-#endif
-
 
     /* cycle through the list */
     while (NULL != (item = opal_list_remove_first(nodes))) {
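
Not visible in this hunk is where the new orte_ras_base.launch_orted_on_hn flag gets its value. Below is a minimal, hypothetical sketch of how such a knob is typically exposed through Open MPI's MCA variable system; the function name, parameter name, description string, info level, and header choices are assumptions for illustration, not the registration code from this commit.

/* Hypothetical sketch only: assumes Open MPI's mca_base_var_register() API
 * and the orte_ras_base globals; the real registration for this flag lives
 * elsewhere in the ras base framework and may differ in detail. */
#include "opal/mca/base/mca_base_var.h"
#include "orte/mca/ras/base/base.h"

static int ras_base_register_launch_orted_on_hn(void)
{
    /* default: do not force an orted onto the head node */
    orte_ras_base.launch_orted_on_hn = false;

    /* would expose the flag as an MCA parameter named
     * "ras_base_launch_orted_on_hn" (name assumed) */
    return mca_base_var_register("orte", "ras", "base", "launch_orted_on_hn",
                                 "Launch an orte daemon on the head node",
                                 MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &orte_ras_base.launch_orted_on_hn);
}

With a parameter along those lines, a user on a managed allocation could request head-node placement with something like "mpirun --mca ras_base_launch_orted_on_hn 1 ..." (parameter name assumed as above). When that flag and orte_managed_allocation are both set, the new branch marks the HNP as allocated, renames its node entry to "mpirun", sets skiphnp, and adds ORTE_MAPPING_NO_USE_LOCAL, giving the same effect the deleted #if SLURM_CRAY_ENV block achieved, but driven by a run-time option instead of a Cray-specific compile-time guard.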
|