Skip to content

Commit ba47f73

Browse files
author
Ralph Castain
authored
Merge pull request #2992 from rhc54/topic/pe1
Fix binding policy bug and support pe=1 modifier
2 parents 578d881 + 0ae873d commit ba47f73

File tree

4 files changed

+82
-57
lines changed

4 files changed

+82
-57
lines changed

opal/mca/hwloc/hwloc.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
22
/*
33
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
4-
* Copyright (c) 2013-2015 Intel, Inc. All rights reserved.
4+
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
55
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
66
* reserved.
77
*
@@ -191,7 +191,7 @@ typedef uint16_t opal_binding_policy_t;
191191
#define OPAL_GET_BINDING_POLICY(pol) \
192192
((pol) & 0x0fff)
193193
#define OPAL_SET_BINDING_POLICY(target, pol) \
194-
(target) = (pol) | (((target) & 0xf000) | OPAL_BIND_GIVEN)
194+
(target) = (pol) | (((target) & 0x2000) | OPAL_BIND_GIVEN)
195195
#define OPAL_SET_DEFAULT_BINDING_POLICY(target, pol) \
196196
do { \
197197
if (!OPAL_BINDING_POLICY_IS_SET((target))) { \

orte/mca/rmaps/base/rmaps_base_frame.c

Lines changed: 22 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
* Copyright (c) 2006-2015 Cisco Systems, Inc. All rights reserved.
1313
* Copyright (c) 2011-2013 Los Alamos National Security, LLC.
1414
* All rights reserved.
15-
* Copyright (c) 2014-2015 Intel, Inc. All rights reserved.
15+
* Copyright (c) 2014-2017 Intel, Inc. All rights reserved.
1616
* Copyright (c) 2014-2015 Research Organization for Information Science
1717
* and Technology (RIST). All rights reserved.
1818
* $COPYRIGHT$
@@ -149,7 +149,7 @@ static int orte_rmaps_base_register(mca_base_register_flag_t flags)
149149
MCA_BASE_VAR_SCOPE_READONLY, &rmaps_base_bynode);
150150

151151
/* #cpus/rank to use */
152-
orte_rmaps_base.cpus_per_rank = 1;
152+
orte_rmaps_base.cpus_per_rank = 0;
153153
var_id = mca_base_var_register("orte", "rmaps", "base", "cpus_per_proc",
154154
"Number of cpus to use for each rank [1-2**15 (default=1)]",
155155
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
@@ -280,7 +280,7 @@ static int orte_rmaps_base_open(mca_base_open_flag_t flags)
280280
return ORTE_ERR_SILENT;
281281
}
282282
}
283-
if (1 < orte_rmaps_base.cpus_per_rank) {
283+
if (0 < orte_rmaps_base.cpus_per_rank) {
284284
orte_show_help("help-orte-rmaps-base.txt", "deprecated", true,
285285
"--cpus-per-proc, -cpus-per-proc, --cpus-per-rank, -cpus-per-rank",
286286
"--map-by <obj>:PE=N, default <obj>=NUMA",
@@ -376,8 +376,8 @@ static int orte_rmaps_base_open(mca_base_open_flag_t flags)
376376
ORTE_SET_RANKING_DIRECTIVE(orte_rmaps_base.ranking, ORTE_RANKING_GIVEN);
377377
}
378378

379-
if (1 < orte_rmaps_base.cpus_per_rank) {
380-
/* if we were asked for multiple cpus/proc, then we have to
379+
if (0 < orte_rmaps_base.cpus_per_rank) {
380+
/* if we were asked for cpus/proc, then we have to
381381
* bind to those cpus - any other binding policy is an
382382
* error
383383
*/
@@ -403,24 +403,27 @@ static int orte_rmaps_base_open(mca_base_open_flag_t flags)
403403
if (opal_hwloc_use_hwthreads_as_cpus) {
404404
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_HWTHREAD);
405405
} else {
406+
opal_output(0, "SETTING BINDING TO CORE");
406407
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_CORE);
407408
}
408409
}
409-
/* we also need to ensure we are mapping to a high-enough level to have
410-
* multiple cpus beneath it - by default, we'll go to the NUMA level */
411-
if (ORTE_MAPPING_GIVEN & ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping)) {
412-
if (ORTE_GET_MAPPING_POLICY(orte_rmaps_base.mapping) == ORTE_MAPPING_BYHWTHREAD ||
413-
(ORTE_GET_MAPPING_POLICY(orte_rmaps_base.mapping) == ORTE_MAPPING_BYCORE &&
414-
!opal_hwloc_use_hwthreads_as_cpus)) {
415-
orte_show_help("help-orte-rmaps-base.txt", "mapping-too-low-init", true);
416-
return ORTE_ERR_SILENT;
410+
if (1 < orte_rmaps_base.cpus_per_rank) {
411+
/* we need to ensure we are mapping to a high-enough level to have
412+
* multiple cpus beneath it - by default, we'll go to the NUMA level */
413+
if (ORTE_MAPPING_GIVEN & ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping)) {
414+
if (ORTE_GET_MAPPING_POLICY(orte_rmaps_base.mapping) == ORTE_MAPPING_BYHWTHREAD ||
415+
(ORTE_GET_MAPPING_POLICY(orte_rmaps_base.mapping) == ORTE_MAPPING_BYCORE &&
416+
!opal_hwloc_use_hwthreads_as_cpus)) {
417+
orte_show_help("help-orte-rmaps-base.txt", "mapping-too-low-init", true);
418+
return ORTE_ERR_SILENT;
419+
}
420+
} else {
421+
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
422+
"%s rmaps:base pe/rank set - setting mapping to BYNUMA",
423+
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
424+
ORTE_SET_MAPPING_POLICY(orte_rmaps_base.mapping, ORTE_MAPPING_BYNUMA);
425+
ORTE_SET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping, ORTE_MAPPING_GIVEN);
417426
}
418-
} else {
419-
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
420-
"%s rmaps:base pe/rank set - setting mapping to BYNUMA",
421-
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
422-
ORTE_SET_MAPPING_POLICY(orte_rmaps_base.mapping, ORTE_MAPPING_BYNUMA);
423-
ORTE_SET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping, ORTE_MAPPING_GIVEN);
424427
}
425428
}
426429

orte/mca/rmaps/base/rmaps_base_map_job.c

Lines changed: 57 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
5050
{
5151
orte_job_t *jdata;
5252
orte_node_t *node;
53-
int rc, i;
54-
bool did_map, given;
53+
int rc, i, ppx;
54+
bool did_map, given, pernode;
5555
orte_rmaps_base_selected_module_t *mod;
5656
orte_job_t *parent;
5757
orte_state_caddy_t *caddy = (orte_state_caddy_t*)cbdata;
@@ -71,6 +71,22 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
7171
"mca:rmaps: mapping job %s",
7272
ORTE_JOBID_PRINT(jdata->jobid));
7373

74+
if (NULL == jdata->map->ppr && NULL != orte_rmaps_base.ppr) {
75+
jdata->map->ppr = strdup(orte_rmaps_base.ppr);
76+
}
77+
if (NULL != jdata->map->ppr) {
78+
/* get the procs/object */
79+
ppx = strtoul(jdata->map->ppr, NULL, 10);
80+
if (NULL != strstr(jdata->map->ppr, "node")) {
81+
pernode = true;
82+
} else {
83+
pernode = false;
84+
}
85+
}
86+
if (0 == jdata->map->cpus_per_rank) {
87+
jdata->map->cpus_per_rank = orte_rmaps_base.cpus_per_rank;
88+
}
89+
7490
/* compute the number of procs and check validity */
7591
nprocs = 0;
7692
for (i=0; i < jdata->apps->size; i++) {
@@ -80,34 +96,47 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
8096
orte_std_cntr_t slots;
8197
OBJ_CONSTRUCT(&nodes, opal_list_t);
8298
orte_rmaps_base_get_target_nodes(&nodes, &slots, app, ORTE_MAPPING_BYNODE, true, true);
83-
/* if we are in a managed allocation, then all is good - otherwise,
84-
* we have to do a little more checking */
85-
if (!orte_managed_allocation) {
86-
/* if all the nodes have their slots given, then we are okay */
87-
given = true;
88-
OPAL_LIST_FOREACH(node, &nodes, orte_node_t) {
89-
if (!ORTE_FLAG_TEST(node, ORTE_NODE_FLAG_SLOTS_GIVEN)) {
90-
given = false;
91-
break;
99+
if (NULL != jdata->map->ppr) {
100+
if (pernode) {
101+
nprocs += ppx * opal_list_get_size(&nodes);
102+
} else {
103+
/* must be procs/socket, so add in #sockets for each node */
104+
slots = 0;
105+
OPAL_LIST_FOREACH(node, &nodes, orte_node_t) {
106+
slots += ppx * opal_hwloc_base_get_nbobjs_by_type(node->topology->topo,
107+
HWLOC_OBJ_SOCKET, 0,
108+
OPAL_HWLOC_AVAILABLE);
92109
}
110+
nprocs += slots;
93111
}
94-
/* if -host or -hostfile was given, and the slots were not,
95-
* then this is no longer allowed */
96-
if (!given &&
97-
(orte_get_attribute(&app->attributes, ORTE_APP_DASH_HOST, NULL, OPAL_STRING) ||
98-
orte_get_attribute(&app->attributes, ORTE_APP_HOSTFILE, NULL, OPAL_STRING))) {
99-
/* inform the user of the error */
100-
orte_show_help("help-orte-rmaps-base.txt", "num-procs-not-specified", true);
101-
ORTE_ACTIVATE_JOB_STATE(jdata, ORTE_JOB_STATE_MAP_FAILED);
102-
OBJ_RELEASE(caddy);
103-
OPAL_LIST_DESTRUCT(&nodes);
104-
return;
112+
} else {
113+
/* if we are in a managed allocation, then all is good - otherwise,
114+
* we have to do a little more checking */
115+
if (!orte_managed_allocation) {
116+
/* if all the nodes have their slots given, then we are okay */
117+
given = true;
118+
OPAL_LIST_FOREACH(node, &nodes, orte_node_t) {
119+
if (!ORTE_FLAG_TEST(node, ORTE_NODE_FLAG_SLOTS_GIVEN)) {
120+
given = false;
121+
break;
122+
}
123+
}
124+
/* if -host or -hostfile was given, and the slots were not,
125+
* then this is no longer allowed */
126+
if (!given &&
127+
(orte_get_attribute(&app->attributes, ORTE_APP_DASH_HOST, NULL, OPAL_STRING) ||
128+
orte_get_attribute(&app->attributes, ORTE_APP_HOSTFILE, NULL, OPAL_STRING))) {
129+
/* inform the user of the error */
130+
orte_show_help("help-orte-rmaps-base.txt", "num-procs-not-specified", true);
131+
ORTE_ACTIVATE_JOB_STATE(jdata, ORTE_JOB_STATE_MAP_FAILED);
132+
OBJ_RELEASE(caddy);
133+
OPAL_LIST_DESTRUCT(&nodes);
134+
return;
135+
}
105136
}
106-
}
107-
OPAL_LIST_DESTRUCT(&nodes);
108-
if (ORTE_MAPPING_PPR != ORTE_GET_MAPPING_POLICY(jdata->map->mapping)) {
109137
nprocs += slots;
110138
}
139+
OPAL_LIST_DESTRUCT(&nodes);
111140
} else {
112141
nprocs += app->num_procs;
113142
}
@@ -116,8 +145,8 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
116145

117146

118147
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
119-
"mca:rmaps: setting mapping policies for job %s",
120-
ORTE_JOBID_PRINT(jdata->jobid));
148+
"mca:rmaps: setting mapping policies for job %s nprocs %d",
149+
ORTE_JOBID_PRINT(jdata->jobid), (int)nprocs);
121150

122151
if (!jdata->map->display_map) {
123152
jdata->map->display_map = orte_rmaps_base.display_map;
@@ -187,13 +216,6 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
187216
jdata->map->ranking = orte_rmaps_base.ranking;
188217
}
189218

190-
if (NULL == jdata->map->ppr && NULL != orte_rmaps_base.ppr) {
191-
jdata->map->ppr = strdup(orte_rmaps_base.ppr);
192-
}
193-
if (0 == jdata->map->cpus_per_rank) {
194-
jdata->map->cpus_per_rank = orte_rmaps_base.cpus_per_rank;
195-
}
196-
197219
/* define the binding policy for this job - if the user specified one
198220
* already (e.g., during the call to comm_spawn), then we don't
199221
* override it */
@@ -205,7 +227,7 @@ void orte_rmaps_base_map_job(int fd, short args, void *cbdata)
205227
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
206228
"mca:rmaps[%d] binding policy given", __LINE__);
207229
jdata->map->binding = opal_hwloc_binding_policy;
208-
} else if (1 < jdata->map->cpus_per_rank) {
230+
} else if (0 < jdata->map->cpus_per_rank) {
209231
/* bind to cpus */
210232
if (opal_hwloc_use_hwthreads_as_cpus) {
211233
/* if we are using hwthread cpus, then bind to those */

orte/runtime/orte_globals.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -833,7 +833,7 @@ static void orte_job_map_construct(orte_job_map_t* map)
833833
map->ranking = 0;
834834
map->binding = 0;
835835
map->ppr = NULL;
836-
map->cpus_per_rank = 1;
836+
map->cpus_per_rank = 0;
837837
map->display_map = false;
838838
map->num_new_daemons = 0;
839839
map->daemon_vpid_start = ORTE_VPID_INVALID;

0 commit comments

Comments (0)