@@ -341,37 +341,21 @@ int orte_rmaps_base_get_target_nodes(opal_list_t *allocated_nodes, orte_std_cntr
     }
 
 addknown:
-    /* if the hnp was allocated, include it unless flagged not to */
-    if (orte_hnp_is_allocated && !(ORTE_GET_MAPPING_DIRECTIVE(policy) & ORTE_MAPPING_NO_USE_LOCAL)) {
-        if (NULL != (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, 0))) {
-            if (ORTE_NODE_STATE_DO_NOT_USE == node->state) {
-                OPAL_OUTPUT_VERBOSE((10, orte_rmaps_base_framework.framework_output,
-                                     "HNP IS MARKED NO_USE"));
-                /* clear this for future use, but don't include it */
-                node->state = ORTE_NODE_STATE_UP;
-            } else if (ORTE_NODE_STATE_NOT_INCLUDED != node->state) {
-                OBJ_RETAIN(node);
-                if (initial_map) {
-                    /* if this is the first app_context we
-                     * are getting for an initial map of a job,
-                     * then mark all nodes as unmapped
-                     */
-                    ORTE_FLAG_UNSET(node, ORTE_NODE_FLAG_MAPPED);
-                }
-                opal_list_append(allocated_nodes, &node->super);
-            }
-        }
-    }
-
     /* add everything in the node pool that can be used - add them
      * in daemon order, which may be different than the order in the
      * node pool. Since an empty list is passed into us, the list at
      * this point either has the HNP node or nothing, and the HNP
      * node obviously has a daemon on it (us!)
      */
     if (0 == opal_list_get_size(allocated_nodes)) {
-        /* the list is empty */
-        nd = NULL;
+        /* the list is empty - if the HNP is allocated, then add it */
+        if (orte_hnp_is_allocated) {
+            nd = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, 0);
+            OBJ_RETAIN(nd);    /* the list holds its own reference */
+            opal_list_append(allocated_nodes, &nd->super);
+        } else {
+            nd = NULL;
+        }
     } else {
         nd = (orte_node_t*)opal_list_get_last(allocated_nodes);
     }
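Context for the hunk above: nodes placed on allocated_nodes remain referenced by orte_node_pool, so the pattern used elsewhere in this function (including the block removed above) is to OBJ_RETAIN a node before appending it, balancing the OBJ_RELEASE issued when a later filter prunes it from the list. A minimal sketch of that pairing, not part of the patch; the local variable name hnp is illustrative only, and the objects it touches are the ones already in scope here:

    /* sketch only - take a list reference on the pool's HNP entry (slot 0) */
    orte_node_t *hnp = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, 0);
    if (NULL != hnp) {
        OBJ_RETAIN(hnp);                                 /* reference owned by the list */
        opal_list_append(allocated_nodes, &hnp->super);
    }
    /* ...later, pruning the node releases only the list's reference,
     * leaving the node pool's reference intact... */
    /* opal_list_remove_item(allocated_nodes, &hnp->super); */
    /* OBJ_RELEASE(hnp); */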
@@ -487,18 +470,34 @@ int orte_rmaps_base_get_target_nodes(opal_list_t *allocated_nodes, orte_std_cntr
     } else {
         item = opal_list_get_first(allocated_nodes);
         while (item != opal_list_get_end(allocated_nodes)) {
+            node = (orte_node_t*)item;
+            opal_output(0, "CHECKING NODE %s", node->name);
             /** save the next pointer in case we remove this node */
             next = opal_list_get_next(item);
+            /* if the hnp was not allocated, or flagged not to be used, then
+             * remove it here - compare against slot 0 of the node pool (the
+             * HNP) without overwriting the node pointer used below */
+            if (!orte_hnp_is_allocated || (ORTE_GET_MAPPING_DIRECTIVE(policy) & ORTE_MAPPING_NO_USE_LOCAL)) {
+                if (node == (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, 0)) {
+                    opal_output(0, "REMOVING HNP NODE");
+                    opal_list_remove_item(allocated_nodes, item);
+                    OBJ_RELEASE(item);  /* "un-retain" it */
+                    item = next;
+                    continue;
+                }
+            }
             /** check to see if this node is fully used - remove if so */
-            node = (orte_node_t*)item;
             if (0 != node->slots_max && node->slots_inuse > node->slots_max) {
                 OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base_framework.framework_output,
                                      "%s Removing node %s: max %d inuse %d",
                                      ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                      node->name, node->slots_max, node->slots_inuse));
                 opal_list_remove_item(allocated_nodes, item);
                 OBJ_RELEASE(item);  /* "un-retain" it */
-            } else if (node->slots <= node->slots_inuse &&
+                item = next;
+                continue;
+            }
+            if (node->slots <= node->slots_inuse &&
                 (ORTE_MAPPING_NO_OVERSUBSCRIBE & ORTE_GET_MAPPING_DIRECTIVE(policy))) {
                 /* remove the node as fully used */
                 OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base_framework.framework_output,
@@ -507,14 +506,20 @@ int orte_rmaps_base_get_target_nodes(opal_list_t *allocated_nodes, orte_std_cntr
                                      node->name, node->slots, node->slots_inuse));
                 opal_list_remove_item(allocated_nodes, item);
                 OBJ_RELEASE(item);  /* "un-retain" it */
-            } else if (node->slots > node->slots_inuse) {
+                item = next;
+                continue;
+            }
+            if (node->slots > node->slots_inuse) {
                 /* add the available slots */
                 OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base_framework.framework_output,
                                      "%s node %s has %d slots available",
                                      ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                      node->name, node->slots - node->slots_inuse));
                 num_slots += node->slots - node->slots_inuse;
-            } else if (!(ORTE_MAPPING_NO_OVERSUBSCRIBE & ORTE_GET_MAPPING_DIRECTIVE(policy))) {
+                item = next;
+                continue;
+            }
+            if (!(ORTE_MAPPING_NO_OVERSUBSCRIBE & ORTE_GET_MAPPING_DIRECTIVE(policy))) {
                 /* nothing needed to do here - we don't add slots to the
                  * count as we don't have any available. Just let the mapper
                  * do what it needs to do to meet the request
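For readers following the restructured loop in the last two hunks: the else-if chain becomes a sequence of independent filters, each ending with "item = next; continue;" so a node can be unlinked mid-iteration without invalidating the traversal. A minimal sketch of that idiom over an opal_list_t, not taken from the patch; node_is_unusable() is a hypothetical placeholder for whichever filter applies, and allocated_nodes is assumed to be the same list the function already works on:

    opal_list_item_t *item, *next;
    orte_node_t *node;

    item = opal_list_get_first(allocated_nodes);
    while (item != opal_list_get_end(allocated_nodes)) {
        /* capture the successor first so removing "item" stays safe */
        next = opal_list_get_next(item);
        node = (orte_node_t*)item;

        if (node_is_unusable(node)) {       /* hypothetical filter */
            opal_list_remove_item(allocated_nodes, item);
            OBJ_RELEASE(item);              /* drop the list's reference */
            item = next;
            continue;
        }

        /* further filters follow the same shape... */
        item = next;
    }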