This repository was archived by the owner on Sep 30, 2022. It is now read-only.

Commit e7ba103

Merge pull request #813 from hjelmn/2.x_ompi_proc_threads
ompi/proc: make proc system always thread safe
2 parents: c3caf15 + eb325b0

1 file changed: +23 −23 lines changed


ompi/proc/proc.c

Lines changed: 23 additions & 23 deletions
@@ -84,10 +84,10 @@ void ompi_proc_destruct(ompi_proc_t* proc)
     if (NULL != proc->super.proc_hostname) {
         free(proc->super.proc_hostname);
     }
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     opal_list_remove_item(&ompi_proc_list, (opal_list_item_t*)proc);
     opal_hash_table_remove_value_ptr (&ompi_proc_hash, &proc->super.proc_name, sizeof (proc->super.proc_name));
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 }

 /**
@@ -209,7 +209,7 @@ opal_proc_t *ompi_proc_for_name (const opal_process_name_t proc_name)
         return &proc->super;
     }

-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     do {
         /* double-check that another competing thread has not added this proc */
         ret = opal_hash_table_get_value_ptr (&ompi_proc_hash, &proc_name, sizeof (proc_name), (void **) &proc);
@@ -231,7 +231,7 @@ opal_proc_t *ompi_proc_for_name (const opal_process_name_t proc_name)
             break;
         }
     } while (0);
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     return (opal_proc_t *) proc;
 }
@@ -319,7 +319,7 @@ int ompi_proc_complete_init(void)
     ompi_proc_t *proc;
     int ret, errcode = OMPI_SUCCESS;

-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);

     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         ret = ompi_proc_complete_init_single (proc);
@@ -328,7 +328,7 @@ int ompi_proc_complete_init(void)
             break;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     if (ompi_process_info.num_procs >= ompi_add_procs_cutoff) {
         uint16_t u16, *u16ptr;
@@ -420,7 +420,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
     my_name = *OMPI_CAST_RTE_NAME(&ompi_proc_local_proc->super.proc_name);

     /* First count how many match this jobid */
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, OMPI_CAST_RTE_NAME(&proc->super.proc_name), &my_name)) {
             ++count;
@@ -430,7 +430,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
     /* allocate an array */
     procs = (ompi_proc_t**) malloc(count * sizeof(ompi_proc_t*));
     if (NULL == procs) {
-        OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+        opal_mutex_unlock (&ompi_proc_lock);
         return NULL;
     }

@@ -455,7 +455,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
             procs[count++] = proc;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     *size = count;
     return procs;
@@ -519,7 +519,7 @@ ompi_proc_t** ompi_proc_all(size_t* size)
         return NULL;
     }

-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         /* We know this isn't consistent with the behavior in ompi_proc_world,
          * but we are leaving the RETAIN for now because the code using this function
@@ -530,7 +530,7 @@ ompi_proc_t** ompi_proc_all(size_t* size)
         OBJ_RETAIN(proc);
         procs[count++] = proc;
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
     *size = count;
     return procs;
 }
@@ -561,14 +561,14 @@ ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )

     /* return the proc-struct which matches this jobid+process id */
     mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->super.proc_name, name)) {
             rproc = proc;
             break;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     return rproc;
 }
@@ -580,7 +580,7 @@ int ompi_proc_refresh(void)
     ompi_vpid_t i = 0;
     int ret=OMPI_SUCCESS;

-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);

     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         /* Does not change: proc->super.proc_name.vpid */
@@ -603,7 +603,7 @@ int ompi_proc_refresh(void)
         }
     }

-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     return ret;
 }
@@ -615,7 +615,7 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
     int rc;
     char *nspace;

-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);

     /* cycle through the provided array, packing the OMPI level
      * data for each proc. This data may or may not be included
@@ -634,7 +634,7 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
         rc = opal_dss.pack(buf, &(proclist[i]->super.proc_name), 1, OMPI_NAME);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* retrieve and send the corresponding nspace for this job
@@ -643,25 +643,25 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
         rc = opal_dss.pack(buf, &nspace, 1, OPAL_STRING);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* pack architecture flag */
         rc = opal_dss.pack(buf, &(proclist[i]->super.proc_arch), 1, OPAL_UINT32);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* pass the name of the host this proc is on */
         rc = opal_dss.pack(buf, &(proclist[i]->super.proc_hostname), 1, OPAL_STRING);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
     return OMPI_SUCCESS;
 }

@@ -673,7 +673,7 @@ ompi_proc_find_and_add(const ompi_process_name_t * name, bool* isnew)

     /* return the proc-struct which matches this jobid+process id */
     mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->super.proc_name, name)) {
             rproc = proc;
@@ -696,7 +696,7 @@ ompi_proc_find_and_add(const ompi_process_name_t * name, bool* isnew)
            going to be pain later... */
     }

-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);

     return rproc;
 }
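
For context on the commit message: OPAL_THREAD_LOCK/OPAL_THREAD_UNLOCK are conditional macros that only touch the mutex when the OPAL layer believes threads are in use, while opal_mutex_lock/opal_mutex_unlock acquire the lock unconditionally, which is what makes the proc list and hash table "always thread safe" after this change. Below is a minimal, self-contained sketch of that distinction; it is pthread-based demo code, not Open MPI source, and the demo_* names are hypothetical stand-ins for the OPAL equivalents.

    /* Sketch only: models the two locking patterns this commit swaps. */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t demo_proc_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool demo_using_threads = false;  /* true only for threaded runs */

    /* Old pattern (OPAL_THREAD_LOCK-style): the lock is skipped entirely
     * when thread support is not enabled, so the shared proc structures
     * are unprotected if threads appear anyway. */
    static void demo_conditional_update(void)
    {
        if (demo_using_threads) {
            pthread_mutex_lock(&demo_proc_lock);
        }
        /* ... modify the shared proc list / hash table ... */
        if (demo_using_threads) {
            pthread_mutex_unlock(&demo_proc_lock);
        }
    }

    /* New pattern (opal_mutex_lock-style): the lock is always taken, so
     * the proc structures stay consistent regardless of the requested
     * MPI thread level. */
    static void demo_unconditional_update(void)
    {
        pthread_mutex_lock(&demo_proc_lock);
        /* ... modify the shared proc list / hash table ... */
        pthread_mutex_unlock(&demo_proc_lock);
    }

The trade-off is that the unconditional form pays the mutex cost even in single-threaded runs, in exchange for correctness whenever the proc system is touched from multiple threads.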
