Skip to content

Commit 7018e24

Browse files
authored
Fix a use-after-free bug for detached threads (#420)
* Fix a use-after-free bug for detached threads. The issue was pointed out by @alexcrichton in #405. * Rename map_base_lazy_free_queue, as it only keeps a single item. Also, align the comment style with musl.
1 parent 5862047 commit 7018e24

File tree

1 file changed

+30
-1
lines changed

1 file changed

+30
-1
lines changed

libc-top-half/musl/src/thread/pthread_create.c

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,17 @@ void __tl_sync(pthread_t td)
6060
if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
6161
}
6262

63+
#ifndef __wasilibc_unmodified_upstream
/* Deferred-free slot for the map_base (stack allocation) of a detached
 * thread that has exited. The exiting thread cannot free its own
 * map_base directly — it is still running on that stack, so an
 * immediate free would be a use-after-free. Instead the pointer is
 * parked here and released later, under __tl_lock. Only a single item
 * is kept to bound the amount of memory held back. */
static void *map_base_deferred_free;

/* Free the deferred map_base, if any, and clear the slot.
 * Must be called with __tl_lock held; the lock serializes all access
 * to map_base_deferred_free. free(NULL) is a no-op, so calling this
 * with an empty slot is safe. */
static void process_map_base_deferred_free(void)
{
	free(map_base_deferred_free);
	map_base_deferred_free = NULL;
}
#endif
73+
6374
#ifdef __wasilibc_unmodified_upstream
6475
_Noreturn void __pthread_exit(void *result)
6576
#else
@@ -182,7 +193,17 @@ static void __pthread_exit(void *result)
182193
}
183194
#else
184195
if (state==DT_DETACHED && self->map_base) {
185-
free(self->map_base);
196+
/* As we use malloc/free which is considerably more complex
197+
* than mmap/munmap to call and can even require a valid
198+
* thread context, it's difficult to implement __unmapself.
199+
*
200+
* Here we take an alternative approach which simply defers
201+
* the deallocation. An obvious downside of this approach is
202+
* that it keeps the stack around longer (possibly forever).
203+
* To avoid wasting too much memory, we only defer a single
204+
* item at most. */
205+
process_map_base_deferred_free();
206+
map_base_deferred_free = self->map_base;
186207
// Can't use `exit()` here, because it is too high level
187208
return;
188209
}
@@ -424,6 +445,14 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
424445
if (map == MAP_FAILED) goto fail;
425446
}
426447
#else
448+
/* Process the deferred free request if any before
449+
* allocating a new one. Hopefully this enables reuse of the memory.
450+
*
451+
* Note: We can't perform a simple "handoff" because allocation
452+
* sizes might be different. (eg. the stack size might differ) */
453+
__tl_lock();
454+
process_map_base_deferred_free();
455+
__tl_unlock();
427456
map = malloc(size);
428457
if (!map) goto fail;
429458
#endif

0 commit comments

Comments
 (0)