Skip to content

Commit 52f91b4

Browse files
committed
Update mimalloc to microsoft/mimalloc@9b75377
1 parent 55934a5 commit 52f91b4

33 files changed

+2963
-2159
lines changed

system/lib/mimalloc/README.emscripten

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11

2-
This contains mimalloc 8c532c32c3c96e5ba1f2283e032f69ead8add00f (v2.1.7) with
2+
This contains mimalloc 9b7537755ace8f4a5110a06dca90826e60188037 with
33
Emscripten-specific changes.
44

55
Origin: https://github.com/microsoft/mimalloc

system/lib/mimalloc/include/mimalloc.h

Lines changed: 36 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
88
#ifndef MIMALLOC_H
99
#define MIMALLOC_H
1010

11-
#define MI_MALLOC_VERSION 217 // major + 2 digits minor
11+
#define MI_MALLOC_VERSION 188 // major + 2 digits minor
1212

1313
// ------------------------------------------------------
1414
// Compiler specific attributes
@@ -97,7 +97,6 @@ terms of the MIT license. A copy of the license can be found in the file
9797

9898
#include <stddef.h> // size_t
9999
#include <stdbool.h> // bool
100-
#include <stdint.h> // INTPTR_MAX
101100

102101
#ifdef __cplusplus
103102
extern "C" {
@@ -149,6 +148,7 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
149148
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
150149

151150
mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
151+
mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
152152
mi_decl_export int mi_version(void) mi_attr_noexcept;
153153
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
154154
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
@@ -259,11 +259,12 @@ typedef struct mi_heap_area_s {
259259
size_t used; // number of allocated blocks
260260
size_t block_size; // size in bytes of each block
261261
size_t full_block_size; // size in bytes of a full block including padding and metadata.
262+
int heap_tag; // heap tag associated with this area
262263
} mi_heap_area_t;
263264

264265
typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
265266

266-
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
267+
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
267268

268269
// Experimental
269270
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
@@ -289,8 +290,31 @@ mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_co
289290
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
290291
#endif
291292

293+
294+
// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
295+
// Used for example for separate interpreters in one process.
296+
typedef void* mi_subproc_id_t;
297+
mi_decl_export mi_subproc_id_t mi_subproc_main(void);
298+
mi_decl_export mi_subproc_id_t mi_subproc_new(void);
299+
mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
300+
mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
301+
302+
// Experimental: visit abandoned heap areas (from threads that have been terminated)
303+
mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
304+
305+
// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
306+
// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
307+
// fall back to `mi_heap_delete`.
308+
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
309+
292310
// deprecated
293-
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
311+
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
312+
313+
// Experimental: objects followed by a guard page.
314+
// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
315+
// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages.
316+
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
317+
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
294318

295319

296320
// ------------------------------------------------------
@@ -332,7 +356,7 @@ typedef enum mi_option_e {
332356
mi_option_deprecated_segment_cache,
333357
mi_option_deprecated_page_reset,
334358
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
335-
mi_option_deprecated_segment_reset,
359+
mi_option_deprecated_segment_reset,
336360
mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
337361
mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)
338362
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
@@ -348,6 +372,13 @@ typedef enum mi_option_e {
348372
mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
349373
mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
350374
mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
375+
mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
376+
mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0)
377+
mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0)
378+
mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
379+
mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
380+
mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
381+
mi_option_target_segments_per_thread, // experimental (=0)
351382
_mi_option_last,
352383
// legacy option names
353384
mi_option_large_os_pages = mi_option_allow_large_os_pages,

system/lib/mimalloc/include/mimalloc/atomic.h

Lines changed: 125 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,17 @@ terms of the MIT license. A copy of the license can be found in the file
88
#ifndef MIMALLOC_ATOMIC_H
99
#define MIMALLOC_ATOMIC_H
1010

11+
// include windows.h or pthreads.h
12+
#if defined(_WIN32)
13+
#ifndef WIN32_LEAN_AND_MEAN
14+
#define WIN32_LEAN_AND_MEAN
15+
#endif
16+
#include <windows.h>
17+
#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))
18+
#define MI_USE_PTHREADS
19+
#include <pthread.h>
20+
#endif
21+
1122
// --------------------------------------------------------------------------------------------
1223
// Atomics
1324
// We need to be portable between C, C++, and MSVC.
@@ -24,9 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
2435
#define mi_atomic(name) std::atomic_##name
2536
#define mi_memory_order(name) std::memory_order_##name
2637
#if (__cplusplus >= 202002L) // c++20, see issue #571
27-
#define MI_ATOMIC_VAR_INIT(x) x
38+
#define MI_ATOMIC_VAR_INIT(x) x
2839
#elif !defined(ATOMIC_VAR_INIT)
29-
#define MI_ATOMIC_VAR_INIT(x) x
40+
#define MI_ATOMIC_VAR_INIT(x) x
3041
#else
3142
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
3243
#endif
@@ -133,10 +144,6 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
133144
#elif defined(_MSC_VER)
134145

135146
// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics.
136-
#ifndef WIN32_LEAN_AND_MEAN
137-
#define WIN32_LEAN_AND_MEAN
138-
#endif
139-
#include <windows.h>
140147
#include <intrin.h>
141148
#ifdef _WIN64
142149
typedef LONG64 msc_intptr_t;
@@ -302,11 +309,16 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
302309
return (intptr_t)mi_atomic_addi(p, -sub);
303310
}
304311

312+
313+
// ----------------------------------------------------------------------
314+
// Once and Guard
315+
// ----------------------------------------------------------------------
316+
305317
typedef _Atomic(uintptr_t) mi_atomic_once_t;
306318

307319
// Returns true only on the first invocation
308320
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
309-
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
321+
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
310322
uintptr_t expected = 0;
311323
return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
312324
}
@@ -322,17 +334,16 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t;
322334

323335

324336

337+
// ----------------------------------------------------------------------
325338
// Yield
339+
// ----------------------------------------------------------------------
340+
326341
#if defined(__cplusplus)
327342
#include <thread>
328343
static inline void mi_atomic_yield(void) {
329344
std::this_thread::yield();
330345
}
331346
#elif defined(_WIN32)
332-
#ifndef WIN32_LEAN_AND_MEAN
333-
#define WIN32_LEAN_AND_MEAN
334-
#endif
335-
#include <windows.h>
336347
static inline void mi_atomic_yield(void) {
337348
YieldProcessor();
338349
}
@@ -390,4 +401,107 @@ static inline void mi_atomic_yield(void) {
390401
#endif
391402

392403

404+
// ----------------------------------------------------------------------
405+
// Locks are only used for abandoned segment visiting in `arena.c`
406+
// ----------------------------------------------------------------------
407+
408+
#if defined(_WIN32)
409+
410+
#define mi_lock_t CRITICAL_SECTION
411+
412+
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
413+
return TryEnterCriticalSection(lock);
414+
}
415+
static inline bool mi_lock_acquire(mi_lock_t* lock) {
416+
EnterCriticalSection(lock);
417+
return true;
418+
}
419+
static inline void mi_lock_release(mi_lock_t* lock) {
420+
LeaveCriticalSection(lock);
421+
}
422+
static inline void mi_lock_init(mi_lock_t* lock) {
423+
InitializeCriticalSection(lock);
424+
}
425+
static inline void mi_lock_done(mi_lock_t* lock) {
426+
DeleteCriticalSection(lock);
427+
}
428+
429+
430+
#elif defined(MI_USE_PTHREADS)
431+
432+
#define mi_lock_t pthread_mutex_t
433+
434+
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
435+
return (pthread_mutex_trylock(lock) == 0);
436+
}
437+
static inline bool mi_lock_acquire(mi_lock_t* lock) {
438+
return (pthread_mutex_lock(lock) == 0);
439+
}
440+
static inline void mi_lock_release(mi_lock_t* lock) {
441+
pthread_mutex_unlock(lock);
442+
}
443+
static inline void mi_lock_init(mi_lock_t* lock) {
444+
pthread_mutex_init(lock, NULL);
445+
}
446+
static inline void mi_lock_done(mi_lock_t* lock) {
447+
pthread_mutex_destroy(lock);
448+
}
449+
450+
/*
451+
#elif defined(__cplusplus)
452+
453+
#include <mutex>
454+
#define mi_lock_t std::mutex
455+
456+
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
457+
return lock->lock_try_acquire();
458+
}
459+
static inline bool mi_lock_acquire(mi_lock_t* lock) {
460+
lock->lock();
461+
return true;
462+
}
463+
static inline void mi_lock_release(mi_lock_t* lock) {
464+
lock->unlock();
465+
}
466+
static inline void mi_lock_init(mi_lock_t* lock) {
467+
(void)(lock);
468+
}
469+
static inline void mi_lock_done(mi_lock_t* lock) {
470+
(void)(lock);
471+
}
472+
*/
473+
474+
#else
475+
476+
// fall back to poor man's locks.
477+
// this should only be the case in a single-threaded environment (like __wasi__)
478+
479+
#define mi_lock_t _Atomic(uintptr_t)
480+
481+
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
482+
uintptr_t expected = 0;
483+
return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
484+
}
485+
static inline bool mi_lock_acquire(mi_lock_t* lock) {
486+
for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
487+
if (mi_lock_try_acquire(lock)) return true;
488+
mi_atomic_yield();
489+
}
490+
return true;
491+
}
492+
static inline void mi_lock_release(mi_lock_t* lock) {
493+
mi_atomic_store_release(lock, (uintptr_t)0);
494+
}
495+
static inline void mi_lock_init(mi_lock_t* lock) {
496+
mi_lock_release(lock);
497+
}
498+
static inline void mi_lock_done(mi_lock_t* lock) {
499+
(void)(lock);
500+
}
501+
502+
#endif
503+
504+
505+
506+
393507
#endif // __MIMALLOC_ATOMIC_H

0 commit comments

Comments
 (0)