2 changes: 1 addition & 1 deletion ChangeLog.md
@@ -21,7 +21,7 @@ See docs/process.md for more on how version tagging works.
3.1.73 (in development)
-----------------------
- libunwind was updated to LLVM 19.1.4. (#22394)
- mimalloc was updated to 2.1.7. (#21548)
- mimalloc was updated to microsoft/mimalloc@9b75377. (#21548)

3.1.72 - 11/19/24
-----------------
2 changes: 1 addition & 1 deletion system/lib/mimalloc/README.emscripten
@@ -1,5 +1,5 @@

This contains mimalloc 8c532c32c3c96e5ba1f2283e032f69ead8add00f (v2.1.7) with
This contains mimalloc 9b7537755ace8f4a5110a06dca90826e60188037 with
Emscripten-specific changes.

Origin: https://github.com/microsoft/mimalloc
41 changes: 36 additions & 5 deletions system/lib/mimalloc/include/mimalloc.h
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H

#define MI_MALLOC_VERSION 217 // major + 2 digits minor
#define MI_MALLOC_VERSION 188 // major + 2 digits minor
The PR author commented on this change:

> Ah, I just noticed that I synced with the dev branch. Perhaps it should be synced with the dev-slice branch instead.

### Branches
* `master`: latest stable release (based on `dev-slice`).
* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's.
* `dev-slice`: development branch for mimalloc v2. This branch is downstream of `dev` (and is essentially equal to `dev` except for
`src/segment.c`)


// ------------------------------------------------------
// Compiler specific attributes
@@ -97,7 +97,6 @@ terms of the MIT license. A copy of the license can be found in the file

#include <stddef.h> // size_t
#include <stdbool.h> // bool
#include <stdint.h> // INTPTR_MAX

#ifdef __cplusplus
extern "C" {
@@ -149,6 +148,7 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);

mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
mi_decl_export int mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
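For context on the new `mi_collect_reduce` export above, here is a minimal hedged sketch; reading `target_thread_owned` as a byte target for thread-owned memory is an assumption from the parameter name, not something this diff documents.

```c
#include <mimalloc.h>

// Hedged sketch: trim this thread's caches. The interpretation of the
// argument as a byte target for thread-owned memory is an assumption.
void trim_thread_cache(void) {
  mi_collect(false);                    // regular, non-forced collection
  mi_collect_reduce(8u * 1024 * 1024);  // assumed: aim for ~8 MiB thread-owned
}
```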
@@ -259,11 +259,12 @@ typedef struct mi_heap_area_s {
size_t used; // number of allocated blocks
size_t block_size; // size in bytes of each block
size_t full_block_size; // size in bytes of a full block including padding and metadata.
int heap_tag; // heap tag associated with this area
} mi_heap_area_t;

typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);

mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
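Since `mi_heap_area_t` gains a `heap_tag` field and the `mi_heap_visit_blocks` parameter is renamed, a small hedged sketch of a visitor may help. `mi_heap_get_default` is part of the public API; the convention that the visitor is first called per area with `block == NULL` follows mimalloc's documented visitor behavior.

```c
#include <mimalloc.h>
#include <stdio.h>

// Visitor matching mi_block_visit_fun: called once per area with
// block == NULL, and (when visit_blocks is true) once per allocated block.
static bool count_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                        void* block, size_t block_size, void* arg) {
  (void)heap;
  if (block == NULL) {
    printf("area: %zu used blocks of size %zu, heap_tag %d\n",
           area->used, area->block_size, area->heap_tag);
  } else {
    *(size_t*)arg += block_size;  // accumulate allocated bytes
  }
  return true;  // keep visiting
}

void report_default_heap(void) {
  size_t total = 0;
  mi_heap_visit_blocks(mi_heap_get_default(), /*visit_blocks=*/true,
                       &count_bytes, &total);
  printf("total allocated: %zu bytes\n", total);
}
```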

// Experimental
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
@@ -289,8 +290,31 @@ mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_co
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif


// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
// Used for example for separate interpreters in one process.
typedef void* mi_subproc_id_t;
mi_decl_export mi_subproc_id_t mi_subproc_main(void);
mi_decl_export mi_subproc_id_t mi_subproc_new(void);
mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)

// Experimental: visit abandoned heap areas (from threads that have been terminated)
mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
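A hedged sketch tying the sub-process API to abandoned-block visiting. Enabling `mi_option_visit_abandoned` up front is required per the option comment added below; treating a negative `heap_tag` as "all tags" is an assumption.

```c
#include <mimalloc.h>
#include <pthread.h>

static bool on_abandoned(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area;
  if (block != NULL) *(size_t*)arg += block_size;  // count leaked bytes
  return true;  // continue visiting
}

static void* worker(void* arg) {
  mi_subproc_add_current_thread((mi_subproc_id_t)arg);  // before any allocation
  (void)mi_malloc(128);  // never freed: abandoned when the thread exits
  return NULL;
}

void inspect_terminated_threads(void) {
  mi_option_enable(mi_option_visit_abandoned);  // must be on before first use
  mi_subproc_id_t sub = mi_subproc_new();
  pthread_t t;
  pthread_create(&t, NULL, &worker, sub);
  pthread_join(t, NULL);
  size_t leaked = 0;
  mi_abandoned_visit_blocks(sub, /*heap_tag=*/-1 /* assumed: all tags */,
                            /*visit_blocks=*/true, &on_abandoned, &leaked);
  mi_subproc_delete(sub);
}
```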

// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
// fall back to `mi_heap_delete`.
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
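A short hedged sketch of `mi_heap_new_ex`; passing `0` as the arena id to mean "no specific arena" is an assumption here.

```c
// Hedged sketch: a tag-1 heap whose abandoned memory other threads with a
// compatible heap_tag may reclaim (allow_destroy = false means
// mi_heap_destroy falls back to mi_heap_delete, per the comment above).
void tagged_heap_example(void) {
  mi_heap_t* tagged = mi_heap_new_ex(/*heap_tag=*/1, /*allow_destroy=*/false,
                                     /*arena_id=*/0 /* assumed: none */);
  void* p = mi_heap_malloc(tagged, 64);
  mi_free(p);
  mi_heap_delete(tagged);
}
```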

// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;

// Experimental: objects followed by a guard page.
// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages.
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
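A hedged sketch of the guarded-object knobs; per the option comments below, these only take effect in a build with `MI_GUARDED` defined.

```c
// Hedged sketch: guard roughly 1 out of every 100 allocations between
// 16 bytes and 1 MiB on the default heap (assumes an MI_GUARDED build).
void enable_guard_pages(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_heap_guarded_set_size_bound(heap, 16, 1024 * 1024);
  mi_heap_guarded_set_sample_rate(heap, 100, /*seed=*/0);  // 0 = random start
}
```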


// ------------------------------------------------------
@@ -332,7 +356,7 @@ typedef enum mi_option_e {
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
@@ -348,6 +372,13 @@ typedef enum mi_option_e {
mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0)
mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0)
mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
mi_option_target_segments_per_thread, // experimental (=0)
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
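The options added above can also be configured programmatically before mimalloc's first use; a minimal sketch with the public option API (the chosen values are illustrative, and any matching `MIMALLOC_*` environment-variable names are assumed from mimalloc's usual naming):

```c
#include <mimalloc.h>

void configure_new_options(void) {
  mi_option_enable(mi_option_visit_abandoned);        // default is 0 (off)
  mi_option_set(mi_option_guarded_sample_rate, 100);  // guard 1 in 100
  mi_option_set(mi_option_guarded_max, 1024 * 1024);  // objects up to 1 MiB
}
```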
136 changes: 125 additions & 11 deletions system/lib/mimalloc/include/mimalloc/atomic.h
@@ -8,6 +8,17 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_ATOMIC_H
#define MIMALLOC_ATOMIC_H

// include windows.h or pthreads.h
#if defined(_WIN32)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))
#define MI_USE_PTHREADS
#include <pthread.h>
#endif

// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
@@ -24,9 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_atomic(name) std::atomic_##name
#define mi_memory_order(name) std::memory_order_##name
#if (__cplusplus >= 202002L) // c++20, see issue #571
#define MI_ATOMIC_VAR_INIT(x) x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x) x
#else
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
@@ -133,10 +144,6 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
#elif defined(_MSC_VER)

// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics.
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <intrin.h>
#ifdef _WIN64
typedef LONG64 msc_intptr_t;
@@ -302,11 +309,16 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
return (intptr_t)mi_atomic_addi(p, -sub);
}


// ----------------------------------------------------------------------
// Once and Guard
// ----------------------------------------------------------------------

typedef _Atomic(uintptr_t) mi_atomic_once_t;

// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
}
@@ -322,17 +334,16 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t;



// ----------------------------------------------------------------------
// Yield
// ----------------------------------------------------------------------

#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
std::this_thread::yield();
}
#elif defined(_WIN32)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
static inline void mi_atomic_yield(void) {
YieldProcessor();
}
@@ -390,4 +401,107 @@ static inline void mi_atomic_yield(void) {
#endif


// ----------------------------------------------------------------------
// Locks are only used for abandoned segment visiting in `arena.c`
// ----------------------------------------------------------------------

#if defined(_WIN32)

#define mi_lock_t CRITICAL_SECTION

static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return TryEnterCriticalSection(lock);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
EnterCriticalSection(lock);
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
LeaveCriticalSection(lock);
}
static inline void mi_lock_init(mi_lock_t* lock) {
InitializeCriticalSection(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
DeleteCriticalSection(lock);
}


#elif defined(MI_USE_PTHREADS)

#define mi_lock_t pthread_mutex_t

static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return (pthread_mutex_trylock(lock) == 0);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
return (pthread_mutex_lock(lock) == 0);
}
static inline void mi_lock_release(mi_lock_t* lock) {
pthread_mutex_unlock(lock);
}
static inline void mi_lock_init(mi_lock_t* lock) {
pthread_mutex_init(lock, NULL);
}
static inline void mi_lock_done(mi_lock_t* lock) {
pthread_mutex_destroy(lock);
}

/*
#elif defined(__cplusplus)

#include <mutex>
#define mi_lock_t std::mutex

static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return lock->lock_try_acquire();
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
lock->lock();
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
lock->unlock();
}
static inline void mi_lock_init(mi_lock_t* lock) {
(void)(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
*/

#else

// fall back to poor man's locks.
// this should only be the case in a single-threaded environment (like __wasi__)

#define mi_lock_t _Atomic(uintptr_t)

static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
}
static inline bool mi_lock_acquire(mi_lock_t* lock) {
for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
if (mi_lock_try_acquire(lock)) return true;
mi_atomic_yield();
}
return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
mi_atomic_store_release(lock, (uintptr_t)0);
}
static inline void mi_lock_init(mi_lock_t* lock) {
mi_lock_release(lock);
}
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}

#endif
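For illustration, a hedged sketch of how this lock abstraction is consumed (the names here are illustrative; per the comment above, the real user is the abandoned-segment visiting in `arena.c`). On Windows `mi_lock_t` expands to a `CRITICAL_SECTION`, under pthreads to a `pthread_mutex_t`, and otherwise to the atomic spin fallback.

```c
static mi_lock_t visit_lock;

static void visit_locked(void (*visit)(void)) {
  if (mi_lock_acquire(&visit_lock)) {   // pthread variant can report failure
    visit();                            // e.g. walk abandoned segments
    mi_lock_release(&visit_lock);
  }
}

static void locks_setup(void)    { mi_lock_init(&visit_lock); }
static void locks_teardown(void) { mi_lock_done(&visit_lock); }
```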




#endif // __MIMALLOC_ATOMIC_H