diff --git a/Cargo.toml b/Cargo.toml index 97702798021..9ef18a772b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["mozjs-sys", "mozjs"] +members = ["mozjs-sys", "mozjs", "examples/embedder_allocator"] resolver = "2" [workspace.package] diff --git a/examples/embedder_allocator/Cargo.toml b/examples/embedder_allocator/Cargo.toml new file mode 100644 index 00000000000..b3f305bc9a3 --- /dev/null +++ b/examples/embedder_allocator/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "embedder_allocator" +version = "0.1.0" +repository.workspace = true +license.workspace = true +edition.workspace = true +description = "Example usage of mozjs, with the embedder (this crate) providing a custom allocator for mozjs" +publish = false + +[dependencies] +mozjs = { path = "../../mozjs", features = ["custom-alloc"] } +mimalloc = "0.1.48" \ No newline at end of file diff --git a/examples/embedder_allocator/Readme.md b/examples/embedder_allocator/Readme.md new file mode 100644 index 00000000000..293036553f8 --- /dev/null +++ b/examples/embedder_allocator/Readme.md @@ -0,0 +1,2 @@ +# Example: Using mozjs with an embedder-provided allocator + diff --git a/examples/embedder_allocator/build.py b/examples/embedder_allocator/build.py new file mode 100644 index 00000000000..c6156434ec2 --- /dev/null +++ b/examples/embedder_allocator/build.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 + +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# This file sets SERVO_CUSTOM_ALLOC_INCLUDE_DIR to the vendored mimalloc headers so that the mozjs-sys build can find the embedder-provided allocator, then runs the given command with that environment. + +import os +import pathlib +import subprocess +import sys +from typing import Mapping + +def create_env() -> Mapping[str, str]: + env = os.environ.copy() + mimalloc_include_dir = pathlib.Path(__file__).parent.joinpath('mimalloc/include') + assert mimalloc_include_dir.is_dir(), "Could not find mimalloc include directory" + env['SERVO_CUSTOM_ALLOC_INCLUDE_DIR'] = mimalloc_include_dir.as_posix() + return env + + +def main(): + completed_process = subprocess.run(sys.argv[1:], env=create_env()) + sys.exit(completed_process.returncode) + + +if __name__ == '__main__': + main() diff --git a/examples/embedder_allocator/build.rs b/examples/embedder_allocator/build.rs new file mode 100644 index 00000000000..8711f251632 --- /dev/null +++ b/examples/embedder_allocator/build.rs @@ -0,0 +1,4 @@ +fn main() { + // todo: Should we do this here or in the mozjs-sys build-script? + println!("cargo:rustc-link-lib=mimalloc"); +} \ No newline at end of file diff --git a/examples/embedder_allocator/mimalloc/include/mimalloc.h b/examples/embedder_allocator/mimalloc/include/mimalloc.h new file mode 100644 index 00000000000..f887278a988 --- /dev/null +++ b/examples/embedder_allocator/mimalloc/include/mimalloc.h @@ -0,0 +1,612 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_H +#define MIMALLOC_H + +#define MI_MALLOC_VERSION 224 // major + 2 digits minor + +// ------------------------------------------------------ +// Compiler specific attributes +// ------------------------------------------------------ + +#ifdef __cplusplus + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + #define mi_attr_noexcept noexcept + #else + #define mi_attr_noexcept throw() + #endif +#else + #define mi_attr_noexcept +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201703) + #define mi_decl_nodiscard [[nodiscard]] +#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl + #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#elif defined(_HAS_NODISCARD) + #define mi_decl_nodiscard _NODISCARD +#elif (_MSC_VER >= 1700) + #define mi_decl_nodiscard _Check_return_ +#else + #define mi_decl_nodiscard +#endif + +#if defined(_MSC_VER) || defined(__MINGW32__) + #if !defined(MI_SHARED_LIB) + #define mi_decl_export + #elif defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __declspec(dllexport) + #else + #define mi_decl_export __declspec(dllimport) + #endif + #if defined(__MINGW32__) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #else + #if (_MSC_VER >= 1900) && !defined(__EDG__) + #define mi_decl_restrict __declspec(allocator) __declspec(restrict) + #else + #define mi_decl_restrict __declspec(restrict) + #endif + #define mi_attr_malloc + #endif + #define mi_cdecl __cdecl + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#elif defined(__GNUC__) // includes clang and icc + #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __attribute__((visibility("default"))) + #else + #define mi_decl_export + #endif + #define mi_cdecl // leads to warnings... 
__attribute__((cdecl)) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5) + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) + #elif defined(__INTEL_COMPILER) + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) + #else + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) __attribute__((alloc_align(p))) + #endif +#else + #define mi_cdecl + #define mi_decl_export + #define mi_decl_restrict + #define mi_attr_malloc + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#endif + +// ------------------------------------------------------ +// Includes +// ------------------------------------------------------ + +#include // size_t +#include // bool +#include // INTPTR_MAX + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------------------------------------------ +// Standard malloc interface +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_export void mi_free(void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ +#define MI_SMALL_WSIZE_MAX (128) +#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*)) + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + + +// ------------------------------------------------------ +// 
Internals +// ------------------------------------------------------ + +typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); +mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_error_fun)(int err, void* arg); +mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + +mi_decl_export void mi_collect(bool force) mi_attr_noexcept; +mi_decl_export int mi_version(void) mi_attr_noexcept; +mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; +mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; +mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL +mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; +mi_decl_export void mi_options_print(void) mi_attr_noexcept; + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, + size_t* current_rss, size_t* peak_rss, + size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + + +// Generally do not use the following as these are usually called automatically +mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_cdecl mi_process_done(void) mi_attr_noexcept; +mi_decl_export void mi_thread_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_done(void) mi_attr_noexcept; + + +// ------------------------------------------------------------------------------------- +// Aligned allocation +// Note that `alignment` always follows `size` for consistency with unaligned +// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. 
+// ------------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + + +// ------------------------------------------------------------------------------------- +// Heaps: first-class, but can only allocate from the same thread that created it. +// ------------------------------------------------------------------------------------- + +struct mi_heap_s; +typedef struct mi_heap_s mi_heap_t; + +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void); +mi_decl_export void mi_heap_delete(mi_heap_t* heap); +mi_decl_export void mi_heap_destroy(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_get_default(void); +mi_decl_export mi_heap_t* mi_heap_get_backing(void); +mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept 
mi_attr_alloc_size(3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + + +// -------------------------------------------------------------------------------- +// Zero initialized re-allocation. +// Only valid on memory that was originally allocated with zero initialization too. +// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. 
+// see +// -------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4); + + +// ------------------------------------------------------ +// Analysis +// ------------------------------------------------------ + +mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_check_owned(const void* p); + +// An area of heap space contains blocks of a single size. +typedef struct mi_heap_area_s { + void* blocks; // start of the area containing heap blocks + size_t reserved; // bytes reserved for this area (virtual) + size_t committed; // current available bytes for this area + size_t used; // number of allocated blocks + size_t block_size; // size in bytes of each block + size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+ int heap_tag; // heap tag associated with this area +} mi_heap_area_t; + +typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; + +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; + +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; + +mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept; +mi_decl_export void mi_arenas_print(void) mi_attr_noexcept; + +// Experimental: heaps associated with specific memory arena's +typedef int mi_arena_id_t; +mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); +mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; + +#if MI_MALLOC_VERSION >= 182 +// Create a heap that only allocates in the specified arena +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); +#endif + + +// Experimental: allow sub-processes whose memory areas stay separated (and no reclamation between them) +// Used for example for separate interpreters in one process. +typedef void* mi_subproc_id_t; +mi_decl_export mi_subproc_id_t mi_subproc_main(void); +mi_decl_export mi_subproc_id_t mi_subproc_new(void); +mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc); +mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet) + +// Experimental: visit abandoned heap areas (that are not owned by a specific heap) +mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental: objects followed by a guard page. +// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object. +// A seed of 0 uses a random start point. Only objects within the size bound are eligable for guard pages. +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed); +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max); + +// Experimental: communicate that the thread is part of a threadpool +mi_decl_export void mi_thread_set_in_threadpool(void) mi_attr_noexcept; + +// Experimental: create a new heap with a specified heap tag. 
Set `allow_destroy` to false to allow the thread +// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will +// fall back to `mi_heap_delete`. +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id); + +// deprecated +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; +mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept; + + + +// ------------------------------------------------------ +// Convenience +// ------------------------------------------------------ + +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) +#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp))) +#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp))) +#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp))) +#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp))) + +#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) +#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp))) +#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp))) +#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp))) +#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp))) + + +// ------------------------------------------------------ +// Options +// ------------------------------------------------------ + +typedef enum mi_option_e { + // stable options + mi_option_show_errors, // print error messages + mi_option_show_stats, // print statistics on termination + mi_option_verbose, // print verbose messages + // advanced options + mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1) + mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2) + mi_option_purge_decommits, // should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit) + mi_option_allow_large_os_pages, // allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process. + mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB pages) at startup + mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node + mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_deprecated_segment_cache, + mi_option_deprecated_page_reset, + mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination + mi_option_deprecated_segment_reset, + mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) + mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10) + mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. 
+ mi_option_disallow_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) + mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100) + mi_option_max_errors, // issue at most N error messages + mi_option_max_warnings, // issue at most N warning messages + mi_option_max_segment_reclaim, // max. percentage of the abandoned segments can be reclaimed per try (=10%) + mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe + mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10) + mi_option_purge_extend_delay, + mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1) + mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) + mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows) + mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0) + mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0) + mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0) + mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) + mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000) + mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0) + mi_option_target_segments_per_thread, // experimental (=0) + mi_option_generic_collect, // collect heaps every N (=10000) generic allocation calls + _mi_option_last, + // legacy option names + mi_option_large_os_pages = mi_option_allow_large_os_pages, + mi_option_eager_region_commit = mi_option_arena_eager_commit, + mi_option_reset_decommits = mi_option_purge_decommits, + mi_option_reset_delay = mi_option_purge_delay, + mi_option_abandoned_page_reset = mi_option_abandoned_page_purge, + mi_option_limit_os_alloc = mi_option_disallow_os_alloc +} mi_option_t; + + +mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); +mi_decl_export void mi_option_enable(mi_option_t option); +mi_decl_export void mi_option_disable(mi_option_t option); +mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); +mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); + +mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); +mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option); +mi_decl_export void mi_option_set(mi_option_t option, long value); +mi_decl_export void mi_option_set_default(mi_option_t option, long value); + + +// ------------------------------------------------------------------------------------------------------- +// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. +// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) 
+// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. +// ------------------------------------------------------------------------------------------------------- + +mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; +mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; + +mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); + +mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept; +mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept; + +mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; +mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; + +// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. +// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). 
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); +mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3); + +#ifdef __cplusplus +} +#endif + +// --------------------------------------------------------------------------------------------- +// Implement the C++ std::allocator interface for use in STL containers. +// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally) +// --------------------------------------------------------------------------------------------- +#ifdef __cplusplus + +#include // std::size_t +#include // PTRDIFF_MAX +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#include // std::true_type +#include // std::forward +#endif + +template struct _mi_stl_allocator_common { + typedef T value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type& reference; + typedef value_type const& const_reference; + typedef value_type* pointer; + typedef value_type const* const_pointer; + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } + template void destroy(U* p) mi_attr_noexcept { p->~U(); } + #else + void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + void destroy(pointer p) { p->~value_type(); } + #endif + + size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +}; + +template struct mi_stl_allocator : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + template struct rebind { typedef mi_stl_allocator other; }; + + mi_stl_allocator() mi_attr_noexcept = default; + mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default; + template mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } + mi_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + + #if 
(__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_new_n(count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_new_n(count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::true_type; + #endif +}; + +template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } +template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } + + +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11 +#define MI_HAS_HEAP_STL_ALLOCATOR 1 + +#include // std::shared_ptr + +// Common base class for STL allocators in a specific heap +template struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + + _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp, [](mi_heap_t*) {}) {} /* will not delete nor destroy the passed in heap */ + + #if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::false_type; + #endif + + void collect(bool force) { mi_heap_collect(this->heap.get(), force); } + template bool is_equal(const _mi_heap_stl_allocator_common& x) const { return (this->heap == x.heap); } + +protected: + std::shared_ptr heap; + template friend struct _mi_heap_stl_allocator_common; + + _mi_heap_stl_allocator_common() { + mi_heap_t* hp = mi_heap_new(); + this->heap.reset(hp, (_mi_destroy ? 
&heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */ + } + _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + template _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + +private: + static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } } + static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } } +}; + +// STL allocator allocation in a specific heap +template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called + mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + template struct rebind { typedef mi_heap_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + + +// STL allocator allocation in a specific heap, where `free` does nothing and +// the heap is destroyed in one go on destruction -- use with care! +template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called + mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ } + template struct rebind { typedef mi_heap_destroy_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + +#endif // C++11 + +#endif // __cplusplus + +#endif diff --git a/examples/embedder_allocator/mimalloc/include/servo_embedder_malloc_prefix.h b/examples/embedder_allocator/mimalloc/include/servo_embedder_malloc_prefix.h new file mode 100644 index 00000000000..318bc364145 --- /dev/null +++ b/examples/embedder_allocator/mimalloc/include/servo_embedder_malloc_prefix.h @@ -0,0 +1,5 @@ +#pragma once + +/// Defines the prefix for all malloc functions, i.e. +/// mi_malloc, mi_calloc, mi_realloc, mi_free etc. 
+#define SERVO_EMBEDDER_MALLOC_PREFIX mi_ diff --git a/examples/embedder_allocator/mimalloc/include/servo_embedder_memory_wrap.h b/examples/embedder_allocator/mimalloc/include/servo_embedder_memory_wrap.h new file mode 100644 index 00000000000..461e080666a --- /dev/null +++ b/examples/embedder_allocator/mimalloc/include/servo_embedder_memory_wrap.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include "servo_embedder_malloc_prefix.h" + +#define SERVO_CONCAT(x, y) x ## y +#define SERVO_CONCAT2(x, y) SERVO_CONCAT(x, y) + +#define mozmem_malloc_impl(fn) SERVO_CONCAT2(SERVO_EMBEDDER_MALLOC_PREFIX, fn) +#define mozmem_dup_impl(fn) SERVO_CONCAT2(SERVO_EMBEDDER_MALLOC_PREFIX, fn) diff --git a/examples/embedder_allocator/src/main.rs b/examples/embedder_allocator/src/main.rs new file mode 100644 index 00000000000..93999ec2d78 --- /dev/null +++ b/examples/embedder_allocator/src/main.rs @@ -0,0 +1,76 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +use std::ptr; +use std::sync::mpsc::channel; +use std::thread; + +use mozjs::jsapi::GCContext; +use mozjs::jsapi::JSCLASS_FOREGROUND_FINALIZE; +use mozjs::jsapi::{JSClass, JSClassOps, JSObject, OnNewGlobalHookOption}; +use mozjs::realm::AutoRealm; +use mozjs::rooted; +use mozjs::rust::wrappers2::{JS_NewGlobalObject, JS_NewObject}; +use mozjs::rust::{JSEngine, RealmOptions, Runtime, SIMPLE_GLOBAL_CLASS}; + +fn main() { + println!("Hello, world!"); + let engine = JSEngine::init().expect("Could not init JSEngine"); + println!("JSEngine initialized"); + let mut runtime = Runtime::new(engine.handle()); + println!("Runtime initialized"); + let context = runtime.cx(); + let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook; + let c_option = RealmOptions::default(); + + unsafe { + rooted!(&in(context) let global = JS_NewGlobalObject( + context, + &SIMPLE_GLOBAL_CLASS, + ptr::null_mut(), + h_option, + &*c_option, + )); + let mut realm = AutoRealm::new_from_handle(context, global.handle()); + let context = &mut realm; + rooted!(&in(context) let _object = JS_NewObject(context, &CLASS as *const _)); + } + + let parent = runtime.prepare_for_new_child(); + let (sender, receiver) = channel(); + thread::spawn(move || { + let runtime = unsafe { Runtime::create_with_parent(parent) }; + assert!(Runtime::get().is_some()); + drop(runtime); + let _ = sender.send(()); + }); + let _ = receiver.recv(); + println!("Example ran without issues..."); +} + +unsafe extern "C" fn finalize(_fop: *mut GCContext, _object: *mut JSObject) { + assert!(Runtime::get().is_some()); +} + +static CLASS_OPS: JSClassOps = JSClassOps { + addProperty: None, + delProperty: None, + enumerate: None, + newEnumerate: None, + resolve: None, + mayResolve: None, + finalize: Some(finalize), + call: None, + construct: None, + trace: None, +}; + +static CLASS: JSClass = JSClass { + name: c"EventTargetPrototype".as_ptr(), + flags: JSCLASS_FOREGROUND_FINALIZE, + cOps: &CLASS_OPS as *const JSClassOps, + spec: ptr::null(), + ext: ptr::null(), + oOps: ptr::null(), +}; diff --git a/mozjs-sys/Cargo.toml b/mozjs-sys/Cargo.toml index 204e311e703..1fb4f88c2eb 100644 --- a/mozjs-sys/Cargo.toml +++ b/mozjs-sys/Cargo.toml @@ -16,6 +16,8 @@ name = "mozjs_sys" doctest = false [features] +default = [] +custom-alloc = [] debugmozjs = [] profilemozjs = [] jit = [] diff --git a/mozjs-sys/build.rs b/mozjs-sys/build.rs index c664b67dee2..af85a25dfd7 100644 --- 
a/mozjs-sys/build.rs +++ b/mozjs-sys/build.rs @@ -138,6 +138,7 @@ fn main() { fn build_spidermonkey(build_dir: &Path) { let target = env::var("TARGET").unwrap(); + let cargo_manifest_dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); let make; #[cfg(windows)] @@ -210,7 +211,6 @@ fn build_spidermonkey(build_dir: &Path) { } cppflags.push(get_cc_rs_env_os("CPPFLAGS").unwrap_or_default()); - cmd.env("CPPFLAGS", cppflags); if let Some(makeflags) = env::var_os("CARGO_MAKEFLAGS") { cmd.env("MAKEFLAGS", makeflags); @@ -218,16 +218,31 @@ fn build_spidermonkey(build_dir: &Path) { let mut cxxflags = vec![]; + if env::var_os("CARGO_FEATURE_CUSTOM_ALLOC").is_some() { + let mut flags = vec![]; + let include_dir_str = env::var("SERVO_CUSTOM_ALLOC_INCLUDE_DIR").expect("Required variable not set with feature custom-alloc"); + let include_dir = Path::new(&include_dir_str); + assert!(include_dir.is_dir(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must be set to a valid directory"); + assert!(include_dir.join("servo_embedder_memory_wrap.h").is_file(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must contain header `servo_embedder_memory_wrap.h`"); + flags.push(format!("-I{}", &include_dir_str.replace("\\", "/"))); + flags.push("-DSERVO_EMBEDDER_MEMORY".to_string()); + println!("cargo:rerun-if-changed={}", include_dir_str); + + cppflags.extend(flags.iter().map(|s| OsString::from(s))); + cxxflags.extend(flags); + } + if target.contains("apple") || target.contains("freebsd") || target.contains("ohos") { cxxflags.push(String::from("-stdlib=libc++")); } + cmd.env("CPPFLAGS", cppflags); + let base_cxxflags = env::var("CXXFLAGS").unwrap_or_default(); let mut cxxflags = cxxflags.join(" "); cxxflags.push_str(&base_cxxflags); cmd.env("CXXFLAGS", cxxflags); - let cargo_manifest_dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); let result = cmd .args(&["-R", "-f"]) .arg(cargo_manifest_dir.join("makefile.cargo")) @@ -292,6 +307,16 @@ fn build(build_dir: &Path, target: BuildTarget) { build.flag(include_file_flag(build.get_compiler().is_like_msvc())); build.flag(&js_config_path(build_dir)); + let cargo_manifest_dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); + + if env::var_os("CARGO_FEATURE_CUSTOM_ALLOC").is_some() { + let include_dir_str = env::var("SERVO_CUSTOM_ALLOC_INCLUDE_DIR").expect("Required variable not set with feature custom-alloc"); + let include_dir = Path::new(&include_dir_str); + assert!(include_dir.is_dir(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must be set to a valid directory"); + assert!(include_dir.join("servo_embedder_memory_wrap.h").is_file(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must contain header `servo_embedder_memory_wrap.h`"); + build.include(include_dir); + build.define("SERVO_EMBEDDER_MEMORY", ""); + } for path in target.include_paths(build_dir) { build.include(path); @@ -313,6 +338,8 @@ fn build_bindings(build_dir: &Path, target: BuildTarget) { config &= !CodegenConfig::DESTRUCTORS; config &= !CodegenConfig::METHODS; + let cargo_manifest_dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); + let mut builder = bindgen::builder() .rust_target(minimum_rust_target()) .header(target.path()) @@ -332,6 +359,21 @@ fn build_bindings(build_dir: &Path, target: BuildTarget) { .clang_arg(env::var("WASI_SYSROOT").unwrap().to_string()); } + let custom_alloc_flags = if env::var_os("CARGO_FEATURE_CUSTOM_ALLOC").is_some() { + let mut flags = vec![]; + let include_dir_str = env::var("SERVO_CUSTOM_ALLOC_INCLUDE_DIR").expect("Required variable not set with feature custom-alloc"); + let 
include_dir = Path::new(&include_dir_str); + assert!(include_dir.is_dir(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must be set to a valid directory"); + assert!(include_dir.join("servo_embedder_memory_wrap.h").is_file(), "SERVO_CUSTOM_ALLOC_INCLUDE_DIR must contain header `servo_embedder_memory_wrap.h`"); + flags.push(format!("-I{}", &include_dir_str.replace("\\", "/"))); + flags.push("-DSERVO_EMBEDDER_MEMORY".to_string()); + flags + } else { + vec![] + }; + builder = builder.clang_args(custom_alloc_flags); + + if target == BuildTarget::JSGlue { builder = builder .parse_callbacks(Box::new(JSGlueCargoCallbacks::default())) @@ -432,6 +474,8 @@ fn link_static_lib_binaries(build_dir: &Path) { // needing to use the WASI-SDK's clang for linking, which is annoying. println!("cargo:rustc-link-lib=stdc++") } + // TODO: link against lib from env + // println!("cargo:rustc-link-lib=mimalloc"); if target.contains("wasi") { println!("cargo:rustc-link-lib=wasi-emulated-getpid"); @@ -463,6 +507,9 @@ fn should_build_from_source() -> bool { } else if env::var_os("CARGO_FEATURE_INTL").is_none() { println!("intl feature is disabled. Building from source directly."); true + } else if env::var_os("CARGO_FEATURE_CUSTOM_ALLOC").is_some() { + println!("custom-alloc feature is enabled. Building from source directly."); + true } else if !env::var_os("CARGO_FEATURE_JIT").is_some() { println!("jit feature is NOT enabled. Building from source directly."); true diff --git a/mozjs-sys/mozjs/js/public/Utility.h b/mozjs-sys/mozjs/js/public/Utility.h index f3d5d947b7a..6c578362022 100644 --- a/mozjs-sys/mozjs/js/public/Utility.h +++ b/mozjs-sys/mozjs/js/public/Utility.h @@ -431,7 +431,7 @@ static inline void js_free(void* p) { // currently can't enforce that all memory freed here was allocated by // js_malloc(). All other memory should go through a different allocator and // deallocator. - free(p); + free_impl(p); } #endif /* JS_USE_CUSTOM_ALLOCATOR */ diff --git a/mozjs-sys/mozjs/memory/build/embedder_fallback.cpp b/mozjs-sys/mozjs/memory/build/embedder_fallback.cpp new file mode 100644 index 00000000000..d10291460c5 --- /dev/null +++ b/mozjs-sys/mozjs/memory/build/embedder_fallback.cpp @@ -0,0 +1,38 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozmemory.h" +#include "mozjemalloc.h" + +// Expected to provide SERVO_EMBEDDER_MALLOC_PREFIX +#include "servo_embedder_malloc_prefix.h" + +#define MOZ_EMBED_CONCAT(a, b) a##b +#define MOZ_EMBED_CONCAT1(a, b) MOZ_EMBED_CONCAT(a, b) + +// embedder responsible for providing memalign + +struct EmbedderMalloc { +#define MALLOC_DECL(name, return_type, ...) \ + static inline return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) { \ + return :: MOZ_EMBED_CONCAT1(SERVO_EMBEDDER_MALLOC_PREFIX, name)(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \ + } +#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE +#include "malloc_decls.h" +}; + +#undef MOZ_EMBED_CONCAT +#undef MOZ_EMBED_CONCAT1 + +// Todo: why are these functions free-standing, is this correct? (copied from fallback) + +#define MALLOC_DECL(name, return_type, ...) 
\ + MOZ_JEMALLOC_API return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) { \ + return DummyArenaAllocator::name( \ + ARGS_HELPER(ARGS, ##__VA_ARGS__)); \ + } +#define MALLOC_FUNCS MALLOC_FUNCS_ARENA +#include "malloc_decls.h" diff --git a/mozjs-sys/mozjs/memory/build/moz.build b/mozjs-sys/mozjs/memory/build/moz.build index df1403933e5..f5ad9654d11 100644 --- a/mozjs-sys/mozjs/memory/build/moz.build +++ b/mozjs-sys/mozjs/memory/build/moz.build @@ -38,6 +38,10 @@ if CONFIG["MOZ_MEMORY"]: "mozmemory_wrap.cpp", "Mutex.cpp", ] +elif CONFIG["SERVO_EMBEDDER_MEMORY"]: + UNIFIED_SOURCES += [ + "embedder_fallback.cpp", + ] else: UNIFIED_SOURCES += [ "fallback.cpp", diff --git a/mozjs-sys/mozjs/memory/build/mozmemory_wrap.h b/mozjs-sys/mozjs/memory/build/mozmemory_wrap.h index 6d7fc33fd25..cf66c842e55 100644 --- a/mozjs-sys/mozjs/memory/build/mozmemory_wrap.h +++ b/mozjs-sys/mozjs/memory/build/mozmemory_wrap.h @@ -131,6 +131,14 @@ # define MOZ_JEMALLOC_API_NODISCARD MOZ_EXTERN_C [[nodiscard]] #endif +#ifdef SERVO_EMBEDDER_MEMORY +// Provide embedder versions of mozmem_malloc_impl +# include +# if ! (defined(mozmem_malloc_impl) && defined(mozmem_dup_impl)) +# error "servo_embedder_memory_wrap.h must contain mozmem_malloc_impl and mozmem_dup_impl definition" +# endif +#endif // SERVO_EMBEDDER_MEMORY + #ifndef mozmem_malloc_impl # define mozmem_malloc_impl(a) a #endif diff --git a/mozjs-sys/mozjs/memory/mozalloc/mozalloc.cpp b/mozjs-sys/mozjs/memory/mozalloc/mozalloc.cpp index aef8ab943a6..1b6dec9ac31 100644 --- a/mozjs-sys/mozjs/memory/mozalloc/mozalloc.cpp +++ b/mozjs-sys/mozjs/memory/mozalloc/mozalloc.cpp @@ -7,11 +7,23 @@ #include // for size_t -#if defined(MALLOC_H) +// We want the embedder to define all allocation functions. +#if defined(MALLOC_H) && ! defined(SERVO_EMBEDDER_MEMORY) # include MALLOC_H // for memalign, malloc_size, malloc_us #endif // if defined(MALLOC_H) -#if !defined(MOZ_MEMORY) + +#if defined(SERVO_EMBEDDER_MEMORY) +#include +# define malloc_impl mozmem_malloc_impl(malloc) +# define calloc_impl mozmem_malloc_impl(calloc) +# define realloc_impl mozmem_malloc_impl(realloc) +# define free_impl mozmem_malloc_impl(free) +# define memalign_impl mozmem_malloc_impl(memalign) +# define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size) +# define strdup_impl mozmem_dup_impl(strdup) +# define strndup_impl mozmem_dup_impl(strndup) +#elif !defined(MOZ_MEMORY) // When jemalloc is disabled, or when building the static runtime variant, // we need not to use the suffixes. @@ -124,7 +136,9 @@ void* moz_xmemalign(size_t boundary, size_t size) { size_t moz_malloc_usable_size(void* ptr) { if (!ptr) return 0; -#if defined(XP_DARWIN) +#if defined(SERVO_EMBEDDER_MEMORY) + return malloc_usable_size_impl(ptr); +#elif defined(XP_DARWIN) return malloc_size(ptr); #elif defined(HAVE_MALLOC_USABLE_SIZE) || defined(MOZ_MEMORY) return malloc_usable_size_impl(ptr); diff --git a/mozjs-sys/mozjs/mfbt/StringBuffer.h b/mozjs-sys/mozjs/mfbt/StringBuffer.h index 2ba42627eae..c3a038d1a25 100644 --- a/mozjs-sys/mozjs/mfbt/StringBuffer.h +++ b/mozjs-sys/mozjs/mfbt/StringBuffer.h @@ -61,7 +61,7 @@ class StringBuffer { "mStorageSize will truncate"); size_t bytes = sizeof(StringBuffer) + aSize; - void* hdr = aArena ? moz_arena_malloc(*aArena, bytes) : malloc(bytes); + void* hdr = aArena ? moz_arena_malloc(*aArena, bytes) : malloc_impl(bytes); if (!hdr) { return nullptr; } @@ -152,7 +152,7 @@ class StringBuffer { size_t bytes = sizeof(StringBuffer) + aSize; aHdr = aArena ? 
(StringBuffer*)moz_arena_realloc(*aArena, aHdr, bytes) - : (StringBuffer*)realloc(aHdr, bytes); + : (StringBuffer*)realloc_impl(aHdr, bytes); if (aHdr) { detail::RefCountLogger::logAddRef(aHdr, 1); aHdr->mStorageSize = aSize; @@ -187,7 +187,7 @@ class StringBuffer { // on other threads, that is, to ensure that writes prior to that release // are now visible on this thread. count = mRefCount.load(std::memory_order_acquire); - free(this); // We were allocated with malloc. + free_impl(this); // We were allocated with malloc. } } diff --git a/mozjs/Cargo.toml b/mozjs/Cargo.toml index 3933afe2dea..7a35c36e36d 100644 --- a/mozjs/Cargo.toml +++ b/mozjs/Cargo.toml @@ -12,6 +12,7 @@ doctest = false [features] default = ["jit", "libz-sys", "intl"] +custom-alloc = ["mozjs_sys/custom-alloc"] debugmozjs = ["mozjs_sys/debugmozjs"] profilemozjs = ["mozjs_sys/profilemozjs"] jit = ['mozjs_sys/jit']
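Not part of the diff above: a minimal sketch of how the embedder crate could additionally route its Rust-side allocations through mimalloc via the `mimalloc` crate that the example already depends on, so that both SpiderMonkey (built with the `custom-alloc` feature) and the embedder share a single allocator. The example's `main.rs` does not currently do this; the `GLOBAL` static name is illustrative.

```rust
// Sketch (assumption, not in the diff): install mimalloc as the Rust global
// allocator so Rust-side and SpiderMonkey-side allocations use the same heap.
use mimalloc::MiMalloc;

// `MiMalloc` implements `GlobalAlloc` in the `mimalloc` crate.
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

fn main() {
    // Every Rust-side heap allocation below now goes through mimalloc.
    let greeting = String::from("allocated with mimalloc");
    println!("{greeting}");
}
```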