50 changes: 24 additions & 26 deletions crates/wasmtime/src/runtime/vm/cow.rs
@@ -514,21 +514,23 @@ impl MemoryImageSlot {
/// argument is the maximum amount of memory to keep resident in this
/// process's memory on Linux. Up to that much memory will be `memset` to
/// zero, while the rest of it will be reset or released with `madvise`.
///
/// Returns the number of bytes still resident in memory after this function
/// has returned.
#[allow(dead_code, reason = "only used in some cfgs")]
pub(crate) fn clear_and_remain_ready(
&mut self,
pagemap: Option<&PageMap>,
keep_resident: HostAlignedByteCount,
decommit: impl FnMut(*mut u8, usize),
) -> Result<()> {
) -> Result<usize> {
assert!(self.dirty);

unsafe {
self.reset_all_memory_contents(pagemap, keep_resident, decommit)?;
}
let bytes_resident =
unsafe { self.reset_all_memory_contents(pagemap, keep_resident, decommit)? };

self.dirty = false;
Ok(())
Ok(bytes_resident)
}

#[allow(dead_code, reason = "only used in some cfgs")]
@@ -537,7 +539,7 @@ impl MemoryImageSlot {
pagemap: Option<&PageMap>,
keep_resident: HostAlignedByteCount,
decommit: impl FnMut(*mut u8, usize),
) -> Result<()> {
) -> Result<usize> {
match vm::decommit_behavior() {
DecommitBehavior::Zero => {
// If we're not on Linux then there's no generic platform way to
@@ -546,13 +548,13 @@
//
// Additionally the previous image, if any, is dropped here
// since it's no longer applicable to this mapping.
self.reset_with_anon_memory()
self.reset_with_anon_memory()?;
Ok(0)
}
DecommitBehavior::RestoreOriginalMapping => {
unsafe {
self.reset_with_original_mapping(pagemap, keep_resident, decommit);
}
Ok(())
let bytes_resident =
unsafe { self.reset_with_original_mapping(pagemap, keep_resident, decommit) };
Ok(bytes_resident)
}
}
}
@@ -563,29 +565,25 @@
pagemap: Option<&PageMap>,
keep_resident: HostAlignedByteCount,
decommit: impl FnMut(*mut u8, usize),
) {
) -> usize {
assert_eq!(
vm::decommit_behavior(),
DecommitBehavior::RestoreOriginalMapping
);

unsafe {
match &self.image {
return match &self.image {
// If there's a backing image then manually resetting a region
// is a bit trickier than without an image, so delegate to the
// helper function below.
Some(image) => {
reset_with_pagemap(
pagemap,
self.base.as_mut_ptr(),
self.accessible,
keep_resident,
|region| {
manually_reset_region(self.base.as_mut_ptr().addr(), image, region)
},
decommit,
);
}
Some(image) => reset_with_pagemap(
pagemap,
self.base.as_mut_ptr(),
self.accessible,
keep_resident,
|region| manually_reset_region(self.base.as_mut_ptr().addr(), image, region),
decommit,
),

// If there's no memory image for this slot then pages are always
// manually reset back to zero or given to `decommit`.
@@ -597,7 +595,7 @@
|region| region.fill(0),
decommit,
),
}
};
}

/// Manually resets `region` back to its original contents as specified
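As context for the signature change above: `clear_and_remain_ready` used to return `Result<()>` and now reports how many bytes were left resident (zeroed in place rather than handed to `decommit`). The following is a minimal sketch of a hypothetical caller, not code from this PR: `ResidencyStats` and `clear_slot` are invented names, while `MemoryImageSlot`, `PageMap`, and `HostAlignedByteCount` are the types used in the diff.

// Hypothetical caller (illustration only): accumulate how many bytes stay
// resident after a slot is cleared.
struct ResidencyStats {
    bytes_resident: usize,
}

fn clear_slot(
    slot: &mut MemoryImageSlot,
    pagemap: Option<&PageMap>,
    keep_resident: HostAlignedByteCount,
    stats: &mut ResidencyStats,
) -> Result<()> {
    // Regions passed to the closure are the ones to decommit; the pooling
    // allocator queues them for a batched madvise, this sketch ignores them.
    let resident = slot.clear_and_remain_ready(pagemap, keep_resident, |_ptr, _len| {})?;
    stats.bytes_resident += resident;
    Ok(())
}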
51 changes: 36 additions & 15 deletions crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs
@@ -318,9 +318,13 @@ pub struct PoolingInstanceAllocator {

#[cfg(feature = "gc")]
gc_heaps: GcHeapPool,
#[cfg(feature = "gc")]
live_gc_heaps: AtomicUsize,

#[cfg(feature = "async")]
stacks: StackPool,
#[cfg(feature = "async")]
live_stacks: AtomicUsize,

pagemap: Option<PageMap>,
}
@@ -350,10 +354,16 @@ impl Drop for PoolingInstanceAllocator {
debug_assert!(self.tables.is_empty());

#[cfg(feature = "gc")]
debug_assert!(self.gc_heaps.is_empty());
{
debug_assert!(self.gc_heaps.is_empty());
debug_assert_eq!(self.live_gc_heaps.load(Ordering::Acquire), 0);
}

#[cfg(feature = "async")]
debug_assert!(self.stacks.is_empty());
{
debug_assert!(self.stacks.is_empty());
debug_assert_eq!(self.live_stacks.load(Ordering::Acquire), 0);
}
}
}

@@ -372,8 +382,12 @@ impl PoolingInstanceAllocator {
live_tables: AtomicUsize::new(0),
#[cfg(feature = "gc")]
gc_heaps: GcHeapPool::new(config)?,
#[cfg(feature = "gc")]
live_gc_heaps: AtomicUsize::new(0),
#[cfg(feature = "async")]
stacks: StackPool::new(config)?,
#[cfg(feature = "async")]
live_stacks: AtomicUsize::new(0),
pagemap: match config.pagemap_scan {
Enabled::Auto => PageMap::new(),
Enabled::Yes => Some(PageMap::new().ok_or_else(|| {
@@ -704,7 +718,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
// reservation.
let mut image = memory.unwrap_static_image();
let mut queue = DecommitQueue::default();
image
let bytes_resident = image
.clear_and_remain_ready(
self.pagemap.as_ref(),
self.memories.keep_resident,
@@ -722,7 +736,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
// SAFETY: this image is not in use and its memory regions were enqueued
// with `push_raw` above.
unsafe {
queue.push_memory(allocation_index, image);
queue.push_memory(allocation_index, image, bytes_resident);
}
self.merge_or_flush(queue);
}
@@ -770,42 +784,45 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
// method is called and additionally all image ranges are pushed with
// the understanding that the memory won't get used until the whole
// queue is flushed.
unsafe {
let bytes_resident = unsafe {
self.tables.reset_table_pages_to_zero(
self.pagemap.as_ref(),
allocation_index,
&mut table,
|ptr, len| {
queue.push_raw(ptr, len);
},
);
}
)
};

// SAFETY: the table has had all its memory regions enqueued above.
unsafe {
queue.push_table(allocation_index, table);
queue.push_table(allocation_index, table, bytes_resident);
}
self.merge_or_flush(queue);
}

#[cfg(feature = "async")]
fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
self.with_flush_and_retry(|| self.stacks.allocate())
let ret = self.with_flush_and_retry(|| self.stacks.allocate())?;
self.live_stacks.fetch_add(1, Ordering::Relaxed);
Ok(ret)
}

#[cfg(feature = "async")]
unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
self.live_stacks.fetch_sub(1, Ordering::Relaxed);
let mut queue = DecommitQueue::default();
// SAFETY: the stack is no longer in use by definition when this
// function is called and memory ranges pushed here are otherwise no
// longer in use.
unsafe {
let bytes_resident = unsafe {
self.stacks
.zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
}
.zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len))
};
// SAFETY: this stack's memory regions were enqueued above.
unsafe {
queue.push_stack(stack);
queue.push_stack(stack, bytes_resident);
}
self.merge_or_flush(queue);
}
@@ -834,8 +851,11 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
memory_alloc_index: MemoryAllocationIndex,
memory: Memory,
) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
self.gc_heaps
.allocate(engine, gc_runtime, memory_alloc_index, memory)
let ret = self
.gc_heaps
.allocate(engine, gc_runtime, memory_alloc_index, memory)?;
self.live_gc_heaps.fetch_add(1, Ordering::Relaxed);
Ok(ret)
}

#[cfg(feature = "gc")]
@@ -844,6 +864,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
allocation_index: GcHeapAllocationIndex,
gc_heap: Box<dyn GcHeap>,
) -> (MemoryAllocationIndex, Memory) {
self.live_gc_heaps.fetch_sub(1, Ordering::Relaxed);
self.gc_heaps.deallocate(allocation_index, gc_heap)
}

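The new `live_gc_heaps` and `live_stacks` fields follow the usual live-resource counter pattern: a relaxed `fetch_add` when a resource is handed out, a relaxed `fetch_sub` when it comes back, and a drop-time assertion that everything was returned. A self-contained sketch of that pattern, using an invented `CountedPool` type rather than the real allocator:

use std::sync::atomic::{AtomicUsize, Ordering};

struct CountedPool {
    live: AtomicUsize,
}

impl CountedPool {
    fn new() -> Self {
        Self { live: AtomicUsize::new(0) }
    }

    fn allocate(&self) {
        // Relaxed is enough: the counter is a statistic and does not guard
        // access to any slot memory.
        self.live.fetch_add(1, Ordering::Relaxed);
    }

    fn deallocate(&self) {
        self.live.fetch_sub(1, Ordering::Relaxed);
    }
}

impl Drop for CountedPool {
    fn drop(&mut self) {
        // Mirrors the `debug_assert_eq!` added to `Drop for
        // PoolingInstanceAllocator`: every allocation must be returned
        // before the pool is torn down.
        debug_assert_eq!(self.live.load(Ordering::Acquire), 0);
    }
}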
@@ -51,10 +51,10 @@ unsafe impl Sync for SendSyncStack {}
#[derive(Default)]
pub struct DecommitQueue {
raw: SmallVec<[IoVec; 2]>,
memories: SmallVec<[(MemoryAllocationIndex, MemoryImageSlot); 1]>,
tables: SmallVec<[(TableAllocationIndex, Table); 1]>,
memories: SmallVec<[(MemoryAllocationIndex, MemoryImageSlot, usize); 1]>,
tables: SmallVec<[(TableAllocationIndex, Table, usize); 1]>,
#[cfg(feature = "async")]
stacks: SmallVec<[SendSyncStack; 1]>,
stacks: SmallVec<[(SendSyncStack, usize); 1]>,
//
// TODO: GC heaps are not well-integrated with the pooling allocator
// yet. Once we better integrate them, we should start (optionally) zeroing
@@ -123,8 +123,10 @@ impl DecommitQueue {
&mut self,
allocation_index: MemoryAllocationIndex,
image: MemoryImageSlot,
bytes_resident: usize,
) {
self.memories.push((allocation_index, image));
self.memories
.push((allocation_index, image, bytes_resident));
}

/// Push a table into the queue.
@@ -133,8 +135,13 @@
///
/// This table should not be in use, and its decommit regions must have
/// already been enqueued via `self.enqueue_raw`.
pub unsafe fn push_table(&mut self, allocation_index: TableAllocationIndex, table: Table) {
self.tables.push((allocation_index, table));
pub unsafe fn push_table(
&mut self,
allocation_index: TableAllocationIndex,
table: Table,
bytes_resident: usize,
) {
self.tables.push((allocation_index, table, bytes_resident));
}

/// Push a stack into the queue.
@@ -144,8 +151,8 @@
/// This stack should not be in use, and its decommit regions must have
/// already been enqueued via `self.enqueue_raw`.
#[cfg(feature = "async")]
pub unsafe fn push_stack(&mut self, stack: FiberStack) {
self.stacks.push(SendSyncStack(stack));
pub unsafe fn push_stack(&mut self, stack: FiberStack, bytes_resident: usize) {
self.stacks.push((SendSyncStack(stack), bytes_resident));
}

fn decommit_all_raw(&mut self) {
@@ -174,23 +181,25 @@ impl DecommitQueue {
// lists. This is safe, and they are ready for reuse, now that their
// memory regions have been decommitted.
let mut deallocated_any = false;
for (allocation_index, image) in self.memories {
for (allocation_index, image, bytes_resident) in self.memories {
deallocated_any = true;
unsafe {
pool.memories.deallocate(allocation_index, image);
pool.memories
.deallocate(allocation_index, image, bytes_resident);
}
}
for (allocation_index, table) in self.tables {
for (allocation_index, table, bytes_resident) in self.tables {
deallocated_any = true;
unsafe {
pool.tables.deallocate(allocation_index, table);
pool.tables
.deallocate(allocation_index, table, bytes_resident);
}
}
#[cfg(feature = "async")]
for stack in self.stacks {
for (stack, bytes_resident) in self.stacks {
deallocated_any = true;
unsafe {
pool.stacks.deallocate(stack.0);
pool.stacks.deallocate(stack.0, bytes_resident);
}
}

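The queue changes above are mechanical: each queued memory, table, or stack now carries the `usize` residency figure so it can be forwarded to the owning pool during the flush. A simplified stand-in showing that data flow, using invented `Slot`, `Pool`, and `Queue` types rather than the real `DecommitQueue`:

struct Slot;

#[derive(Default)]
struct Pool {
    unused_bytes_resident: usize,
}

#[derive(Default)]
struct Queue {
    slots: Vec<(Slot, usize)>,
}

impl Queue {
    fn push_slot(&mut self, slot: Slot, bytes_resident: usize) {
        // Analogous to `push_memory` / `push_table` / `push_stack`: the
        // residency count travels with the slot it describes.
        self.slots.push((slot, bytes_resident));
    }

    fn flush(self, pool: &mut Pool) {
        for (slot, bytes_resident) in self.slots {
            // The real code hands `bytes_resident` to
            // `pool.memories.deallocate(...)` and friends; here it is only
            // tallied.
            let _ = slot;
            pool.unused_bytes_resident += bytes_resident;
        }
    }
}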
@@ -133,7 +133,7 @@ impl GcHeapPool {
heaps[allocation_index.index()].dealloc(heap)
};

self.index_allocator.free(SlotId(allocation_index.0));
self.index_allocator.free(SlotId(allocation_index.0), 0);

(memory_alloc_index, memory)
}
@@ -69,15 +69,24 @@ impl StackPool {
&self,
_stack: &mut wasmtime_fiber::FiberStack,
_decommit: impl FnMut(*mut u8, usize),
) {
) -> usize {
// No need to actually zero the stack, since the stack won't ever be
// reused on non-unix systems.
0
}

/// Safety: see the unix implementation.
pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack, _bytes_resident: usize) {
self.live_stacks.fetch_sub(1, Ordering::AcqRel);
// A no-op as we don't actually own the fiber stack on Windows.
let _ = stack;
}

pub fn unused_warm_slots(&self) -> u32 {
0
}

pub fn unused_bytes_resident(&self) -> Option<usize> {
None
}
}
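Finally, a small sketch of how the two accessors added to this stub might be consumed, assuming `None` from `unused_bytes_resident` means residency is simply not tracked on this platform (as in the non-unix implementation above). The `report` function is invented for illustration:

fn report(unused_warm_slots: u32, unused_bytes_resident: Option<usize>) -> String {
    match unused_bytes_resident {
        Some(bytes) => format!("{unused_warm_slots} warm slots, {bytes} bytes resident"),
        None => format!("{unused_warm_slots} warm slots, residency not tracked"),
    }
}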