Skip to content

Commit a06b6b3

Browse files
committed
Dynamically detect support for Wasmtime's pooling allocator
This commit is intended to address #2119 and mirror bytecodealliance/wasmtime#8610. The base problem is that some systems are configured with smaller amounts of virtual memory than other systems, for example some aarch64 and riscv64 systems are shown to have only 39 bits of virtual address space rather than the 48 by default on x86_64. This means that the pooling allocator can't be initialized on these platforms since it needs more virtual memory than that. This changes Spin to dynamically choose whether to use the pooling allocator. It's still used by default in Wasmtime but a dynamic probe is performed first to determine whether it's going to work. While here I also added an env var to control this behavior as an escape hatch if that's needed in the future too. Closes #2119 Signed-off-by: Alex Crichton <[email protected]>
1 parent c44da5f commit a06b6b3

File tree

1 file changed

+90
-36
lines changed

1 file changed

+90
-36
lines changed

crates/core/src/lib.rs

Lines changed: 90 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ mod store;
1515
pub mod wasi_2023_10_18;
1616
pub mod wasi_2023_11_10;
1717

18+
use std::sync::OnceLock;
1819
use std::{path::PathBuf, time::Duration};
1920

2021
use anyhow::Result;
@@ -92,42 +93,44 @@ impl Default for Config {
9293
inner.epoch_interruption(true);
9394
inner.wasm_component_model(true);
9495

95-
// By default enable the pooling instance allocator in Wasmtime. This
96-
// drastically reduces syscall/kernel overhead for wasm execution,
97-
// especially in async contexts where async stacks must be allocated.
98-
// The general goal here is that the default settings here rarely, if
99-
// ever, need to be modified. As a result there aren't fine-grained
100-
// knobs for each of these settings just yet and instead they're
101-
// generally set to defaults. Environment-variable-based fallbacks are
102-
// supported though as an escape valve for if this is a problem.
103-
let mut pooling_config = PoolingAllocationConfig::default();
104-
pooling_config
105-
.total_component_instances(env("SPIN_WASMTIME_INSTANCE_COUNT", 1_000))
106-
// This number accounts for internal data structures that Wasmtime allocates for each instance.
107-
// Instance allocation is proportional to the number of "things" in a wasm module like functions,
108-
// globals, memories, etc. Instance allocations are relatively small and are largely inconsequential
109-
// compared to other runtime state, but a number needs to be chosen here so a relatively large threshold
110-
// of 10MB is arbitrarily chosen. It should be unlikely that any reasonably-sized module hits this limit.
111-
.max_component_instance_size(
112-
env("SPIN_WASMTIME_INSTANCE_SIZE", (10 * MB) as u32) as usize
113-
)
114-
.max_core_instances_per_component(env("SPIN_WASMTIME_CORE_INSTANCE_COUNT", 200))
115-
.max_tables_per_component(env("SPIN_WASMTIME_INSTANCE_TABLES", 20))
116-
.table_elements(env("SPIN_WASMTIME_INSTANCE_TABLE_ELEMENTS", 30_000))
117-
// The number of memories an instance can have effectively limits the number of inner components
118-
// a composed component can have (since each inner component has its own memory). We default to 32 for now, and
119-
// we'll see how often this limit gets reached.
120-
.max_memories_per_component(env("SPIN_WASMTIME_INSTANCE_MEMORIES", 32))
121-
.total_memories(env("SPIN_WASMTIME_TOTAL_MEMORIES", 1_000))
122-
.total_tables(env("SPIN_WASMTIME_TOTAL_TABLES", 2_000))
123-
// Nothing is lost from allowing the maximum size of memory for
124-
// all instance as it's still limited through other the normal
125-
// `StoreLimitsAsync` accounting method too.
126-
.memory_pages(4 * GB / WASM_PAGE_SIZE)
127-
// These numbers are completely arbitrary at something above 0.
128-
.linear_memory_keep_resident((2 * MB) as usize)
129-
.table_keep_resident((MB / 2) as usize);
130-
inner.allocation_strategy(InstanceAllocationStrategy::Pooling(pooling_config));
96+
if use_pooling_allocator_by_default() {
97+
// By default enable the pooling instance allocator in Wasmtime. This
98+
// drastically reduces syscall/kernel overhead for wasm execution,
99+
// especially in async contexts where async stacks must be allocated.
100+
// The general goal here is that the default settings here rarely, if
101+
// ever, need to be modified. As a result there aren't fine-grained
102+
// knobs for each of these settings just yet and instead they're
103+
// generally set to defaults. Environment-variable-based fallbacks are
104+
// supported though as an escape valve for if this is a problem.
105+
let mut pooling_config = PoolingAllocationConfig::default();
106+
pooling_config
107+
.total_component_instances(env("SPIN_WASMTIME_INSTANCE_COUNT", 1_000))
108+
// This number accounts for internal data structures that Wasmtime allocates for each instance.
109+
// Instance allocation is proportional to the number of "things" in a wasm module like functions,
110+
// globals, memories, etc. Instance allocations are relatively small and are largely inconsequential
111+
// compared to other runtime state, but a number needs to be chosen here so a relatively large threshold
112+
// of 10MB is arbitrarily chosen. It should be unlikely that any reasonably-sized module hits this limit.
113+
.max_component_instance_size(
114+
env("SPIN_WASMTIME_INSTANCE_SIZE", (10 * MB) as u32) as usize
115+
)
116+
.max_core_instances_per_component(env("SPIN_WASMTIME_CORE_INSTANCE_COUNT", 200))
117+
.max_tables_per_component(env("SPIN_WASMTIME_INSTANCE_TABLES", 20))
118+
.table_elements(env("SPIN_WASMTIME_INSTANCE_TABLE_ELEMENTS", 30_000))
119+
// The number of memories an instance can have effectively limits the number of inner components
120+
// a composed component can have (since each inner component has its own memory). We default to 32 for now, and
121+
// we'll see how often this limit gets reached.
122+
.max_memories_per_component(env("SPIN_WASMTIME_INSTANCE_MEMORIES", 32))
123+
.total_memories(env("SPIN_WASMTIME_TOTAL_MEMORIES", 1_000))
124+
.total_tables(env("SPIN_WASMTIME_TOTAL_TABLES", 2_000))
125+
// Nothing is lost from allowing the maximum size of memory for
126+
// all instances as it's still limited through the normal
127+
// `StoreLimitsAsync` accounting method too.
128+
.memory_pages(4 * GB / WASM_PAGE_SIZE)
129+
// These numbers are completely arbitrary at something above 0.
130+
.linear_memory_keep_resident((2 * MB) as usize)
131+
.table_keep_resident((MB / 2) as usize);
132+
inner.allocation_strategy(InstanceAllocationStrategy::Pooling(pooling_config));
133+
}
131134

132135
return Self { inner };
133136

@@ -142,6 +145,57 @@ impl Default for Config {
142145
}
143146
}
144147

148+
/// The pooling allocator is tailor made for the `spin up` use case, so
149+
/// try to use it when we can. The main cost of the pooling allocator, however,
150+
/// is the virtual memory required to run it. Not all systems support the same
151+
/// amount of virtual memory, for example some aarch64 and riscv64 configuration
152+
/// only support 39 bits of virtual address space.
153+
///
154+
/// The pooling allocator, by default, will request 1000 linear memories each
155+
/// sized at 6G per linear memory. This is 6T of virtual memory which ends up
156+
/// being about 42 bits of the address space. This exceeds the 39 bit limit of
157+
/// some systems, so there the pooling allocator will fail by default.
158+
///
159+
/// This function attempts to dynamically determine the hint for the pooling
160+
/// allocator. This returns `true` if the pooling allocator should be used
161+
/// by default, or `false` otherwise.
162+
///
163+
/// The method for testing this is to allocate a 0-sized 64-bit linear memory
164+
/// with a maximum size that's N bits large where we force all memories to be
165+
/// static. This should attempt to acquire N bits of the virtual address space.
166+
/// If successful that should mean that the pooling allocator is OK to use, but
167+
/// if it fails then the pooling allocator is not used and the normal mmap-based
168+
/// implementation is used instead.
169+
fn use_pooling_allocator_by_default() -> bool {
170+
static USE_POOLING: OnceLock<bool> = OnceLock::new();
171+
const BITS_TO_TEST: u32 = 42;
172+
173+
*USE_POOLING.get_or_init(|| {
174+
// Enable manual control through env vars as an escape hatch
175+
match std::env::var("SPIN_WASMTIME_POOLING") {
176+
Ok(s) if s == "1" => return true,
177+
Ok(s) if s == "0" => return false,
178+
_ => {}
179+
}
180+
181+
// If the env var isn't set then perform the dynamic runtime probe
182+
let mut config = wasmtime::Config::new();
183+
config.wasm_memory64(true);
184+
config.static_memory_maximum_size(1 << BITS_TO_TEST);
185+
186+
match wasmtime::Engine::new(&config) {
187+
Ok(engine) => {
188+
let mut store = wasmtime::Store::new(&engine, ());
189+
// NB: the maximum size is in wasm pages to take out the 16-bits of
190+
// wasm page size here from the maximum size.
191+
let ty = wasmtime::MemoryType::new64(0, Some(1 << (BITS_TO_TEST - 16)));
192+
wasmtime::Memory::new(&mut store, ty).is_ok()
193+
}
194+
Err(_) => false,
195+
}
196+
})
197+
}
198+
145199
/// Host state data associated with individual [Store]s and [Instance]s.
146200
pub struct Data<T> {
147201
inner: T,

0 commit comments

Comments
 (0)