|
55 | 55 | //! ``` |
56 | 56 |
|
57 | 57 | use crate::externref::VMExternRef; |
58 | | -use crate::instance::Instance; |
59 | 58 | use crate::table::{Table, TableElementType}; |
60 | 59 | use crate::vmcontext::{VMCallerCheckedAnyfunc, VMContext}; |
61 | | -use crate::TrapReason; |
| 60 | +use crate::{SharedMemory, TrapReason}; |
62 | 61 | use anyhow::Result; |
63 | 62 | use std::mem; |
64 | 63 | use std::ptr::{self, NonNull}; |
| 64 | +use std::time::{Duration, Instant}; |
65 | 65 | use wasmtime_environ::{ |
66 | 66 | DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, Trap, |
67 | 67 | }; |
@@ -434,81 +434,81 @@ unsafe fn externref_global_set(vmctx: *mut VMContext, index: u32, externref: *mu |
434 | 434 | unsafe fn memory_atomic_notify( |
435 | 435 | vmctx: *mut VMContext, |
436 | 436 | memory_index: u32, |
437 | | - addr: u64, |
438 | | - _count: u32, |
| 437 | + addr_index: u64, |
| 438 | + count: u32, |
439 | 439 | ) -> Result<u32, TrapReason> { |
440 | 440 | let memory = MemoryIndex::from_u32(memory_index); |
441 | | - let instance = (*vmctx).instance(); |
442 | | - validate_atomic_addr(instance, memory, addr, 4, 4)?; |
443 | | - Err( |
444 | | - anyhow::anyhow!("unimplemented: wasm atomics (fn memory_atomic_notify) unsupported",) |
445 | | - .into(), |
446 | | - ) |
| 441 | + let instance = (*vmctx).instance_mut(); |
| 442 | + instance |
| 443 | + .get_memory(memory) |
| 444 | + .validate_addr(addr_index, 4, 4)?; |
| 445 | + |
| 446 | + let shared_mem = instance.get_runtime_memory(memory).as_shared_memory(); |
| 447 | + |
| 448 | + if count == 0 { |
| 449 | + return Ok(0); |
| 450 | + } |
| 451 | + |
| 452 | + let unparked_threads = shared_mem.map_or(0, |shared_mem| { |
| 453 | + // SAFETY: checked `addr_index` above |
| 454 | + unsafe { shared_mem.unchecked_atomic_notify(addr_index, count) } |
| 455 | + }); |
| 456 | + |
| 457 | + Ok(unparked_threads) |
447 | 458 | } |
448 | 459 |
|
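The rewritten `memory_atomic_notify` validates the 4-byte-aligned address, returns 0 when `count` is 0 or when the memory is not shared (a non-shared memory can have no waiters, hence the `map_or(0, ...)`), and otherwise delegates to the shared memory's waiter bookkeeping. As a rough sketch of what such bookkeeping could look like (the `WaiterTable` type and its `Condvar` strategy are illustrative assumptions, not the actual `SharedMemory` internals):

```rust
use std::collections::HashMap;
use std::sync::{Condvar, Mutex};

/// Hypothetical waiter bookkeeping for one shared memory; the real
/// structure lives inside `SharedMemory` and is not shown in this diff.
struct WaiterTable {
    /// Validated byte offset -> number of threads parked on it.
    parked: Mutex<HashMap<u64, u32>>,
    wakeup: Condvar,
}

impl WaiterTable {
    /// Wake at most `count` threads parked on `addr_index`, returning
    /// how many were woken -- the value `memory.atomic.notify` yields.
    fn notify(&self, addr_index: u64, count: u32) -> u32 {
        let mut parked = self.parked.lock().unwrap();
        let waiting = parked.get(&addr_index).copied().unwrap_or(0);
        let woken = waiting.min(count);
        if woken > 0 {
            *parked.get_mut(&addr_index).unwrap() = waiting - woken;
            // `Condvar` can only wake one or all threads, so this sketch
            // over-wakes and relies on waiters rechecking their state.
            self.wakeup.notify_all();
        }
        woken
    }
}
```

Production implementations typically use a futex-style parking lot rather than a per-address `Condvar`, but the contract is the same: the return value is the number of threads actually woken.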
449 | 460 | // Implementation of `memory.atomic.wait32` for locally defined memories. |
450 | 461 | unsafe fn memory_atomic_wait32( |
451 | 462 | vmctx: *mut VMContext, |
452 | 463 | memory_index: u32, |
453 | | - addr: u64, |
454 | | - _expected: u32, |
455 | | - _timeout: u64, |
| 464 | + addr_index: u64, |
| 465 | + expected: u32, |
| 466 | + timeout: u64, |
456 | 467 | ) -> Result<u32, TrapReason> { |
| 468 | + // Convert the timeout to an absolute `Instant` before taking any locks, so time spent blocking to acquire them does not extend the wait. |
| 469 | + let timeout = (timeout as i64 >= 0).then(|| Instant::now() + Duration::from_nanos(timeout)); |
| 470 | + |
457 | 471 | let memory = MemoryIndex::from_u32(memory_index); |
458 | | - let instance = (*vmctx).instance(); |
459 | | - validate_atomic_addr(instance, memory, addr, 4, 4)?; |
460 | | - Err( |
461 | | - anyhow::anyhow!("unimplemented: wasm atomics (fn memory_atomic_wait32) unsupported",) |
462 | | - .into(), |
463 | | - ) |
| 472 | + let instance = (*vmctx).instance_mut(); |
| 473 | + let addr = instance |
| 474 | + .get_memory(memory) |
| 475 | + .validate_addr(addr_index, 4, 4)?; |
| 476 | + |
| 477 | + let shared_mem: SharedMemory = instance |
| 478 | + .get_runtime_memory(memory) |
| 479 | + .as_shared_memory() |
| 480 | + .ok_or(Trap::AtomicWaitNonSharedMemory)?; |
| 481 | + |
| 482 | + // SAFETY: checked `addr_index` above |
| 483 | + let res = unsafe { shared_mem.unchecked_atomic_wait32(addr_index, addr, expected, timeout) }; |
| 484 | + Ok(res) |
464 | 485 | } |
465 | 486 |
|
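Note that both wait paths compute the deadline before touching the instance or any lock: the threads proposal encodes the timeout as a signed nanosecond count where any negative value means "wait forever", and converting it to an absolute `Instant` early keeps time spent acquiring the memory's internal lock from silently shortening or extending the wait. A minimal, runnable sketch of that conversion (`wait_deadline` is our name for it, not an API in this diff):

```rust
use std::time::{Duration, Instant};

/// Mirror of the conversion above: any negative timeout (interpreted
/// as i64) means "wait forever" and maps to `None`; otherwise the
/// nanosecond count becomes an absolute deadline.
fn wait_deadline(timeout: u64) -> Option<Instant> {
    (timeout as i64 >= 0).then(|| Instant::now() + Duration::from_nanos(timeout))
}

fn main() {
    // u64::MAX reinterprets as -1i64: an infinite wait.
    assert!(wait_deadline(u64::MAX).is_none());
    // A non-negative count becomes an absolute deadline.
    assert!(wait_deadline(1_000_000).is_some());
}
```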
466 | 487 | // Implementation of `memory.atomic.wait64` for locally defined memories. |
467 | 488 | unsafe fn memory_atomic_wait64( |
468 | 489 | vmctx: *mut VMContext, |
469 | 490 | memory_index: u32, |
470 | | - addr: u64, |
471 | | - _expected: u64, |
472 | | - _timeout: u64, |
| 491 | + addr_index: u64, |
| 492 | + expected: u64, |
| 493 | + timeout: u64, |
473 | 494 | ) -> Result<u32, TrapReason> { |
474 | | - let memory = MemoryIndex::from_u32(memory_index); |
475 | | - let instance = (*vmctx).instance(); |
476 | | - validate_atomic_addr(instance, memory, addr, 8, 8)?; |
477 | | - Err( |
478 | | - anyhow::anyhow!("unimplemented: wasm atomics (fn memory_atomic_wait64) unsupported",) |
479 | | - .into(), |
480 | | - ) |
481 | | -} |
482 | | - |
483 | | -macro_rules! ensure { |
484 | | - ($cond:expr, $trap:expr) => { |
485 | | - if !($cond) { |
486 | | - return Err($trap); |
487 | | - } |
488 | | - }; |
489 | | -} |
| 495 | + // Convert the timeout to an absolute `Instant` before taking any locks, so time spent blocking to acquire them does not extend the wait. |
| 496 | + let timeout = (timeout as i64 >= 0).then(|| Instant::now() + Duration::from_nanos(timeout)); |
490 | 497 |
|
491 | | -/// In the configurations where bounds checks were elided in JIT code (because |
492 | | -/// we are using static memories with virtual memory guard pages) this manual |
493 | | -/// check is here so we don't segfault from Rust. For other configurations, |
494 | | -/// these checks are required anyways. |
495 | | -unsafe fn validate_atomic_addr( |
496 | | - instance: &Instance, |
497 | | - memory: MemoryIndex, |
498 | | - addr: u64, |
499 | | - access_size: u64, |
500 | | - access_alignment: u64, |
501 | | -) -> Result<(), Trap> { |
502 | | - debug_assert!(access_alignment.is_power_of_two()); |
503 | | - ensure!(addr % access_alignment == 0, Trap::HeapMisaligned); |
504 | | - |
505 | | - let length = u64::try_from(instance.get_memory(memory).current_length()).unwrap(); |
506 | | - ensure!( |
507 | | - addr.saturating_add(access_size) < length, |
508 | | - Trap::MemoryOutOfBounds |
509 | | - ); |
510 | | - |
511 | | - Ok(()) |
| 498 | + let memory = MemoryIndex::from_u32(memory_index); |
| 499 | + let instance = (*vmctx).instance_mut(); |
| 500 | + let addr = instance |
| 501 | + .get_memory(memory) |
| 502 | + .validate_addr(addr_index, 8, 8)?; |
| 503 | + |
| 504 | + let shared_mem: SharedMemory = instance |
| 505 | + .get_runtime_memory(memory) |
| 506 | + .as_shared_memory() |
| 507 | + .ok_or(Trap::AtomicWaitNonSharedMemory)?; |
| 508 | + |
| 509 | + // SAFETY: checked `addr_index` above |
| 510 | + let res = unsafe { shared_mem.unchecked_atomic_wait64(addr_index, addr, expected, timeout) }; |
| 511 | + Ok(res) |
512 | 512 | } |
513 | 513 |
|
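Both wait libcalls trap with `Trap::AtomicWaitNonSharedMemory` unless the memory is shared, then funnel into `SharedMemory` and surface the threads proposal's three result codes: 0 ("ok", woken by a notify), 1 ("not-equal", the loaded value differed from `expected`), and 2 ("timed-out"). A hedged outline of that contract, with `park_until` standing in for the real parking mechanism (this is not the `unchecked_atomic_wait64` implementation):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;

// Return values mandated by the threads proposal for `memory.atomic.wait*`.
const WOKEN: u32 = 0; // a matching `memory.atomic.notify` fired
const NOT_EQUAL: u32 = 1; // the loaded value differed from `expected`
const TIMED_OUT: u32 = 2; // the deadline passed first

/// Illustrative shape of a 64-bit wait; `park_until` must return `true`
/// when woken by a notify and `false` on timeout.
fn atomic_wait64(
    word: &AtomicU64,
    expected: u64,
    deadline: Option<Instant>,
    park_until: impl FnOnce(Option<Instant>) -> bool,
) -> u32 {
    // In a real implementation the load and the enqueue-as-waiter must
    // happen under one lock so a concurrent notify cannot be missed;
    // this sketch elides that atomicity.
    if word.load(Ordering::SeqCst) != expected {
        return NOT_EQUAL;
    }
    if park_until(deadline) {
        WOKEN
    } else {
        TIMED_OUT
    }
}
```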
514 | 514 | // Hook for when an instance runs out of fuel. |