@@ -434,17 +434,12 @@ unsafe fn externref_global_set(vmctx: *mut VMContext, index: u32, externref: *mu
434
434
unsafe fn memory_atomic_notify (
435
435
vmctx : * mut VMContext ,
436
436
memory_index : u32 ,
437
- addr : * mut u8 ,
437
+ addr : u64 ,
438
438
_count : u32 ,
439
439
) -> Result < u32 , TrapReason > {
440
- let addr = addr as usize ;
441
440
let memory = MemoryIndex :: from_u32 ( memory_index) ;
442
441
let instance = ( * vmctx) . instance ( ) ;
443
- // this should never overflow since addr + 4 either hits a guard page
444
- // or it's been validated to be in-bounds already. Double-check for now
445
- // just to be sure.
446
- let addr_to_check = addr. checked_add ( 4 ) . unwrap ( ) ;
447
- validate_atomic_addr ( instance, memory, addr_to_check) ?;
442
+ validate_atomic_addr ( instance, memory, addr, 4 , 4 ) ?;
448
443
Err (
449
444
anyhow:: anyhow!( "unimplemented: wasm atomics (fn memory_atomic_notify) unsupported" , )
450
445
. into ( ) ,
@@ -455,17 +450,13 @@ unsafe fn memory_atomic_notify(
455
450
unsafe fn memory_atomic_wait32 (
456
451
vmctx : * mut VMContext ,
457
452
memory_index : u32 ,
458
- addr : * mut u8 ,
453
+ addr : u64 ,
459
454
_expected : u32 ,
460
455
_timeout : u64 ,
461
456
) -> Result < u32 , TrapReason > {
462
- let addr = addr as usize ;
463
457
let memory = MemoryIndex :: from_u32 ( memory_index) ;
464
458
let instance = ( * vmctx) . instance ( ) ;
465
- // see wasmtime_memory_atomic_notify for why this shouldn't overflow
466
- // but we still double-check
467
- let addr_to_check = addr. checked_add ( 4 ) . unwrap ( ) ;
468
- validate_atomic_addr ( instance, memory, addr_to_check) ?;
459
+ validate_atomic_addr ( instance, memory, addr, 4 , 4 ) ?;
469
460
Err (
470
461
anyhow:: anyhow!( "unimplemented: wasm atomics (fn memory_atomic_wait32) unsupported" , )
471
462
. into ( ) ,
@@ -476,40 +467,47 @@ unsafe fn memory_atomic_wait32(
476
467
unsafe fn memory_atomic_wait64 (
477
468
vmctx : * mut VMContext ,
478
469
memory_index : u32 ,
479
- addr : * mut u8 ,
470
+ addr : u64 ,
480
471
_expected : u64 ,
481
472
_timeout : u64 ,
482
473
) -> Result < u32 , TrapReason > {
483
- let addr = addr as usize ;
484
474
let memory = MemoryIndex :: from_u32 ( memory_index) ;
485
475
let instance = ( * vmctx) . instance ( ) ;
486
- // see wasmtime_memory_atomic_notify for why this shouldn't overflow
487
- // but we still double-check
488
- let addr_to_check = addr. checked_add ( 8 ) . unwrap ( ) ;
489
- validate_atomic_addr ( instance, memory, addr_to_check) ?;
476
+ validate_atomic_addr ( instance, memory, addr, 8 , 8 ) ?;
490
477
Err (
491
478
anyhow:: anyhow!( "unimplemented: wasm atomics (fn memory_atomic_wait64) unsupported" , )
492
479
. into ( ) ,
493
480
)
494
481
}
495
482
496
- /// For atomic operations we still check the actual address despite this also
497
- /// being checked via the `heap_addr` instruction in cranelift. The reason for
498
- /// that is because the `heap_addr` instruction can defer to a later segfault to
499
- /// actually recognize the out-of-bounds whereas once we're running Rust code
500
- /// here we don't want to segfault.
501
- ///
502
- /// In the situations where bounds checks were elided in JIT code (because oob
503
- /// would then be later guaranteed to segfault) this manual check is here
504
- /// so we don't segfault from Rust.
483
/// Bail out of the enclosing function with `Err($trap)` unless `$cond` holds.
///
/// Local analogue of `anyhow::ensure!`, but returning the caller's own trap
/// type rather than an `anyhow::Error`.
macro_rules! ensure {
    ($cond:expr, $trap:expr) => {
        if !($cond) {
            return Err($trap);
        }
    };
}
490
+
491
+ /// In the configurations where bounds checks were elided in JIT code (because
492
+ /// we are using static memories with virtual memory guard pages) this manual
493
+ /// check is here so we don't segfault from Rust. For other configurations,
494
+ /// these checks are required anyways.
505
495
unsafe fn validate_atomic_addr (
506
496
instance : & Instance ,
507
497
memory : MemoryIndex ,
508
- addr : usize ,
498
+ addr : u64 ,
499
+ access_size : u64 ,
500
+ access_alignment : u64 ,
509
501
) -> Result < ( ) , TrapCode > {
510
- if addr > instance. get_memory ( memory) . current_length ( ) {
511
- return Err ( TrapCode :: HeapOutOfBounds ) ;
512
- }
502
+ debug_assert ! ( access_alignment. is_power_of_two( ) ) ;
503
+ ensure ! ( addr % access_alignment == 0 , TrapCode :: HeapMisaligned ) ;
504
+
505
+ let length = u64:: try_from ( instance. get_memory ( memory) . current_length ( ) ) . unwrap ( ) ;
506
+ ensure ! (
507
+ addr. saturating_add( access_size) < length,
508
+ TrapCode :: HeapOutOfBounds
509
+ ) ;
510
+
513
511
Ok ( ( ) )
514
512
}
515
513
0 commit comments