@@ -949,63 +949,119 @@ impl Instance {
         Ok(())
     }
 
-    fn push(&mut self, value: u64) {
-        let stack_offset = self.ctx.gpr.rsp as usize - self.alloc.slot().stack as usize;
+    fn push(&mut self, value: u64) -> Result<(), ()> {
+        let stack_offset = self.ctx.gpr.rsp as usize - self.alloc.stack_start() as usize;
         let stack_index = stack_offset / 8;
         assert!(stack_offset % 8 == 0);
 
         let stack = unsafe { self.alloc.stack_u64_mut() };
 
         // check for at least one free stack slot
-        if stack.len() - stack_index >= 1 {
+        if stack_index >= 1 {
             self.ctx.gpr.rsp -= 8;
             stack[stack_index - 1] = value;
+            Ok(())
         } else {
-            panic!("caused a guest stack overflow!");
+            Err(())
         }
     }
 
+    fn with_redzone_stack<T, F: FnOnce(&mut Self) -> T>(&mut self, f: F) -> T {
+        self.alloc.enable_stack_redzone();
+
+        let res = f(self);
+
+        self.alloc.disable_stack_redzone();
+
+        res
+    }
+
+    // Force a guest to unwind the stack from the specified guest address
     fn force_unwind(&mut self) -> Result<(), Error> {
-        #[unwind(allowed)]
-        extern "C" fn initiate_unwind() {
-            panic!(TerminationDetails::ForcedUnwind);
-        }
+        // if we should unwind by returning into the guest to cause a fault, do so with the redzone
+        // available in case the guest was at or close to overflowing.
+        self.with_redzone_stack(|inst| {
+            #[unwind(allowed)]
+            extern "C" fn initiate_unwind() {
+                panic!(TerminationDetails::ForcedUnwind);
+            }
 
-        // The logic for this conditional can be a bit unintuitive: we _require_ that the stack
-        // is aligned to 8 bytes, but not 16 bytes, when pushing `initiate_unwind`.
-        //
-        // A diagram of the required layout may help:
-        // `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
-        // `XXXXX8`: | return address |
-        // `XXXX..`: | ..locals etc.. |
-        // `XXXX..`: | ..as needed... |
-        //
-        // By the time we've gotten here, we have already pushed "return address", the address of
-        // wherever in the guest we want to start unwinding. If it leaves the stack 16-byte
-        // aligned, it's 8 bytes off from the diagram above, and we would have the call frame for
-        // `initiate_unwind` in violation of the SysV ABI. Functionally, this means that
-        // compiler-generated xmm accesses will fault due to being misaligned.
-        //
-        // So, instead, push a new return address to construct a new call frame at the right
-        // offset. `unwind_stub` has CFA directives so the unwinder can connect from
-        // `initiate_unwind` to guest/host frames to unwind. The unwinder, thankfully, has no
-        // preferences about stack alignment of frames being unwound.
-        //
-        // extremely unsafe, doesn't handle any stack exhaustion edge cases yet
-        if self.ctx.gpr.rsp % 16 == 0 {
-            self.push(crate::context::unwind_stub as u64);
-        }
+            let guest_addr = inst
+                .ctx
+                .stop_addr
+                .expect("guest that stopped in guest code has an address it stopped at");
+
+            // set up the faulting instruction pointer as the return address for `initiate_unwind`;
+            // extremely unsafe, doesn't handle any edge cases yet
+            //
+            // TODO(Andy) if the last address is obtained through the signal handler, for a signal
+            // received exactly when we have just executed a `call` to a guest function, we
+            // actually want to not push it (or push it +1?) lest we try to unwind with a return
+            // address == start of function, where the system unwinder will unwind for the function
+            // at address-1, (probably) fail to find the function, and `abort()`.
+            //
+            // if `rip` == the start of some guest function, we can probably just discard it and
+            // use the return address instead.
+            inst.push(guest_addr as u64)
+                .expect("stack has available space");
+
+            // The logic for this conditional can be a bit unintuitive: we _require_ that the stack
+            // is aligned to 8 bytes, but not 16 bytes, when pushing `initiate_unwind`.
+            //
+            // A diagram of the required layout may help:
+            // `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
+            // `XXXXX8`: | return address |
+            // `XXXX..`: | ..locals etc.. |
+            // `XXXX..`: | ..as needed... |
+            //
+            // Now ensure we _have_ an ABI-conformant call frame like the above, by handling the
+            // case that could lead to an unaligned stack: the guest stack pointer currently being
+            // unaligned. Among other errors, a misaligned stack will cause compiler-generated xmm
+            // accesses to fault.
+            //
+            // E.g., we would have a stack like:
+            // `XXXXX8`: ------------------ <-- guest stack end, call frame start
+            // `XXXXX0`: | unwind_stub |
+            // `XXXX..`: | ..locals etc.. |
+            // `XXXX..`: | ..as needed... |
+            //
+            // So, instead, push a new return address to construct a new call frame at the right
+            // offset. `unwind_stub` has CFA directives so the unwinder can connect from
+            // `initiate_unwind` to guest/host frames to unwind. The unwinder, thankfully, has no
+            // preferences about the alignment of frames being unwound.
+            //
+            // And we end up with a guest stack like this:
+            // `XXXXX8`: ------------------ <-- guest stack end
+            // `XXXXX0`: | guest ret addr | <-- guest return address to unwind through
+            // `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
+            // `XXXXX8`: | unwind_stub |
+            // `XXXX..`: | ..locals etc.. |
+            // `XXXX..`: | ..as needed... |
+            if inst.ctx.gpr.rsp % 16 == 0 {
+                // extremely unsafe, doesn't handle any stack exhaustion edge cases yet
+                inst.push(crate::context::unwind_stub as u64)
+                    .expect("stack has available space");
+            }
 
-        assert!(self.ctx.gpr.rsp % 16 == 8);
-        self.push(initiate_unwind as u64);
+            assert!(inst.ctx.gpr.rsp % 16 == 8);
+            // extremely unsafe, doesn't handle any stack exhaustion edge cases yet
+            inst.push(initiate_unwind as u64)
+                .expect("stack has available space");
 
-        match self.swap_and_return() {
-            Ok(_) => panic!("forced unwinding shouldn't return normally"),
-            Err(Error::RuntimeTerminated(TerminationDetails::ForcedUnwind)) => (),
-            Err(e) => panic!("unexpected error: {}", e),
-        }
+            inst.state = State::Ready;
 
-        Ok(())
+            match inst.swap_and_return() {
+                Ok(_) => panic!("forced unwinding shouldn't return normally"),
+                Err(Error::RuntimeTerminated(TerminationDetails::ForcedUnwind)) => (),
+                Err(e) => panic!("unexpected error: {}", e),
+            }
+
+            // we've unwound the stack, so we know there are no longer any host frames.
+            inst.hostcall_count = 0;
+            inst.ctx.stop_addr = None;
+
+            Ok(())
+        })
     }
 }
 
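The alignment dance above is easier to see with concrete numbers. Below is a minimal, self-contained sketch of just the arithmetic: a downward-growing stack, a fallible `push` in the spirit of the patched one, and the conditional stub push that guarantees `rsp % 16 == 8` before the handler address goes on. `GuestStack`, `STUB`, and `HANDLER` are illustrative stand-ins, not Lucet's types; the real code manipulates `self.ctx.gpr.rsp` and the instance's actual stack memory.

struct GuestStack {
    words: Vec<u64>, // backing storage; the end of the Vec is the highest address
    rsp: usize,      // simulated stack pointer in bytes, moves down in 8-byte steps
}

impl GuestStack {
    fn new(size_bytes: usize) -> Self {
        assert!(size_bytes % 16 == 0);
        GuestStack {
            words: vec![0; size_bytes / 8],
            rsp: size_bytes,
        }
    }

    // push one 8-byte word, failing (like the patched `push`) instead of
    // panicking when there is no free slot left
    fn push(&mut self, value: u64) -> Result<(), ()> {
        if self.rsp < 8 {
            return Err(());
        }
        self.rsp -= 8;
        self.words[self.rsp / 8] = value;
        Ok(())
    }
}

fn main() {
    const STUB: u64 = 0x1000; // stands in for `crate::context::unwind_stub`
    const HANDLER: u64 = 0x2000; // stands in for `initiate_unwind`

    // try both 8-byte-aligned phases the guest's rsp can be in
    for initial_words in [0usize, 1] {
        let mut stack = GuestStack::new(64);
        for _ in 0..initial_words {
            stack.push(0xdead).expect("stack has available space");
        }

        // the guest address we want to start unwinding from
        stack.push(0x4242).expect("stack has available space");

        // if rsp is still 16-byte aligned, one stub word re-misaligns it so
        // the handler's call frame will start on a 16-byte boundary
        if stack.rsp % 16 == 0 {
            stack.push(STUB).expect("stack has available space");
        }
        assert!(stack.rsp % 16 == 8);
        stack.push(HANDLER).expect("stack has available space");

        // rsp is now 16-byte aligned and points at HANDLER; a `ret` would pop
        // it and enter the handler with rsp % 16 == 8, the same state a real
        // `call` leaves behind under the SysV ABI
        assert!(stack.rsp % 16 == 0);
        println!("initial_words={}: ok, rsp={}", initial_words, stack.rsp);
    }
}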
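`with_redzone_stack` is a scoped-toggle helper: enable some state, hand `&mut self` to a closure, then disable the state and return the closure's result. Here is a minimal sketch of the same shape, where a hypothetical boolean flag stands in for whatever `enable_stack_redzone`/`disable_stack_redzone` actually do to the guard region; note that, like the code in the patch, it does not restore the state if the closure panics, which a Drop-based guard would handle.

struct Alloc {
    redzone_enabled: bool, // stand-in; a real implementation would flip page protections
}

struct Instance {
    alloc: Alloc,
}

impl Instance {
    // run `f` with the redzone enabled, restoring the disabled state afterwards
    fn with_redzone_stack<T, F: FnOnce(&mut Self) -> T>(&mut self, f: F) -> T {
        self.alloc.redzone_enabled = true; // stand-in for enable_stack_redzone()
        let res = f(self);
        self.alloc.redzone_enabled = false; // stand-in for disable_stack_redzone()
        res
    }
}

fn main() {
    let mut inst = Instance {
        alloc: Alloc {
            redzone_enabled: false,
        },
    };
    let seen = inst.with_redzone_stack(|inst| inst.alloc.redzone_enabled);
    assert!(seen);
    assert!(!inst.alloc.redzone_enabled);
    println!("redzone enabled inside the closure, disabled after");
}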