|
@@ -42,7 +42,7 @@
 // target is not tampered with. When unwinding, we're skipping the normal return
 // procedure for multiple frames and thus need to pop the return addresses of
 // the skipped frames from the shadow stack to avoid triggering an exception (using
-// `_LIBUNWIND_POP_SS_SSP()`). Also, some architectures, like the x86-family
+// `_LIBUNWIND_POP_SHSTK_SSP()`). Also, some architectures, like the x86-family
 // CET, push the return addresses onto the shadow stack with common call
 // instructions, so for these architectures, normal function calls should be
 // avoided when invoking the `jumpto()` function. To do this, we use inline
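The diff renames `_LIBUNWIND_POP_SS_SSP()` but never shows its body. As a point of reference, here is a minimal sketch of what the x86 flavor plausibly expands to, using the `_get_ssp()` and `_inc_ssp()` intrinsics from `<cet.h>` (real compiler intrinsics requiring `-mshstk`, but the macro body itself is an assumption, not code from this change):

#include <cet.h> /* _get_ssp(), _inc_ssp() */

/* Sketch only: pop `count` return addresses off the shadow stack by
 * advancing the shadow stack pointer. _get_ssp() returns 0 when shadow
 * stacks are disabled, in which case there is nothing to pop. incssp
 * consumes only the low 8 bits of its operand, hence the 255 chunking. */
#define _LIBUNWIND_POP_SHSTK_SSP(count)                                        \
  do {                                                                         \
    unsigned long ssp = _get_ssp();                                            \
    if (ssp != 0) {                                                            \
      unsigned int tmp = (count);                                              \
      while (tmp > 255) {                                                      \
        _inc_ssp(255);                                                         \
        tmp -= 255;                                                            \
      }                                                                        \
      _inc_ssp(tmp);                                                           \
    }                                                                          \
  } while (0)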
|
@@ -54,38 +54,38 @@
     __unw_resume((cursor)); \
   } while (0)
 #elif defined(_LIBUNWIND_TARGET_I386)
-#define __shadow_stack_step_size (4)
+#define __shstk_step_size (4)
 #define __unw_phase2_resume(cursor, fn) \
   do { \
-    _LIBUNWIND_POP_SS_SSP((fn)); \
-    void *ssRegContext = __libunwind_ss_get_registers((cursor)); \
-    void *ssJumpAddress = __libunwind_ss_get_jump_target(); \
+    _LIBUNWIND_POP_SHSTK_SSP((fn)); \
+    void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \
+    void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \
     __asm__ volatile("push %%edi\n\t" \
                      "sub $4, %%esp\n\t" \
-                     "jmp *%%edx\n\t" ::"D"(ssRegContext), \
-                     "d"(ssJumpAddress)); \
+                     "jmp *%%edx\n\t" ::"D"(shstkRegContext), \
+                     "d"(shstkJumpAddress)); \
   } while (0)
 #elif defined(_LIBUNWIND_TARGET_X86_64)
-#define __shadow_stack_step_size (8)
+#define __shstk_step_size (8)
 #define __unw_phase2_resume(cursor, fn) \
   do { \
-    _LIBUNWIND_POP_SS_SSP((fn)); \
-    void *ssRegContext = __libunwind_ss_get_registers((cursor)); \
-    void *ssJumpAddress = __libunwind_ss_get_jump_target(); \
-    __asm__ volatile("jmpq *%%rdx\n\t" ::"D"(ssRegContext), \
-                     "d"(ssJumpAddress)); \
+    _LIBUNWIND_POP_SHSTK_SSP((fn)); \
+    void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \
+    void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \
+    __asm__ volatile("jmpq *%%rdx\n\t" ::"D"(shstkRegContext), \
+                     "d"(shstkJumpAddress)); \
   } while (0)
 #elif defined(_LIBUNWIND_TARGET_AARCH64)
-#define __shadow_stack_step_size (8)
+#define __shstk_step_size (8)
 #define __unw_phase2_resume(cursor, fn) \
   do { \
-    _LIBUNWIND_POP_SS_SSP((fn)); \
-    void *ssRegContext = __libunwind_ss_get_registers((cursor)); \
-    void *ssJumpAddress = __libunwind_ss_get_jump_target(); \
+    _LIBUNWIND_POP_SHSTK_SSP((fn)); \
+    void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \
+    void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \
     __asm__ volatile("mov x0, %0\n\t" \
                      "br %1\n\t" \
                      : \
-                     : "r"(ssRegContext), "r"(ssJumpAddress) \
+                     : "r"(shstkRegContext), "r"(shstkJumpAddress) \
                      : "x0"); \
   } while (0)
 #endif
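The two helpers these macros call are not part of the hunk either. Their contracts, inferred purely from the call sites above (the signatures are an assumption, not copied from a header), come down to:

/* Assumed declarations, reconstructed from usage in the macros above. */
extern void *__libunwind_shstk_get_registers(unw_cursor_t *cursor);
/* -> pointer to the cursor's saved register state, passed to the restore
 *    routine in %edi / %rdi / x0 */
extern void *__libunwind_shstk_get_jump_target(void);
/* -> address of the jumpto() register-restore entry point; it is reached
 *    with a plain jmp/br precisely so that no new return address gets
 *    pushed onto the shadow stack */

This also explains the i386 sequence `push %edi; sub $4, %esp`: it lays out the on-stack argument and a dummy return-address slot the way a real `call` would have, without executing one.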
@@ -267,8 +267,7 @@ unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except
       unw_word_t retInNormalStack;
       __unw_get_reg(cursor, UNW_REG_IP, &retInNormalStack);
       unsigned long retInShadowStack =
-          *(unsigned long *)(shadowStackTop +
-                             __shadow_stack_step_size * framesWalked);
+          *(unsigned long *)(shadowStackTop + __shstk_step_size * framesWalked);
       if (retInNormalStack != retInShadowStack)
         return _URC_FATAL_PHASE2_ERROR;
     }
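`shadowStackTop` is captured before the frame walk starts, outside this hunk. On x86 a plausible way to obtain it is again the `<cet.h>` intrinsic (an assumption; the capture site is not shown in this diff):

/* Assumed setup for the validation above: take the shadow stack pointer
 * once, before stepping through the frames. A value of 0 means shadow
 * stacks are disabled and the comparison can be skipped. */
unsigned long shadowStackTop = _get_ssp();

With that in hand, each frame the cursor steps past moves the comparison point up by `__shstk_step_size` bytes (4 on i386, 8 on x86_64 and AArch64), so `retInShadowStack` always reads the shadow-stack slot mirroring the frame whose IP was just fetched; any mismatch means the return address on the normal stack was tampered with, and phase 2 aborts.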
|