|
| 1 | +//===-- Common macros for jmpbuf -------------------------------*- C++ -*-===// |
| 2 | +// |
| 3 | +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | +// See https://llvm.org/LICENSE.txt for license information. |
| 5 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | +// |
| 7 | +//===----------------------------------------------------------------------===// |
| 8 | + |
| 9 | +#ifndef LIBC_SRC_SETJMP_X86_64_COMMON_H |
| 10 | +#define LIBC_SRC_SETJMP_X86_64_COMMON_H |
| 11 | + |
| 12 | +#include "include/llvm-libc-macros/offsetof-macro.h" |
| 13 | + |
| 14 | +//===----------------------------------------------------------------------===// |
// Architecture specific macros for i386 and x86_64.
| 16 | +//===----------------------------------------------------------------------===// |
| 17 | + |
#ifdef __i386__
// i386 (cdecl): arguments are passed on the stack; EAX carries the return
// value; ECX/EDX are caller-saved scratch registers.
#define RET_REG eax
#define BASE_REG ecx
#define MUL_REG edx
#define STACK_REG esp
#define PC_REG eip
#define NORMAL_STORE_REGS ebx, esi, edi, ebp
// Callee-saved registers recorded into / restored from the jmp_buf.
#define STORE_ALL_REGS(M) M(ebx) M(esi) M(edi) M(ebp)
#define LOAD_ALL_REGS(M) M(ebx) M(esi) M(edi) M(ebp) M(esp)
#define DECLARE_ALL_REGS(M) M(ebx), M(esi), M(edi), M(ebp), M(esp), M(eip)
// The jmp_buf pointer is the first stack argument (just past the pushed
// return address).
#define LOAD_BASE() "mov 4(%%esp), %%ecx\n\t"
// Load `value` (second stack argument) into EAX and coerce 0 to 1: CMP sets
// CF only when EAX == 0, and ADC then bumps EAX to 1; any other value is
// unchanged. NOTE: every fragment must end with "\n\t" — without it, adjacent
// string literals would paste into a single invalid instruction.
#define CALCULATE_RETURN_VALUE()                                               \
  "mov 0x8(%%esp), %%eax\n\t"                                                  \
  "cmp $0x1, %%eax\n\t"                                                        \
  "adc $0x0, %%eax\n\t"
#else
// x86_64 (SysV): RDI carries the jmp_buf pointer, RSI carries `value`, RAX
// carries the return value.
#define RET_REG rax
#define BASE_REG rdi
#define MUL_REG rdx
#define STACK_REG rsp
#define PC_REG rip
// Callee-saved registers recorded into / restored from the jmp_buf.
#define STORE_ALL_REGS(M) M(rbx) M(rbp) M(r12) M(r13) M(r14) M(r15)
#define LOAD_ALL_REGS(M) M(rbx) M(rbp) M(r12) M(r13) M(r14) M(r15) M(rsp)
#define DECLARE_ALL_REGS(M)                                                    \
  M(rbx), M(rbp), M(r12), M(r13), M(r14), M(r15), M(rsp), M(rip)
// The jmp_buf pointer already arrives in RDI; nothing to load.
#define LOAD_BASE()
// Coerce a zero `value` (in ESI) to 1, then move it into the return register.
#define CALCULATE_RETURN_VALUE()                                               \
  "cmp $0x1, %%esi\n\t"                                                        \
  "adc $0x0, %%esi\n\t"                                                        \
  "mov %%rsi, %%rax\n\t"
#endif
| 49 | + |
| 50 | +//===----------------------------------------------------------------------===// |
| 51 | +// Utility macros. |
| 52 | +//===----------------------------------------------------------------------===// |
| 53 | + |
// Stringification helpers: _STR produces the literal spelling of its
// argument; STR expands the argument first, so a macro like RET_REG becomes
// "rax" / "eax".
#define _STR(X) #X
#define STR(X) _STR(X)
// Spell a register for use inside an extended-asm template ("%%" escapes to a
// single '%' in the emitted assembly).
#define REG(X) "%%" STR(X)
// AT&T-syntax instruction fragments. Each ends with "\n\t" so fragments can
// be concatenated into one asm template string.
#define XOR(X, Y) "xor " REG(X) ", " REG(Y) "\n\t"
#define MOV(X, Y) "mov " REG(X) ", " REG(Y) "\n\t"
// Store register R into / load register R from the jmp_buf field named
// OFFSET, addressed relative to BASE. "%c[name]" emits the bare value of the
// immediate operand bound by DECLARE_OFFSET (no '$' prefix), giving a plain
// displacement.
#define STORE(R, OFFSET, BASE)                                                 \
  "mov " REG(R) ", %c[" STR(OFFSET) "](" REG(BASE) ")\n\t"
#define LOAD(OFFSET, BASE, R)                                                  \
  "mov %c[" STR(OFFSET) "](" REG(BASE) "), " REG(R) "\n\t"
// Compute the caller's stack pointer (skipping the pushed return address)
// into the return register.
#define COMPUTE_STACK_TO_RET()                                                 \
  "lea " STR(__SIZEOF_POINTER__) "(" REG(STACK_REG) "), " REG(RET_REG) "\n\t"
// Load the caller's return address (the saved PC) into the return register.
#define COMPUTE_PC_TO_RET() "mov (" REG(STACK_REG) "), " REG(RET_REG) "\n\t"
#define RETURN() "ret\n\t"
// Bind an asm immediate operand [X] to the byte offset of field X inside
// __jmp_buf, for use with the %c[...] displacements above.
#define DECLARE_OFFSET(X) [X] "i"(offsetof(__jmp_buf, X))
// Compare DST against the OFFSET field of *BASE, then branch if they differ.
#define CMP_MEM_REG(OFFSET, BASE, DST)                                         \
  "cmp %c[" STR(OFFSET) "](" REG(BASE) "), " REG(DST) "\n\t"
#define JNE_LABEL(LABEL) "jne " STR(LABEL) "\n\t"
| 71 | + |
| 72 | +//===----------------------------------------------------------------------===// |
| 73 | +// Checksum related macros. |
| 74 | +//===----------------------------------------------------------------------===// |
| 75 | +// For now, the checksum is computed with a simple multiply-xor-rotation |
// algorithm. The pseudo code is as follows:
| 77 | +// |
| 78 | +// def checksum(x, acc): |
| 79 | +// masked = x ^ MASK |
| 80 | +// high, low = full_multiply(masked, acc) |
| 81 | +// return rotate(high ^ low, ROTATION) |
| 82 | +// |
// Similar to other multiplication-based hashing schemes, a zero input to the
// `full_multiply` function may pollute the checksum with zero. However, user
// inputs are always masked, where the initial ACC and MASK are generated with
// random entropy and ROTATION is a fixed prime number. Given a good quality
// of system-level entropy, the chance of `masked` or `acc` being zero is
// ultra-low.
| 89 | +// |
| 90 | +// Notice that on x86-64, one-operand form of `mul` instruction: |
| 91 | +// mul %rdx |
| 92 | +// has the following effect: |
| 93 | +// RAX = LOW(RDX * RAX) |
| 94 | +// RDX = HIGH(RDX * RAX) |
| 95 | +//===----------------------------------------------------------------------===// |
| 96 | + |
#if LIBC_COPT_SETJMP_FORTIFICATION
// Mask a register with the entropy-derived cookie: X ^= MASK.
#define XOR_MASK(X) "xor %[value_mask], " REG(X) "\n\t"
// One-operand widening multiply (see the RAX/RDX note above).
#define MUL(X) "mul " REG(X) "\n\t"
#define ROTATE(X) "rol $%c[rotation], " REG(X) "\n\t"
// Fold the masked value (expected in RET_REG) into the running checksum
// (kept in MUL_REG): high,low = full_multiply(masked, acc);
// acc = rotate(high ^ low, ROTATION).
#define ACCUMULATE_CHECKSUM() MUL(MUL_REG) XOR(RET_REG, MUL_REG) ROTATE(MUL_REG)

// Seed the checksum accumulator register with the entropy-derived cookie.
#define LOAD_CHKSUM_STATE_REGS() "mov %[checksum_cookie], " REG(MUL_REG) "\n\t"

// Store callee-saved register SRC masked (buf->SRC = SRC ^ MASK), going
// through RET_REG so SRC itself is left untouched.
#define STORE_REG(SRC)                                                         \
  MOV(SRC, RET_REG) XOR_MASK(RET_REG) STORE(RET_REG, SRC, BASE_REG)
// Store the caller's (masked) stack pointer.
#define STORE_STACK()                                                          \
  COMPUTE_STACK_TO_RET()                                                       \
  XOR_MASK(RET_REG)                                                            \
  STORE(RET_REG, STACK_REG, BASE_REG)

// Store the caller's (masked) return address.
#define STORE_PC()                                                             \
  COMPUTE_PC_TO_RET()                                                          \
  XOR_MASK(RET_REG)                                                            \
  STORE(RET_REG, PC_REG, BASE_REG)

// Persist the final checksum into buf->__chksum.
#define STORE_CHECKSUM() STORE(MUL_REG, __chksum, BASE_REG)
// Fold the saved PC slot into the re-derived checksum and compare against the
// stored value; on mismatch, branch to the corruption handler.
// NOTE(review): this presumes the PC slot is the last value folded in on the
// setjmp side — confirm against the setjmp implementation.
#define EXAMINE_CHECKSUM()                                                     \
  LOAD(PC_REG, BASE_REG, RET_REG)                                              \
  ACCUMULATE_CHECKSUM()                                                        \
  CMP_MEM_REG(__chksum, BASE_REG, MUL_REG)                                     \
  JNE_LABEL(__libc_jmpbuf_corruption)

// Unmask the saved PC and jump to it. BASE_REG is reused as scratch since the
// jmp_buf is no longer needed after this point.
#define RESTORE_PC()                                                           \
  LOAD(PC_REG, BASE_REG, BASE_REG)                                             \
  XOR_MASK(BASE_REG)                                                           \
  "jmp *" REG(BASE_REG)
// Reload SRC from the jmp_buf; the still-masked copy (in RET_REG) feeds the
// checksum, then SRC is unmasked in place.
#define RESTORE_REG(SRC)                                                       \
  LOAD(SRC, BASE_REG, RET_REG)                                                 \
  MOV(RET_REG, SRC)                                                            \
  ACCUMULATE_CHECKSUM() XOR_MASK(SRC)
#else
// Fortification disabled: plain unmasked stores/loads, no checksum at all.
#define XOR_MASK(X)
#define ACCUMULATE_CHECKSUM()
#define LOAD_CHKSUM_STATE_REGS()
#define STORE_REG(SRC) STORE(SRC, SRC, BASE_REG)
#define STORE_STACK() COMPUTE_STACK_TO_RET() STORE(RET_REG, STACK_REG, BASE_REG)
#define STORE_PC() COMPUTE_PC_TO_RET() STORE(RET_REG, PC_REG, BASE_REG)
#define STORE_CHECKSUM()
#define EXAMINE_CHECKSUM()
// Jump directly through the saved PC slot in the jmp_buf.
#define RESTORE_PC() "jmp *%c[" STR(PC_REG) "](" REG(BASE_REG) ")\n\t"
#define RESTORE_REG(SRC) LOAD(SRC, BASE_REG, SRC)
#endif
| 144 | + |
// Store one register and fold it into the running checksum (the checksum part
// expands to nothing when fortification is disabled).
#define STORE_REG_ACCUMULATE(SRC) STORE_REG(SRC) ACCUMULATE_CHECKSUM()
| 146 | + |
| 147 | +#endif // LIBC_SRC_SETJMP_X86_64_COMMON_H |
0 commit comments