 #include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_interface_internal.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 
 namespace __asan {
 
+using PoisonRecordRingBuffer = RingBuffer<PoisonRecord>;
+
 static atomic_uint8_t can_poison_memory;
 
+static Mutex poison_records_mutex;
+static PoisonRecordRingBuffer *poison_records
+    SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr;
+
+void AddPoisonRecord(const PoisonRecord &new_record) {
+  if (flags()->poison_history_size <= 0)
+    return;
+
+  GenericScopedLock<Mutex> l(&poison_records_mutex);
+
+  if (poison_records == nullptr)
+    poison_records = PoisonRecordRingBuffer::New(flags()->poison_history_size);
+
+  poison_records->push(new_record);
+}
+
+bool FindPoisonRecord(uptr addr, PoisonRecord &match) {
+  if (flags()->poison_history_size <= 0)
+    return false;
+
+  GenericScopedLock<Mutex> l(&poison_records_mutex);
+
+  if (poison_records) {
+    for (unsigned int i = 0; i < poison_records->size(); i++) {
+      PoisonRecord record = (*poison_records)[i];
+      if (record.begin <= addr && addr < record.end) {
+        internal_memcpy(&match, &record, sizeof(record));
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() {
+  poison_records_mutex.Lock();
+}
+
+void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() {
+  poison_records_mutex.Unlock();
+}
+
 void SetCanPoisonMemory(bool value) {
   atomic_store(&can_poison_memory, value, memory_order_release);
 }
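
For context, a minimal sketch of how a reporting path might consume these records. FindPoisonRecord, StackDepotGet, StackTrace::Print, and Printf are existing sanitizer runtime APIs, but the helper name MaybeDescribePoisonHistory and the report wording are illustrative assumptions, not part of this diff:

// Hypothetical reporting helper (not part of this patch): if addr falls inside
// a recorded manual poisoning, describe where and by which thread the region
// was poisoned.
static void MaybeDescribePoisonHistory(uptr addr) {
  PoisonRecord record;
  if (!FindPoisonRecord(addr, record))
    return;
  Printf("Address %p is inside a region manually poisoned by thread T%u:\n",
         (void *)addr, record.thread_id);
  StackTrace poison_stack = StackDepotGet(record.stack_id);
  poison_stack.Print();
}
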
@@ -107,6 +154,20 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
   uptr end_addr = beg_addr + size;
   VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
           (void *)end_addr);
+
+  if (flags()->poison_history_size > 0) {
+    GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false);
+    u32 current_tid = GetCurrentTidOrInvalid();
+
+    u32 stack_id = StackDepotPut(stack);
+
+    PoisonRecord record{.stack_id = stack_id,
+                        .thread_id = current_tid,
+                        .begin = beg_addr,
+                        .end = end_addr};
+    AddPoisonRecord(record);
+  }
+
   ShadowSegmentEndpoint beg(beg_addr);
   ShadowSegmentEndpoint end(end_addr);
   if (beg.chunk == end.chunk) {
@@ -147,6 +208,11 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
   uptr end_addr = beg_addr + size;
   VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
           (void *)end_addr);
+
+  // Note: we don't need to update the poison tracking here. Since the shadow
+  // memory will be unpoisoned, the poison tracking ring buffer entries will be
+  // ignored.
+
   ShadowSegmentEndpoint beg(beg_addr);
   ShadowSegmentEndpoint end(end_addr);
   if (beg.chunk == end.chunk) {
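
A minimal end-to-end sketch of exercising the new behavior from user code, assuming the runtime flag is spelled poison_history_size (matching flags()->poison_history_size above) and is settable via ASAN_OPTIONS; the file name and buffer sizes are arbitrary:

// poison_demo.cpp -- hypothetical demo, not part of this patch.
//   clang++ -g -fsanitize=address poison_demo.cpp -o poison_demo
//   ASAN_OPTIONS=poison_history_size=1000 ./poison_demo
#include <sanitizer/asan_interface.h>
#include <stdlib.h>

int main() {
  char *buf = static_cast<char *>(malloc(64));
  // Manually poison the second half of the buffer; with the change above, the
  // poisoning call stack and thread id are recorded in the ring buffer.
  __asan_poison_memory_region(buf + 32, 32);
  // Touching the poisoned range triggers an ASan use-after-poison report,
  // which the runtime can now match against a recorded PoisonRecord.
  buf[40] = 1;
  free(buf);
  return 0;
}
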