
Commit e148060

Merge pull request swiftlang#11880 from ndrewh/sanitizer-cherry-picks1
🍒 sanitizers: fixes for module map, additional sandbox warnings
2 parents 6e36000 + b5f6ecd commit e148060

8 files changed, +155 / -38 lines


compiler-rt/lib/sanitizer_common/sanitizer_common.h

Lines changed: 7 additions & 0 deletions
@@ -484,6 +484,13 @@ inline uptr Log2(uptr x) {
   return LeastSignificantSetBitIndex(x);
 }
 
+inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
+                                 uptr end2) {
+  CHECK_LE(start1, end1);
+  CHECK_LE(start2, end2);
+  return (end1 < start2) || (end2 < start1);
+}
+
 // Don't use std::min, std::max or std::swap, to minimize dependency
 // on libstdc++.
 template <class T>
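Reviewer note, not part of the diff: the new helper treats both end points as inclusive, which is why the macOS caller below passes end = start + size - 1. A minimal standalone sketch of the same check, with plain asserts standing in for CHECK_LE and a hypothetical main:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Inclusive-endpoint disjointness test, mirroring the helper added above.
static bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
                                 uptr end2) {
  assert(start1 <= end1);
  assert(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

int main() {
  // [0x1000, 0x1fff] and [0x2000, 0x2fff] touch but do not overlap.
  assert(IntervalsAreSeparate(0x1000, 0x1fff, 0x2000, 0x2fff));
  // [0x1000, 0x2000] and [0x2000, 0x2fff] share the byte at 0x2000.
  assert(!IntervalsAreSeparate(0x1000, 0x2000, 0x2000, 0x2fff));
  return 0;
}

Hoisting the helper into sanitizer_common.h lets the Mac and POSIX code share one definition instead of the static copy removed from sanitizer_posix.cpp further down.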

compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp

Lines changed: 47 additions & 6 deletions
@@ -103,6 +103,8 @@ extern "C" {
                               natural_t *nesting_depth,
                               vm_region_recurse_info_t info,
                               mach_msg_type_number_t *infoCnt);
+
+extern const void* _dyld_get_shared_cache_range(size_t* length);
 }
 
 # if !SANITIZER_GO
@@ -954,7 +956,17 @@ static void DisableMmapExcGuardExceptions() {
       RTLD_DEFAULT, "task_set_exc_guard_behavior");
   if (set_behavior == nullptr) return;
   const task_exc_guard_behavior_t task_exc_guard_none = 0;
-  set_behavior(mach_task_self(), task_exc_guard_none);
+  kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none);
+  if (res != KERN_SUCCESS) {
+    Report(
+        "WARN: task_set_exc_guard_behavior returned %d (%s), "
+        "mmap may fail unexpectedly.\n",
+        res, mach_error_string(res));
+    if (res == KERN_DENIED)
+      Report(
+          "HINT: Check that task_set_exc_guard_behavior is allowed by "
+          "sandbox.\n");
+  }
 }
 
 static void VerifyInterceptorsWorking();
@@ -1387,15 +1399,27 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
   return 0;
 }
 
-// Returns true if the address is definitely mapped, and false if it is not
-// mapped or could not be determined.
-bool IsAddressInMappedRegion(uptr addr) {
+// This function (when used during initialization when there is
+// only a single thread), can be used to verify that a range
+// of memory hasn't already been mapped, and won't be mapped
+// later in the shared cache.
+//
+// If the syscall mach_vm_region_recurse fails (due to sandbox),
+// we assume that the memory is not mapped so that execution can continue.
+//
+// NOTE: range_end is inclusive
+//
+// WARNING: This function must NOT allocate memory, since it is
+// used in InitializeShadowMemory between where we search for
+// space for shadow and where we actually allocate it.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
   mach_vm_size_t vmsize = 0;
   natural_t depth = 0;
   vm_region_submap_short_info_data_64_t vminfo;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
-  mach_vm_address_t address = addr;
+  mach_vm_address_t address = range_start;
 
+  // First, check if the range is already mapped.
   kern_return_t kr =
       mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                              (vm_region_info_t)&vminfo, &count);
@@ -1407,7 +1431,24 @@ bool IsAddressInMappedRegion(uptr addr) {
     Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
   }
 
-  return (kr == KERN_SUCCESS && addr >= address && addr < address + vmsize);
+  if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
+                                                  range_start, range_end)) {
+    // Overlaps with already-mapped memory
+    return false;
+  }
+
+  size_t cacheLength;
+  uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+
+  if (cacheStart &&
+      !IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
+                            range_start, range_end)) {
+    // Overlaps with shared cache region
+    return false;
+  }
+
+  // We believe this address is available.
+  return true;
 }
 
 // FIXME implement on this platform.
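Illustration only, not part of the diff: the shared-cache half of the new check can be exercised on its own. This sketch assumes a macOS host where the private dyld entry point _dyld_get_shared_cache_range resolves at link time, mirroring the extern declaration the runtime adds above; RangeAvoidsSharedCache and the probe address are hypothetical.

#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Private dyld API; declared here the same way the runtime declares it.
extern "C" const void *_dyld_get_shared_cache_range(size_t *length);

// Inclusive-endpoint disjointness, as in sanitizer_common.h.
static bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
                                 uptr end2) {
  return (end1 < start2) || (end2 < start1);
}

// Returns true if [range_start, range_end] (inclusive) avoids the shared cache.
static bool RangeAvoidsSharedCache(uptr range_start, uptr range_end) {
  size_t cache_length = 0;
  uptr cache_start = (uptr)_dyld_get_shared_cache_range(&cache_length);
  if (!cache_start) return true;  // No shared cache mapped in this process.
  return IntervalsAreSeparate(cache_start, cache_start + cache_length - 1,
                              range_start, range_end);
}

int main() {
  uptr probe = 0x200000000;  // Hypothetical candidate shadow base.
  std::printf("range clear of shared cache: %d\n",
              RangeAvoidsSharedCache(probe, probe + 0xfff));
  return 0;
}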

compiler-rt/lib/sanitizer_common/sanitizer_mac.h

Lines changed: 0 additions & 2 deletions
@@ -76,8 +76,6 @@ struct ThreadEventCallbacks {
 
 void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
 
-bool IsAddressInMappedRegion(uptr addr);
-
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_APPLE

compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp

Lines changed: 3 additions & 12 deletions
@@ -225,17 +225,9 @@ void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
   return (void *)p;
 }
 
-static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
-                                        uptr start2, uptr end2) {
-  CHECK(start1 <= end1);
-  CHECK(start2 <= end2);
-  return (end1 < start2) || (end2 < start1);
-}
-
+# if !SANITIZER_APPLE
 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
-// When the shadow is mapped only a single thread usually exists (plus maybe
-// several worker threads on Mac, which aren't expected to map big chunks of
-// memory).
+// When the shadow is mapped only a single thread usually exists
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   if (proc_maps.Error())
@@ -251,7 +243,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
   return true;
 }
 
-#if !SANITIZER_APPLE
 void DumpProcessMap() {
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   const sptr kBufSize = 4095;
@@ -265,7 +256,7 @@ void DumpProcessMap() {
   Report("End of process memory map.\n");
   UnmapOrDie(filename, kBufSize);
 }
-#endif
+# endif
 
 const char *GetPwd() {
   return GetEnv("PWD");
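Aside, not from the commit: the unchanged POSIX body of MemoryRangeIsAvailable walks the process memory map and requires every existing mapping to be disjoint from the requested range. A rough standalone analogue of that logic, assuming a Linux-style /proc/self/maps and standard library I/O in place of the runtime's MemoryMappingLayout:

#include <cstdint>
#include <cstdio>
#include <fstream>
#include <string>

using uptr = uintptr_t;

static bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
                                 uptr end2) {
  return (end1 < start2) || (end2 < start1);
}

// Rough analogue of the POSIX MemoryRangeIsAvailable: scan the process map
// and require that no existing mapping overlaps [range_start, range_end].
static bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  std::ifstream maps("/proc/self/maps");
  std::string line;
  while (std::getline(maps, line)) {
    // Each line starts with "start-end " in hex.
    std::size_t dash = line.find('-');
    std::size_t space = line.find(' ');
    if (dash == std::string::npos || space == std::string::npos) continue;
    uptr seg_start = std::stoull(line.substr(0, dash), nullptr, 16);
    uptr seg_end = std::stoull(line.substr(dash + 1, space - dash - 1),
                               nullptr, 16);
    // /proc/self/maps end addresses are exclusive; convert to inclusive.
    if (!IntervalsAreSeparate(seg_start, seg_end - 1, range_start, range_end))
      return false;
  }
  return true;
}

int main() {
  uptr probe = 0x7f0000000000;  // Hypothetical candidate address.
  std::printf("available: %d\n", MemoryRangeIsAvailable(probe, probe + 0xfff));
  return 0;
}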

compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp

Lines changed: 65 additions & 17 deletions
@@ -42,7 +42,6 @@ struct MemoryMappedSegmentData {
   const char *current_load_cmd_addr;
   u32 lc_type;
   uptr base_virt_addr;
-  uptr addr_mask;
 };
 
 template <typename Section>
@@ -51,12 +50,60 @@ static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
   const Section *sc = (const Section *)data->current_load_cmd_addr;
   data->current_load_cmd_addr += sizeof(Section);
 
-  uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+  uptr sec_start = sc->addr + data->base_virt_addr;
   uptr sec_end = sec_start + sc->size;
   module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
                           sc->sectname);
 }
 
+static bool VerifyMemoryMapping(MemoryMappingLayout* mapping) {
+  InternalMmapVector<LoadedModule> modules;
+  modules.reserve(128);  // matches DumpProcessMap
+  mapping->DumpListOfModules(&modules);
+
+  InternalMmapVector<LoadedModule::AddressRange> segments;
+  for (uptr i = 0; i < modules.size(); ++i) {
+    for (auto& range : modules[i].ranges()) {
+      segments.push_back(range);
+    }
+  }
+
+  // Verify that none of the segments overlap:
+  // 1. Sort the segments by the start address
+  // 2. Check that every segment starts after the previous one ends.
+  Sort(segments.data(), segments.size(),
+       [](LoadedModule::AddressRange& a, LoadedModule::AddressRange& b) {
+         return a.beg < b.beg;
+       });
+
+  // To avoid spam, we only print the report message once-per-process.
+  static bool invalid_module_map_reported = false;
+  bool well_formed = true;
+
+  for (size_t i = 1; i < segments.size(); i++) {
+    uptr cur_start = segments[i].beg;
+    uptr prev_end = segments[i - 1].end;
+    if (cur_start < prev_end) {
+      well_formed = false;
+      VReport(2, "Overlapping mappings: %s start = %p, %s end = %p\n",
+              segments[i].name, (void*)cur_start, segments[i - 1].name,
+              (void*)prev_end);
+      if (!invalid_module_map_reported) {
+        Report(
+            "WARN: Invalid dyld module map detected. This is most likely a bug "
+            "in the sanitizer.\n");
+        Report("WARN: Backtraces may be unreliable.\n");
+        invalid_module_map_reported = true;
+      }
+    }
+  }
+
+  for (auto& m : modules) m.clear();
+
+  mapping->Reset();
+  return well_formed;
+}
+
 void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
   // Don't iterate over sections when the caller hasn't set up the
   // data pointer, when there are no sections, or when the segment
@@ -82,6 +129,7 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
 
 MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
   Reset();
+  VerifyMemoryMapping(this);
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
@@ -187,6 +235,7 @@ typedef struct dyld_shared_cache_dylib_text_info
 
 extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
 extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern intptr_t _dyld_get_image_slide(const struct mach_header* mh);
 extern int dyld_shared_cache_iterate_text(
     const uuid_t cacheUuid,
     void (^callback)(const dyld_shared_cache_dylib_text_info *info));
@@ -255,23 +304,21 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
   layout_data->current_load_cmd_count--;
   if (((const load_command *)lc)->cmd == kLCSegment) {
     const SegmentCommand* sc = (const SegmentCommand *)lc;
-    uptr base_virt_addr, addr_mask;
-    if (layout_data->current_image == kDyldImageIdx) {
-      base_virt_addr = (uptr)get_dyld_hdr();
-      // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
-      // it contains an absolute address rather than an offset for dyld.
-      // To make matters even more complicated, this absolute address
-      // isn't actually the absolute segment address, but the offset portion
-      // of the address is accurate when combined with the dyld base address,
-      // and the mask will give just this offset.
-      addr_mask = 0xfffff;
-    } else {
+    if (internal_strcmp(sc->segname, "__LINKEDIT") == 0) {
+      // The LINKEDIT sections are for internal linker use, and may alias
+      // with the LINKEDIT section for other modules. (If we included them,
+      // our memory map would contain overlappping sections.)
+      return false;
+    }
+
+    uptr base_virt_addr;
+    if (layout_data->current_image == kDyldImageIdx)
+      base_virt_addr = (uptr)_dyld_get_image_slide(get_dyld_hdr());
+    else
       base_virt_addr =
           (uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);
-      addr_mask = ~0;
-    }
 
-    segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+    segment->start = sc->vmaddr + base_virt_addr;
     segment->end = segment->start + sc->vmsize;
     // Most callers don't need section information, so only fill this struct
     // when required.
@@ -281,9 +328,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
           (const char *)lc + sizeof(SegmentCommand);
       seg_data->lc_type = kLCSegment;
       seg_data->base_virt_addr = base_virt_addr;
-      seg_data->addr_mask = addr_mask;
      internal_strncpy(seg_data->name, sc->segname,
                       ARRAY_SIZE(seg_data->name));
+      seg_data->name[ARRAY_SIZE(seg_data->name) - 1] = 0;
     }
 
     // Return the initial protection.
@@ -297,6 +344,7 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
                      ? kDyldPath
                      : _dyld_get_image_name(layout_data->current_image);
     internal_strncpy(segment->filename, src, segment->filename_size);
+    segment->filename[segment->filename_size - 1] = 0;
   }
   segment->arch = layout_data->current_arch;
   internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);
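Editorial sketch, not part of the diff: VerifyMemoryMapping above is a standard sort-and-scan interval check. The same idea in standalone form, using std::sort and std::vector in place of the runtime's internal containers (Range and RangesAreWellFormed are hypothetical names):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct Range {
  uintptr_t beg, end;  // half-open [beg, end), as the overlap test assumes
  std::string name;
};

// Returns true if no two ranges overlap: sort by start address, then require
// that each range starts at or after the point where the previous one ends.
static bool RangesAreWellFormed(std::vector<Range> ranges) {
  std::sort(ranges.begin(), ranges.end(),
            [](const Range &a, const Range &b) { return a.beg < b.beg; });
  bool well_formed = true;
  for (size_t i = 1; i < ranges.size(); i++) {
    if (ranges[i].beg < ranges[i - 1].end) {
      well_formed = false;
      std::printf("Overlapping mappings: %s start = %p, %s end = %p\n",
                  ranges[i].name.c_str(), (void *)ranges[i].beg,
                  ranges[i - 1].name.c_str(), (void *)ranges[i - 1].end);
    }
  }
  return well_formed;
}

int main() {
  std::vector<Range> ranges = {
      {0x1000, 0x2000, "__TEXT a"},
      {0x1800, 0x2800, "__TEXT b"},  // overlaps the first range
      {0x3000, 0x4000, "__DATA a"},
  };
  std::printf("well formed: %d\n", RangesAreWellFormed(ranges));
  return 0;
}

Skipping __LINKEDIT segments and dropping the dyld address mask, as the hunks above do, is what keeps this invariant satisfiable in the first place.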

compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp

Lines changed: 7 additions & 0 deletions
@@ -505,6 +505,13 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
   }
 
 # if SANITIZER_APPLE
+  if (list->empty()) {
+    Report(
+        "WARN: No external symbolizers found. Symbols may be missing or "
+        "unreliable.\n");
+    Report(
+        "HINT: Is PATH set? Does sandbox allow file-read of /usr/bin/atos?\n");
+  }
   VReport(2, "Using dladdr symbolizer.\n");
   list->push_back(new (*allocator) DlAddrSymbolizer());
 # endif  // SANITIZER_APPLE

compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp

Lines changed: 1 addition & 1 deletion
@@ -235,7 +235,7 @@ void InitializePlatformEarly() {
   }
   // In some configurations, the max_vm is expanded, but much of this space is
   // already mapped. TSAN will not work in this configuration.
-  if (IsAddressInMappedRegion(HiAppMemEnd() - 1)) {
+  if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd() - 1)) {
     Report(
         "ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
         "already mapped.\n",
Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+// This test simply checks that the "Invalid dyld module map" warning is not printed
+// in the output of a backtrace.
+
+// RUN: %clangxx_asan -DSHARED_LIB -g %s -dynamiclib -o %t.dylib
+// RUN: %clangxx_asan -O0 -g %s %t.dylib -o %t.executable
+// RUN: %env_asan_opts="print_module_map=2" not %run %t.executable 2>&1 | FileCheck %s -DDYLIB=%{t:stem}.tmp.dylib
+
+// CHECK-NOT: WARN: Invalid dyld module map
+// CHECK-DAG: 0x{{.*}}-0x{{.*}} {{.*}}[[DYLIB]]
+// CHECK-DAG: 0x{{.*}}-0x{{.*}} {{.*}}libsystem
+
+#ifdef SHARED_LIB
+extern "C" void foo(int *a) { *a = 5; }
+#else
+#  include <cstdlib>
+
+extern "C" void foo(int *a);
+
+int main() {
+  int *a = (int *)malloc(sizeof(int));
+  free(a);
+  foo(a);
+  return 0;
+}
+#endif
