Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -484,6 +484,13 @@ inline uptr Log2(uptr x) {
return LeastSignificantSetBitIndex(x);
}

// Returns true when the closed intervals [start1, end1] and [start2, end2]
// share no points. Both intervals must be well-formed (start <= end),
// which is enforced with CHECK_LE.
inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
                                 uptr end2) {
  CHECK_LE(start1, end1);
  CHECK_LE(start2, end2);
  // The intervals intersect iff each one begins no later than the other
  // ends; separateness is the negation of that condition.
  bool overlap = (start2 <= end1) && (start1 <= end2);
  return !overlap;
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
Expand Down
53 changes: 47 additions & 6 deletions compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,8 @@ extern "C" {
natural_t *nesting_depth,
vm_region_recurse_info_t info,
mach_msg_type_number_t *infoCnt);

extern const void* _dyld_get_shared_cache_range(size_t* length);
}

# if !SANITIZER_GO
Expand Down Expand Up @@ -954,7 +956,17 @@ static void DisableMmapExcGuardExceptions() {
RTLD_DEFAULT, "task_set_exc_guard_behavior");
if (set_behavior == nullptr) return;
const task_exc_guard_behavior_t task_exc_guard_none = 0;
set_behavior(mach_task_self(), task_exc_guard_none);
kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none);
if (res != KERN_SUCCESS) {
Report(
"WARN: task_set_exc_guard_behavior returned %d (%s), "
"mmap may fail unexpectedly.\n",
res, mach_error_string(res));
if (res == KERN_DENIED)
Report(
"HINT: Check that task_set_exc_guard_behavior is allowed by "
"sandbox.\n");
}
}

static void VerifyInterceptorsWorking();
Expand Down Expand Up @@ -1387,15 +1399,27 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
return 0;
}

// Returns true if the address is definitely mapped, and false if it is not
// mapped or could not be determined.
bool IsAddressInMappedRegion(uptr addr) {
// When used during initialization (while only a single thread exists),
// this function verifies that a range of memory is not already mapped
// and will not be mapped later by the shared cache.
//
// If the syscall mach_vm_region_recurse fails (due to sandbox),
// we assume that the memory is not mapped so that execution can continue.
//
// NOTE: range_end is inclusive
//
// WARNING: This function must NOT allocate memory, since it is
// used in InitializeShadowMemory between where we search for
// space for shadow and where we actually allocate it.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
mach_vm_size_t vmsize = 0;
natural_t depth = 0;
vm_region_submap_short_info_data_64_t vminfo;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
mach_vm_address_t address = addr;
mach_vm_address_t address = range_start;

// First, check if the range is already mapped.
kern_return_t kr =
mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
(vm_region_info_t)&vminfo, &count);
Expand All @@ -1407,7 +1431,24 @@ bool IsAddressInMappedRegion(uptr addr) {
Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
}

return (kr == KERN_SUCCESS && addr >= address && addr < address + vmsize);
if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
range_start, range_end)) {
// Overlaps with already-mapped memory
return false;
}

size_t cacheLength;
uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);

if (cacheStart &&
!IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
range_start, range_end)) {
// Overlaps with shared cache region
return false;
}

// We believe this address is available.
return true;
}

// FIXME implement on this platform.
Expand Down
2 changes: 0 additions & 2 deletions compiler-rt/lib/sanitizer_common/sanitizer_mac.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,6 @@ struct ThreadEventCallbacks {

void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);

bool IsAddressInMappedRegion(uptr addr);

} // namespace __sanitizer

#endif // SANITIZER_APPLE
Expand Down
15 changes: 3 additions & 12 deletions compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -225,17 +225,9 @@ void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
uptr start2, uptr end2) {
CHECK(start1 <= end1);
CHECK(start2 <= end2);
return (end1 < start2) || (end2 < start1);
}

# if !SANITIZER_APPLE
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
// When the shadow is mapped, usually only a single thread exists.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
if (proc_maps.Error())
Expand All @@ -251,7 +243,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}

#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
Expand All @@ -265,7 +256,7 @@ void DumpProcessMap() {
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
#endif
# endif

const char *GetPwd() {
return GetEnv("PWD");
Expand Down
82 changes: 65 additions & 17 deletions compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ struct MemoryMappedSegmentData {
const char *current_load_cmd_addr;
u32 lc_type;
uptr base_virt_addr;
uptr addr_mask;
};

template <typename Section>
Expand All @@ -51,12 +50,60 @@ static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
const Section *sc = (const Section *)data->current_load_cmd_addr;
data->current_load_cmd_addr += sizeof(Section);

uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
uptr sec_start = sc->addr + data->base_virt_addr;
uptr sec_end = sec_start + sc->size;
module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
sc->sectname);
}

// Checks that the segment address ranges reported by the current memory
// mapping do not overlap one another. Returns true if the module map is
// well-formed. The first malformed map seen in the process triggers a
// once-per-process warning; per-overlap details go to VReport at
// verbosity >= 2.
static bool VerifyMemoryMapping(MemoryMappingLayout* mapping) {
  InternalMmapVector<LoadedModule> modules;
  modules.reserve(128);  // matches DumpProcessMap
  mapping->DumpListOfModules(&modules);

  // Flatten every module's address ranges into a single segment list.
  InternalMmapVector<LoadedModule::AddressRange> segments;
  for (uptr i = 0; i < modules.size(); ++i) {
    for (auto& range : modules[i].ranges()) {
      segments.push_back(range);
    }
  }

  // Verify that none of the segments overlap:
  // 1. Sort the segments by the start address.
  // 2. Sweep left to right, comparing each segment's start against the
  //    largest end seen so far. Comparing only against the immediately
  //    preceding segment would miss overlaps with an earlier segment
  //    that extends past its successors (e.g. [0,100) [10,20) [30,40)).
  Sort(segments.data(), segments.size(),
       [](LoadedModule::AddressRange& a, LoadedModule::AddressRange& b) {
         return a.beg < b.beg;
       });

  // To avoid spam, we only print the report message once-per-process.
  static bool invalid_module_map_reported = false;
  bool well_formed = true;

  if (!segments.empty()) {
    // Index of the segment whose end is the largest seen so far.
    uptr max_end_idx = 0;
    for (uptr i = 1; i < segments.size(); i++) {
      uptr cur_start = segments[i].beg;
      uptr max_end = segments[max_end_idx].end;
      if (cur_start < max_end) {
        well_formed = false;
        VReport(2, "Overlapping mappings: %s start = %p, %s end = %p\n",
                segments[i].name, (void*)cur_start,
                segments[max_end_idx].name, (void*)max_end);
        if (!invalid_module_map_reported) {
          Report(
              "WARN: Invalid dyld module map detected. This is most likely a bug "
              "in the sanitizer.\n");
          Report("WARN: Backtraces may be unreliable.\n");
          invalid_module_map_reported = true;
        }
      }
      if (segments[i].end > segments[max_end_idx].end)
        max_end_idx = i;
    }
  }

  for (auto& m : modules) m.clear();

  mapping->Reset();
  return well_formed;
}

void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
// Don't iterate over sections when the caller hasn't set up the
// data pointer, when there are no sections, or when the segment
Expand All @@ -82,6 +129,7 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {

MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
VerifyMemoryMapping(this);
}

MemoryMappingLayout::~MemoryMappingLayout() {
Expand Down Expand Up @@ -187,6 +235,7 @@ typedef struct dyld_shared_cache_dylib_text_info

extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
extern const void *_dyld_get_shared_cache_range(size_t *length);
extern intptr_t _dyld_get_image_slide(const struct mach_header* mh);
extern int dyld_shared_cache_iterate_text(
const uuid_t cacheUuid,
void (^callback)(const dyld_shared_cache_dylib_text_info *info));
Expand Down Expand Up @@ -255,23 +304,21 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
if (layout_data->current_image == kDyldImageIdx) {
base_virt_addr = (uptr)get_dyld_hdr();
// vmaddr is masked with 0xfffff because on macOS versions < 10.12,
// it contains an absolute address rather than an offset for dyld.
// To make matters even more complicated, this absolute address
// isn't actually the absolute segment address, but the offset portion
// of the address is accurate when combined with the dyld base address,
// and the mask will give just this offset.
addr_mask = 0xfffff;
} else {
if (internal_strcmp(sc->segname, "__LINKEDIT") == 0) {
// The LINKEDIT sections are for internal linker use, and may alias
// with the LINKEDIT section for other modules. (If we included them,
// our memory map would contain overlapping sections.)
return false;
}

uptr base_virt_addr;
if (layout_data->current_image == kDyldImageIdx)
base_virt_addr = (uptr)_dyld_get_image_slide(get_dyld_hdr());
else
base_virt_addr =
(uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);
addr_mask = ~0;
}

segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
segment->start = sc->vmaddr + base_virt_addr;
segment->end = segment->start + sc->vmsize;
// Most callers don't need section information, so only fill this struct
// when required.
Expand All @@ -281,9 +328,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
(const char *)lc + sizeof(SegmentCommand);
seg_data->lc_type = kLCSegment;
seg_data->base_virt_addr = base_virt_addr;
seg_data->addr_mask = addr_mask;
internal_strncpy(seg_data->name, sc->segname,
ARRAY_SIZE(seg_data->name));
seg_data->name[ARRAY_SIZE(seg_data->name) - 1] = 0;
}

// Return the initial protection.
Expand All @@ -297,6 +344,7 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
? kDyldPath
: _dyld_get_image_name(layout_data->current_image);
internal_strncpy(segment->filename, src, segment->filename_size);
segment->filename[segment->filename_size - 1] = 0;
}
segment->arch = layout_data->current_arch;
internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,13 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
}

# if SANITIZER_APPLE
if (list->empty()) {
Report(
"WARN: No external symbolizers found. Symbols may be missing or "
"unreliable.\n");
Report(
"HINT: Is PATH set? Does sandbox allow file-read of /usr/bin/atos?\n");
}
VReport(2, "Using dladdr symbolizer.\n");
list->push_back(new (*allocator) DlAddrSymbolizer());
# endif // SANITIZER_APPLE
Expand Down
2 changes: 1 addition & 1 deletion compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ void InitializePlatformEarly() {
}
// In some configurations, the max_vm is expanded, but much of this space is
// already mapped. TSAN will not work in this configuration.
if (IsAddressInMappedRegion(HiAppMemEnd() - 1)) {
if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd())) {
Report(
"ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
"already mapped.\n",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// This test simply checks that the "Invalid dyld module map" warning is not printed
// in the output of a backtrace.

// RUN: %clangxx_asan -DSHARED_LIB -g %s -dynamiclib -o %t.dylib
// RUN: %clangxx_asan -O0 -g %s %t.dylib -o %t.executable
// RUN: %env_asan_opts="print_module_map=2" not %run %t.executable 2>&1 | FileCheck %s -DDYLIB=%{t:stem}.tmp.dylib

// CHECK-NOT: WARN: Invalid dyld module map
// CHECK-DAG: 0x{{.*}}-0x{{.*}} {{.*}}[[DYLIB]]
// CHECK-DAG: 0x{{.*}}-0x{{.*}} {{.*}}libsystem

#ifdef SHARED_LIB
// Built into the companion dylib: stores through the pointer it is given.
extern "C" void foo(int *a) { *a = 5; }
#else
# include <cstdlib>

// Provided by the dylib built from the SHARED_LIB branch above.
extern "C" void foo(int *a);

int main() {
  int *a = (int *)malloc(sizeof(int));
  free(a);
  // Intentional heap-use-after-free: foo() writes to freed memory, so ASan
  // reports an error and (with print_module_map=2) prints the module map
  // that the CHECK lines above inspect. Do not "fix" this ordering.
  foo(a);
  return 0;
}
#endif