@@ -45,7 +45,6 @@ struct MemoryMappedSegmentData {
   const char *current_load_cmd_addr;
   u32 lc_type;
   uptr base_virt_addr;
-  uptr addr_mask;
 };
 
 template <typename Section>
@@ -54,12 +53,60 @@ static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
   const Section *sc = (const Section *)data->current_load_cmd_addr;
   data->current_load_cmd_addr += sizeof(Section);
 
-  uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+  uptr sec_start = sc->addr + data->base_virt_addr;
   uptr sec_end = sec_start + sc->size;
   module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
                           sc->sectname);
 }
 
+static bool VerifyMemoryMapping(MemoryMappingLayout *mapping) {
+  InternalMmapVector<LoadedModule> modules;
+  modules.reserve(128);  // matches DumpProcessMap
+  mapping->DumpListOfModules(&modules);
+
+  InternalMmapVector<LoadedModule::AddressRange> segments;
+  for (uptr i = 0; i < modules.size(); ++i) {
+    for (auto &range : modules[i].ranges()) {
+      segments.push_back(range);
+    }
+  }
+
+  // Verify that none of the segments overlap:
+  // 1. Sort the segments by start address.
+  // 2. Check that every segment starts after the previous one ends.
+  Sort(segments.data(), segments.size(),
+       [](LoadedModule::AddressRange &a, LoadedModule::AddressRange &b) {
+         return a.beg < b.beg;
+       });
+
+  // To avoid spam, we only print the report message once per process.
+  static bool invalid_module_map_reported = false;
+  bool well_formed = true;
+
+  for (size_t i = 1; i < segments.size(); i++) {
+    uptr cur_start = segments[i].beg;
+    uptr prev_end = segments[i - 1].end;
+    if (cur_start < prev_end) {
+      well_formed = false;
+      VReport(2, "Overlapping mappings: %s start = %p, %s end = %p\n",
+              segments[i].name, (void *)cur_start, segments[i - 1].name,
+              (void *)prev_end);
+      if (!invalid_module_map_reported) {
+        Report(
+            "WARN: Invalid dyld module map detected. This is most likely a "
+            "bug in the sanitizer.\n");
+        Report("WARN: Backtraces may be unreliable.\n");
+        invalid_module_map_reported = true;
+      }
+    }
+  }
+
+  for (auto &m : modules) m.clear();
+
+  mapping->Reset();
+  return well_formed;
+}
+
 void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
   // Don't iterate over sections when the caller hasn't set up the
   // data pointer, when there are no sections, or when the segment
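
The VerifyMemoryMapping routine added in the hunk above boils down to a classic interval check: sort the address ranges by start address, after which any overlap must show up between adjacent neighbors. Below is a minimal standalone sketch of that check against plain STL containers (std::vector, std::sort) rather than the sanitizer-internal InternalMmapVector and Sort; the Range struct and RangesAreWellFormed helper are illustrative names, not part of the patch.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Range {
  uintptr_t beg;
  uintptr_t end;
  const char *name;
};

static bool RangesAreWellFormed(std::vector<Range> ranges) {
  // 1. Sort by start address.
  std::sort(ranges.begin(), ranges.end(),
            [](const Range &a, const Range &b) { return a.beg < b.beg; });
  bool well_formed = true;
  // 2. After sorting, each range must start at or after the previous end.
  for (size_t i = 1; i < ranges.size(); i++) {
    if (ranges[i].beg < ranges[i - 1].end) {
      well_formed = false;
      std::printf("overlap: %s starts at %#lx before %s ends at %#lx\n",
                  ranges[i].name, (unsigned long)ranges[i].beg,
                  ranges[i - 1].name, (unsigned long)ranges[i - 1].end);
    }
  }
  return well_formed;
}

int main() {
  // Two well-formed ranges followed by one that overlaps the second.
  std::vector<Range> ranges = {{0x1000, 0x2000, "__TEXT"},
                               {0x2000, 0x3000, "__DATA"},
                               {0x2800, 0x3800, "__LINKEDIT"}};
  return RangesAreWellFormed(ranges) ? 0 : 1;
}
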
@@ -85,6 +132,7 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
 
 MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
   Reset();
+  VerifyMemoryMapping(this);
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
@@ -190,6 +238,7 @@ typedef struct dyld_shared_cache_dylib_text_info
 
 extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
 extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern intptr_t _dyld_get_image_slide(const struct mach_header *mh);
 extern int dyld_shared_cache_iterate_text(
     const uuid_t cacheUuid,
     void (^callback)(const dyld_shared_cache_dylib_text_info *info));
@@ -258,23 +307,21 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
   layout_data->current_load_cmd_count--;
   if (((const load_command *)lc)->cmd == kLCSegment) {
     const SegmentCommand *sc = (const SegmentCommand *)lc;
-    uptr base_virt_addr, addr_mask;
-    if (layout_data->current_image == kDyldImageIdx) {
-      base_virt_addr = (uptr)get_dyld_hdr();
-      // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
-      // it contains an absolute address rather than an offset for dyld.
-      // To make matters even more complicated, this absolute address
-      // isn't actually the absolute segment address, but the offset portion
-      // of the address is accurate when combined with the dyld base address,
-      // and the mask will give just this offset.
-      addr_mask = 0xfffff;
-    } else {
+    if (internal_strcmp(sc->segname, "__LINKEDIT") == 0) {
+      // The LINKEDIT sections are for internal linker use and may alias the
+      // LINKEDIT sections of other modules. (If we included them, our memory
+      // map would contain overlapping sections.)
+      return false;
+    }
+
+    uptr base_virt_addr;
+    if (layout_data->current_image == kDyldImageIdx)
+      base_virt_addr = (uptr)_dyld_get_image_slide(get_dyld_hdr());
+    else
       base_virt_addr =
           (uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);
-      addr_mask = ~0;
-    }
 
-    segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+    segment->start = sc->vmaddr + base_virt_addr;
     segment->end = segment->start + sc->vmsize;
     // Most callers don't need section information, so only fill this struct
     // when required.
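
With addr_mask gone, the hunk above computes every segment's runtime start as its link-time vmaddr plus the image's ASLR slide, using _dyld_get_image_slide for dyld itself and _dyld_get_image_vmaddr_slide for ordinary images, and it skips __LINKEDIT because that segment is shared across modules. Here is a minimal sketch of the same walk using only public dyld/mach-o APIs; it assumes a 64-bit image, and PrintSegmentRanges is a hypothetical helper written for illustration, not part of the patch.

#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void PrintSegmentRanges(uint32_t image_index) {
  const struct mach_header *hdr = _dyld_get_image_header(image_index);
  intptr_t slide = _dyld_get_image_vmaddr_slide(image_index);
  // Assumes a 64-bit image: load commands follow a mach_header_64.
  const struct load_command *lc =
      (const struct load_command *)((const char *)hdr +
                                    sizeof(struct mach_header_64));
  for (uint32_t i = 0; i < hdr->ncmds; i++) {
    if (lc->cmd == LC_SEGMENT_64) {
      const struct segment_command_64 *sc =
          (const struct segment_command_64 *)lc;
      // Mirror the patch: __LINKEDIT is shared between images, so skip it.
      if (strcmp(sc->segname, SEG_LINKEDIT) != 0) {
        // Runtime start = link-time vmaddr + ASLR slide; no masking needed.
        uintptr_t start = (uintptr_t)(sc->vmaddr + slide);
        printf("%-16.16s [%p, %p)\n", sc->segname, (void *)start,
               (void *)(start + sc->vmsize));
      }
    }
    lc = (const struct load_command *)((const char *)lc + lc->cmdsize);
  }
}

int main(void) {
  PrintSegmentRanges(0);  // image 0 is the main executable
  return 0;
}
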
@@ -284,9 +331,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
           (const char *)lc + sizeof(SegmentCommand);
       seg_data->lc_type = kLCSegment;
       seg_data->base_virt_addr = base_virt_addr;
-      seg_data->addr_mask = addr_mask;
       internal_strncpy(seg_data->name, sc->segname,
                        ARRAY_SIZE(seg_data->name));
+      seg_data->name[ARRAY_SIZE(seg_data->name) - 1] = 0;
     }
 
     // Return the initial protection.
@@ -300,6 +347,7 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
                             ? kDyldPath
                             : _dyld_get_image_name(layout_data->current_image);
       internal_strncpy(segment->filename, src, segment->filename_size);
+      segment->filename[segment->filename_size - 1] = 0;
     }
     segment->arch = layout_data->current_arch;
     internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);