@@ -42,7 +42,6 @@ struct MemoryMappedSegmentData {
4242 const char *current_load_cmd_addr;
4343 u32 lc_type;
4444 uptr base_virt_addr;
45- uptr addr_mask;
4645};
4746
4847template <typename Section>
@@ -51,12 +50,60 @@ static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
5150 const Section *sc = (const Section *)data->current_load_cmd_addr ;
5251 data->current_load_cmd_addr += sizeof (Section);
5352
54- uptr sec_start = ( sc->addr & data-> addr_mask ) + data->base_virt_addr ;
53+ uptr sec_start = sc->addr + data->base_virt_addr ;
5554 uptr sec_end = sec_start + sc->size ;
5655 module ->addAddressRange (sec_start, sec_end, /* executable=*/ false , isWritable,
5756 sc->sectname );
5857}
5958
59+ static bool VerifyMemoryMapping (MemoryMappingLayout* mapping) {
60+ InternalMmapVector<LoadedModule> modules;
61+ modules.reserve (128 ); // matches DumpProcessMap
62+ mapping->DumpListOfModules (&modules);
63+
64+ InternalMmapVector<LoadedModule::AddressRange> segments;
65+ for (uptr i = 0 ; i < modules.size (); ++i) {
66+ for (auto & range : modules[i].ranges ()) {
67+ segments.push_back (range);
68+ }
69+ }
70+
71+ // Verify that none of the segments overlap:
72+ // 1. Sort the segments by the start address
73+ // 2. Check that every segment starts after the previous one ends.
74+ Sort (segments.data (), segments.size (),
75+ [](LoadedModule::AddressRange& a, LoadedModule::AddressRange& b) {
76+ return a.beg < b.beg ;
77+ });
78+
79+ // To avoid spam, we only print the report message once-per-process.
80+ static bool invalid_module_map_reported = false ;
81+ bool well_formed = true ;
82+
83+ for (size_t i = 1 ; i < segments.size (); i++) {
84+ uptr cur_start = segments[i].beg ;
85+ uptr prev_end = segments[i - 1 ].end ;
86+ if (cur_start < prev_end) {
87+ well_formed = false ;
88+ VReport (2 , " Overlapping mappings: %s start = %p, %s end = %p\n " ,
89+ segments[i].name , (void *)cur_start, segments[i - 1 ].name ,
90+ (void *)prev_end);
91+ if (!invalid_module_map_reported) {
92+ Report (
93+ " WARN: Invalid dyld module map detected. This is most likely a bug "
94+ " in the sanitizer.\n " );
95+ Report (" WARN: Backtraces may be unreliable.\n " );
96+ invalid_module_map_reported = true ;
97+ }
98+ }
99+ }
100+
101+ for (auto & m : modules) m.clear ();
102+
103+ mapping->Reset ();
104+ return well_formed;
105+ }
106+
60107void MemoryMappedSegment::AddAddressRanges (LoadedModule *module ) {
61108 // Don't iterate over sections when the caller hasn't set up the
62109 // data pointer, when there are no sections, or when the segment
@@ -82,6 +129,7 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
82129
83130MemoryMappingLayout::MemoryMappingLayout (bool cache_enabled) {
84131 Reset ();
132+ VerifyMemoryMapping (this );
85133}
86134
87135MemoryMappingLayout::~MemoryMappingLayout () {
@@ -187,6 +235,7 @@ typedef struct dyld_shared_cache_dylib_text_info
187235
188236extern bool _dyld_get_shared_cache_uuid (uuid_t uuid);
189237extern const void *_dyld_get_shared_cache_range (size_t *length);
238+ extern intptr_t _dyld_get_image_slide (const struct mach_header * mh);
190239extern int dyld_shared_cache_iterate_text (
191240 const uuid_t cacheUuid,
192241 void (^callback)(const dyld_shared_cache_dylib_text_info *info));
@@ -255,23 +304,21 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
255304 layout_data->current_load_cmd_count --;
256305 if (((const load_command *)lc)->cmd == kLCSegment ) {
257306 const SegmentCommand* sc = (const SegmentCommand *)lc;
258- uptr base_virt_addr, addr_mask;
259- if (layout_data-> current_image == kDyldImageIdx ) {
260- base_virt_addr = (uptr) get_dyld_hdr ();
261- // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
262- // it contains an absolute address rather than an offset for dyld.
263- // To make matters even more complicated, this absolute address
264- // isn't actually the absolute segment address, but the offset portion
265- // of the address is accurate when combined with the dyld base address,
266- // and the mask will give just this offset.
267- addr_mask = 0xfffff ;
268- } else {
307+ if ( internal_strcmp (sc-> segname , " __LINKEDIT " ) == 0 ) {
308+ // The LINKEDIT sections are for internal linker use, and may alias
309+ // with the LINKEDIT section for other modules. (If we included them,
310+ // our memory map would contain overlapping sections.)
311+ return false ;
312+ }
313+
314+ uptr base_virt_addr;
315+ if (layout_data-> current_image == kDyldImageIdx )
316+ base_virt_addr = (uptr) _dyld_get_image_slide ( get_dyld_hdr ()) ;
317+ else
269318 base_virt_addr =
270319 (uptr)_dyld_get_image_vmaddr_slide (layout_data->current_image );
271- addr_mask = ~0 ;
272- }
273320
274- segment->start = ( sc->vmaddr & addr_mask) + base_virt_addr;
321+ segment->start = sc->vmaddr + base_virt_addr;
275322 segment->end = segment->start + sc->vmsize ;
276323 // Most callers don't need section information, so only fill this struct
277324 // when required.
@@ -281,9 +328,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
281328 (const char *)lc + sizeof (SegmentCommand);
282329 seg_data->lc_type = kLCSegment ;
283330 seg_data->base_virt_addr = base_virt_addr;
284- seg_data->addr_mask = addr_mask;
285331 internal_strncpy (seg_data->name , sc->segname ,
286332 ARRAY_SIZE (seg_data->name ));
333+ seg_data->name [ARRAY_SIZE (seg_data->name ) - 1 ] = 0 ;
287334 }
288335
289336 // Return the initial protection.
@@ -297,6 +344,7 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
297344 ? kDyldPath
298345 : _dyld_get_image_name (layout_data->current_image );
299346 internal_strncpy (segment->filename , src, segment->filename_size );
347+ segment->filename [segment->filename_size - 1 ] = 0 ;
300348 }
301349 segment->arch = layout_data->current_arch ;
302350 internal_memcpy (segment->uuid , layout_data->current_uuid , kModuleUUIDSize );
0 commit comments