2222# endif
2323# include < stdio.h>
2424
25+ // Start searching for available memory region past PAGEZERO, which is
26+ // 4KB on 32-bit and 4GB on 64-bit.
27+ # define GAP_SEARCH_START_ADDRESS \
28+ ((SANITIZER_WORDSIZE == 32 ) ? 0x000000001000 : 0x000100000000 )
29+
2530# include " sanitizer_common.h"
2631# include " sanitizer_file.h"
2732# include " sanitizer_flags.h"
@@ -58,6 +63,7 @@ extern char ***_NSGetArgv(void);
5863# include < dlfcn.h> // for dladdr()
5964# include < errno.h>
6065# include < fcntl.h>
66+ # include < inttypes.h>
6167# include < libkern/OSAtomic.h>
6268# include < mach-o/dyld.h>
6369# include < mach/mach.h>
@@ -1106,6 +1112,67 @@ static void StripEnv() {
11061112}
11071113#endif // SANITIZER_GO
11081114
1115+ // Prints out a consolidated memory map: contiguous regions
1116+ // are merged together.
1117+ static void PrintVmmap () {
1118+ const mach_vm_address_t max_vm_address = GetMaxVirtualAddress () + 1 ;
1119+ mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
1120+ kern_return_t kr = KERN_SUCCESS;
1121+
1122+ Report (" Memory map:\n " );
1123+ mach_vm_address_t last = 0 ;
1124+ mach_vm_address_t lastsz = 0 ;
1125+
1126+ while (1 ) {
1127+ mach_vm_size_t vmsize = 0 ;
1128+ natural_t depth = 0 ;
1129+ vm_region_submap_short_info_data_64_t vminfo;
1130+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1131+ kr = mach_vm_region_recurse (mach_task_self (), &address, &vmsize, &depth,
1132+ (vm_region_info_t )&vminfo, &count);
1133+
1134+ if (kr == KERN_DENIED) {
1135+ Report (
1136+ " ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
1137+ " map.\n " );
1138+ Report (
1139+ " HINT: Check whether mach_vm_region_recurse is allowed by "
1140+ " sandbox.\n " );
1141+ }
1142+
1143+ if (kr == KERN_SUCCESS && address < max_vm_address) {
1144+ if (last + lastsz == address) {
1145+ // This region is contiguous with the last; merge together.
1146+ lastsz += vmsize;
1147+ } else {
1148+ if (lastsz)
1149+ Printf (" || `[%p, %p]` || size=0x%016" PRIx64 " ||\n " , last,
1150+ last + lastsz, lastsz);
1151+
1152+ last = address;
1153+ lastsz = vmsize;
1154+ }
1155+ address += vmsize;
1156+ } else {
1157+ // We've reached the end of the memory map. Print the last remaining
1158+ // region, if there is one.
1159+ if (lastsz)
1160+ Printf (" || `[%p, %p]` || size=0x%016" PRIx64 " ||\n " , last,
1161+ last + lastsz, lastsz);
1162+
1163+ break ;
1164+ }
1165+ }
1166+ }
1167+
1168+ static void ReportShadowAllocFail (uptr shadow_size_bytes, uptr alignment) {
1169+ Report (
1170+ " FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
1171+ " (alignment=%p).\n " ,
1172+ shadow_size_bytes, alignment);
1173+ PrintVmmap ();
1174+ }
1175+
11091176char **GetArgv () {
11101177 return *_NSGetArgv ();
11111178}
@@ -1213,10 +1280,11 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
12131280 if (new_max_vm < max_occupied_addr) {
12141281 Report (" Unable to find a memory range for dynamic shadow.\n " );
12151282 Report (
1216- " space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
1217- " new_max_vm = %p\n " ,
1218- (void *)space_size, (void *)largest_gap_found,
1219- (void *)max_occupied_addr, (void *)new_max_vm);
1283+ " \t space_size = %p\n\t largest_gap_found = %p\n\t max_occupied_addr "
1284+ " = %p\n\t new_max_vm = %p\n " ,
1285+ (void *)space_size, (void *)largest_gap_found, (void *)max_occupied_addr,
1286+ (void *)new_max_vm);
1287+ ReportShadowAllocFail (shadow_size_bytes, alignment);
12201288 CHECK (0 && " cannot place shadow" );
12211289 }
12221290 RestrictMemoryToMaxAddress (new_max_vm);
@@ -1227,6 +1295,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
12271295 nullptr , nullptr );
12281296 if (shadow_start == 0 ) {
12291297 Report (" Unable to find a memory range after restricting VM.\n " );
1298+ ReportShadowAllocFail (shadow_size_bytes, alignment);
12301299 CHECK (0 && " cannot place shadow after restricting vm" );
12311300 }
12321301 }
@@ -1242,26 +1311,19 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
12421311}
12431312
12441313uptr FindAvailableMemoryRange (uptr size, uptr alignment, uptr left_padding,
1245- uptr *largest_gap_found,
1246- uptr *max_occupied_addr) {
1247- typedef vm_region_submap_short_info_data_64_t RegionInfo;
1248- enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
1249- // Start searching for available memory region past PAGEZERO, which is
1250- // 4KB on 32-bit and 4GB on 64-bit.
1251- mach_vm_address_t start_address =
1252- (SANITIZER_WORDSIZE == 32 ) ? 0x000000001000 : 0x000100000000 ;
1253-
1314+ uptr* largest_gap_found,
1315+ uptr* max_occupied_addr) {
12541316 const mach_vm_address_t max_vm_address = GetMaxVirtualAddress () + 1 ;
1255- mach_vm_address_t address = start_address ;
1256- mach_vm_address_t free_begin = start_address ;
1317+ mach_vm_address_t address = GAP_SEARCH_START_ADDRESS ;
1318+ mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS ;
12571319 kern_return_t kr = KERN_SUCCESS;
12581320 if (largest_gap_found) *largest_gap_found = 0 ;
12591321 if (max_occupied_addr) *max_occupied_addr = 0 ;
12601322 while (kr == KERN_SUCCESS) {
12611323 mach_vm_size_t vmsize = 0 ;
12621324 natural_t depth = 0 ;
1263- RegionInfo vminfo;
1264- mach_msg_type_number_t count = kRegionInfoSize ;
1325+ vm_region_submap_short_info_data_64_t vminfo;
1326+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 ;
12651327 kr = mach_vm_region_recurse (mach_task_self (), &address, &vmsize, &depth,
12661328 (vm_region_info_t )&vminfo, &count);
12671329