#endif
#include <stdio.h>

+// Start searching for available memory region past PAGEZERO, which is
+// 4KB on 32-bit and 4GB on 64-bit.
+#define GAP_SEARCH_START_ADDRESS \
+  ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000)
+
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
@@ -58,6 +63,7 @@ extern char ***_NSGetArgv(void);
#include <dlfcn.h>  // for dladdr()
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach/mach.h>
@@ -1106,6 +1112,67 @@ static void StripEnv() {
}
#endif  // SANITIZER_GO

+// Prints out a consolidated memory map: contiguous regions
+// are merged together.
+static void PrintVmmap() {
+  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  kern_return_t kr = KERN_SUCCESS;
+
+  Report("Memory map:\n");
+  mach_vm_address_t last = 0;
+  mach_vm_address_t lastsz = 0;
+
+  while (1) {
+    mach_vm_size_t vmsize = 0;
+    natural_t depth = 0;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+                                (vm_region_info_t)&vminfo, &count);
+
+    if (kr == KERN_DENIED) {
+      Report(
+          "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
+          "map.\n");
+      Report(
+          "HINT: Check whether mach_vm_region_recurse is allowed by "
+          "sandbox.\n");
+    }
+
+    if (kr == KERN_SUCCESS && address < max_vm_address) {
+      if (last + lastsz == address) {
+        // This region is contiguous with the last; merge together.
+        lastsz += vmsize;
+      } else {
+        if (lastsz)
+          Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", last,
+                 last + lastsz, lastsz);
+
+        last = address;
+        lastsz = vmsize;
+      }
+      address += vmsize;
+    } else {
+      // We've reached the end of the memory map. Print the last remaining
+      // region, if there is one.
+      if (lastsz)
+        Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", last,
+               last + lastsz, lastsz);
+
+      break;
+    }
+  }
+}
+
+static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) {
+  Report(
+      "FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
+      "(alignment=%p).\n",
+      shadow_size_bytes, alignment);
+  PrintVmmap();
+}
+
char **GetArgv() {
  return *_NSGetArgv();
}
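Not part of the patch: below is a minimal standalone sketch of the same technique PrintVmmap() uses, walking the task's address space with mach_vm_region_recurse() and merging contiguous regions, for experimenting with the Mach VM API outside the sanitizer runtime. It assumes a plain macOS host program (printf instead of the runtime's Printf, no sandbox); the file name and output format are illustrative only.

// vmmap_sketch.cpp (hypothetical file name)
// build: clang++ vmmap_sketch.cpp -o vmmap_sketch
#include <inttypes.h>
#include <stdio.h>

#include <mach/mach.h>
#include <mach/mach_vm.h>

int main() {
  mach_vm_address_t address = 0;
  mach_vm_address_t last = 0, lastsz = 0;
  while (true) {
    mach_vm_size_t vmsize = 0;
    natural_t depth = 0;
    vm_region_submap_short_info_data_64_t vminfo;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
    kern_return_t kr =
        mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                               (vm_region_recurse_info_t)&vminfo, &count);
    if (kr == KERN_SUCCESS && last + lastsz == address) {
      // Contiguous with the previous region; extend it instead of printing.
      lastsz += vmsize;
    } else {
      // Flush the previously accumulated region, then start a new one.
      if (lastsz)
        printf("[0x%" PRIx64 ", 0x%" PRIx64 ") size=0x%" PRIx64 "\n",
               (uint64_t)last, (uint64_t)(last + lastsz), (uint64_t)lastsz);
      last = address;
      lastsz = vmsize;
    }
    // A non-success return (e.g. KERN_INVALID_ADDRESS) marks the end of the map.
    if (kr != KERN_SUCCESS) break;
    address += vmsize;
  }
  return 0;
}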
@@ -1213,10 +1280,11 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
  if (new_max_vm < max_occupied_addr) {
    Report("Unable to find a memory range for dynamic shadow.\n");
    Report(
-        "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
-        "new_max_vm = %p\n",
-        (void *)space_size, (void *)largest_gap_found,
-        (void *)max_occupied_addr, (void *)new_max_vm);
+        "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr "
+        "= %p\n\tnew_max_vm = %p\n",
+        (void *)space_size, (void *)largest_gap_found, (void *)max_occupied_addr,
+        (void *)new_max_vm);
+    ReportShadowAllocFail(shadow_size_bytes, alignment);
    CHECK(0 && "cannot place shadow");
  }
  RestrictMemoryToMaxAddress(new_max_vm);
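With the reworked format string, each value in the diagnostic lands on its own tab-indented line, and ReportShadowAllocFail() then appends the consolidated memory map. For illustration only, with made-up placeholder values and the runtime's usual message prefixes omitted, the failure output would look roughly like:

	Unable to find a memory range for dynamic shadow.
		space_size = 0x200000000000
		largest_gap_found = 0x000010000000
		max_occupied_addr = 0x7fff90000000
		new_max_vm = 0x7ffffffff000
	FATAL: Failed to allocate shadow memory. Tried to allocate 0x200000000000 bytes (alignment=0x1000).
	Memory map:
	|| `[0x000100000000, 0x000100010000]` || size=0x0000000000010000 ||
	...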
@@ -1227,6 +1295,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                                      nullptr, nullptr);
    if (shadow_start == 0) {
      Report("Unable to find a memory range after restricting VM.\n");
+      ReportShadowAllocFail(shadow_size_bytes, alignment);
      CHECK(0 && "cannot place shadow after restricting vm");
    }
  }
@@ -1242,26 +1311,19 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
-                              uptr *largest_gap_found,
-                              uptr *max_occupied_addr) {
-  typedef vm_region_submap_short_info_data_64_t RegionInfo;
-  enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
-  // Start searching for available memory region past PAGEZERO, which is
-  // 4KB on 32-bit and 4GB on 64-bit.
-  mach_vm_address_t start_address =
-      (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
-
+                              uptr* largest_gap_found,
+                              uptr* max_occupied_addr) {
  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
-  mach_vm_address_t address = start_address;
-  mach_vm_address_t free_begin = start_address;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS;
  kern_return_t kr = KERN_SUCCESS;
  if (largest_gap_found) *largest_gap_found = 0;
  if (max_occupied_addr) *max_occupied_addr = 0;
  while (kr == KERN_SUCCESS) {
    mach_vm_size_t vmsize = 0;
    natural_t depth = 0;
-    RegionInfo vminfo;
-    mach_msg_type_number_t count = kRegionInfoSize;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                                (vm_region_info_t)&vminfo, &count);