@@ -1211,292 +1211,20 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
         FLBitmap: ~const BinInteger,
         SLBitmap: ~const BinInteger,
     {
-        // Safety: `ptr` is a previously allocated memory block with the same
-        // alignment as `align`. This is upheld by the caller.
-        let block = Self::used_block_hdr_for_allocation(ptr, new_layout.align());
-
-        // Do this early so that the compiler can de-duplicate common
-        // subexpressions such as `block.as_ref().common.size - SIZE_USED`
         let old_size = Self::size_of_allocation(ptr, new_layout.align());
 
-        // First try to shrink or grow the block in-place (i.e., without
-        // allocating a whole new memory block).
-        if let Some(x) = self.reallocate_inplace(ptr, block, new_layout) {
-            return Some(x);
-        }
-
         // Allocate a whole new memory block
         let new_ptr = const_try!(self.allocate(new_layout));
 
         // Move the existing data into the new location
-        debug_assert!(new_layout.size() >= old_size);
-        core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
-
-        // Deallocate the old memory block.
-        self.deallocate(ptr, new_layout.align());
-
-        Some(new_ptr)
-    }
-
-    /// A subroutine of [`Self::reallocate`] that tries to reallocate a memory
-    /// block in-place.
-    #[inline]
-    const unsafe fn reallocate_inplace(
-        &mut self,
-        ptr: NonNull<u8>,
-        mut block: NonNull<UsedBlockHdr>,
-        new_layout: Layout,
-    ) -> Option<NonNull<u8>>
-    where
-        FLBitmap: ~const BinInteger,
-        SLBitmap: ~const BinInteger,
-    {
-        // The extra bytes consumed by the header and any padding
-        let overhead = ptr.as_ptr() as usize - block.as_ptr() as usize;
-
-        // Calculate the new block size. Fail if this causes an overflow.
-        // Failing at this point does not necessarily mean the whole process of
-        // reallocation has failed; a new place with a smaller overhead could be
-        // found later (whether there's actually such a situation or not is yet
-        // to be proven).
-        let new_size = const_try!(overhead.checked_add(new_layout.size()));
-        let new_size = const_try!(new_size.checked_add(GRANULARITY - 1)) & !(GRANULARITY - 1);
-
-        let old_size = block.as_ref().common.size - SIZE_USED;
-        debug_assert_eq!(old_size, block.as_ref().common.size & SIZE_SIZE_MASK);
-
-        // Shrinking
-        // ------------------------------------------------------------------
-
-        if new_size <= old_size {
-            if new_size == old_size {
-                // No size change
-            } else {
-                // Shrink the block, creating a new free block at the end
-                let shrink_by = old_size - new_size;
-
-                // We will create a new free block at this address
-                let mut new_free_block: NonNull<FreeBlockHdr> =
-                    NonNull::new_unchecked(block.cast::<u8>().as_ptr().add(new_size)).cast();
-                let mut new_free_block_size = shrink_by;
-
-                // If the next block is a free block...
-                let mut next_phys_block = block.as_ref().common.next_phys_block();
-                let next_phys_block_size_and_flags = next_phys_block.as_ref().size;
-                if (next_phys_block_size_and_flags & SIZE_USED) == 0 {
-                    let next_phys_block_size = next_phys_block_size_and_flags;
-                    debug_assert_eq!(
-                        next_phys_block_size,
-                        next_phys_block_size_and_flags & SIZE_SIZE_MASK
-                    );
-
-                    // Then we can merge this existing free block (`next_phys_block`)
-                    // into the new one (`new_free_block`).
-                    self.unlink_free_block(next_phys_block.cast(), next_phys_block_size);
-                    new_free_block_size += next_phys_block_size;
-
-                    let mut next_next_phys_block = next_phys_block.as_ref().next_phys_block();
-                    next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
-                } else {
-                    // We can't merge a used block (`next_phys_block`) and
-                    // a free block (`new_free_block`).
-                    next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
-                }
-
-                new_free_block.as_mut().common = BlockHdr {
-                    size: new_free_block_size,
-                    prev_phys_block: Some(block.cast()),
-                };
-                self.link_free_block(new_free_block, new_free_block_size);
-
-                block.as_mut().common.size = new_size | SIZE_USED;
-            }
-
-            return Some(ptr);
-        }
-
-        // In-place non-moving reallocation
-        // ------------------------------------------------------------------
-
-        debug_assert!(new_size > old_size);
-
-        let grow_by = new_size - old_size;
-        let next_phys_block = block.as_ref().common.next_phys_block();
-
-        // If we removed this block, there would be a continuous free space of
-        // `moving_clearance` bytes, which is followed by `moving_clearance_end`
-        let mut moving_clearance = old_size;
-        let mut moving_clearance_end = next_phys_block;
-
-        // Grow into the next free block. Fail if there isn't such a block.
-        #[allow(clippy::never_loop)]
-        'nonmoving: loop {
-            let next_phys_block_size_and_flags = next_phys_block.as_ref().size;
-
-            // Fail if it isn't a free block.
-            if (next_phys_block_size_and_flags & SIZE_USED) != 0 {
-                break 'nonmoving;
-            }
-
-            let mut next_phys_block_size = next_phys_block_size_and_flags;
-            debug_assert_eq!(
-                next_phys_block_size,
-                next_phys_block_size_and_flags & SIZE_SIZE_MASK
-            );
-
-            // Now we know it's really a free block.
-            let mut next_phys_block = next_phys_block.cast::<FreeBlockHdr>();
-            let mut next_next_phys_block = next_phys_block.as_ref().common.next_phys_block();
-
-            moving_clearance += next_phys_block_size;
-            moving_clearance_end = next_next_phys_block;
-
-            if grow_by > next_phys_block_size {
-                // Can't fit
-                break 'nonmoving;
-            }
-
-            self.unlink_free_block(next_phys_block, next_phys_block_size);
-
-            if grow_by < next_phys_block_size {
-                // Can fit and there's some slack. Create a free block to fill
-                // the slack.
-                next_phys_block_size -= grow_by;
-
-                next_phys_block =
-                    NonNull::new_unchecked(block.cast::<u8>().as_ptr().add(new_size)).cast();
-                next_phys_block.as_mut().common = BlockHdr {
-                    size: next_phys_block_size,
-                    prev_phys_block: Some(block.cast()),
-                };
-                self.link_free_block(next_phys_block, next_phys_block_size);
-
-                // Update `next_next_phys_block.prev_phys_block` accordingly
-                next_next_phys_block.as_mut().prev_phys_block = Some(next_phys_block.cast());
-            } else {
-                // Can fit exactly.
-                debug_assert_eq!(grow_by, next_phys_block_size);
-
-                // Update `next_next_phys_block.prev_phys_block` accordingly
-                next_next_phys_block.as_mut().prev_phys_block = Some(block.cast());
-            }
-
-            block.as_mut().common.size = new_size | SIZE_USED;
-
-            return Some(ptr);
-        }
-
-        // In-place moving reallocation
-        // ------------------------------------------------------------------
-
-        // The non-moving reallocation was a failure. Now try the moving
-        // approach, i.e., grow into the previous free block as well.
-        // Get the previous block. If there isn't such a block, the moving
-        // approach will not improve the situation anyway, so return `None`.
-        let prev_phys_block = const_try!(block.as_ref().common.prev_phys_block);
-        let prev_phys_block_size_and_flags = prev_phys_block.as_ref().size;
-
-        // Fail if it isn't a free block.
-        if (prev_phys_block_size_and_flags & SIZE_USED) != 0 {
-            return None;
-        }
-
-        let prev_phys_block_size = prev_phys_block_size_and_flags;
-        debug_assert_eq!(
-            prev_phys_block_size,
-            prev_phys_block_size_and_flags & SIZE_SIZE_MASK
-        );
-
-        // Now we know it's really a free block.
-        moving_clearance += prev_phys_block_size;
-
-        // Decide the starting address of the payload
-        let unaligned_ptr =
-            prev_phys_block.as_ptr() as *mut u8 as usize + mem::size_of::<UsedBlockHdr>();
-        let new_ptr = NonNull::new_unchecked(
-            ((unaligned_ptr + new_layout.align() - 1) & !(new_layout.align() - 1)) as *mut u8,
-        );
-
-        // Calculate the new block size
-        let new_overhead = new_ptr.as_ptr() as usize - prev_phys_block.as_ptr() as usize;
-        let new_size = const_try!(new_overhead.checked_add(new_layout.size()));
-        let new_size = const_try!(new_size.checked_add(GRANULARITY - 1)) & !(GRANULARITY - 1);
-        if new_size > moving_clearance {
-            // Can't fit
-            return None;
-        }
-
-        // Unlink the existing free blocks included in `moving_clearance`
-        self.unlink_free_block(prev_phys_block.cast(), prev_phys_block_size);
-        let next_phys_block_size_and_flags = next_phys_block.as_ref().size;
-        if (next_phys_block_size_and_flags & SIZE_USED) == 0 {
-            let next_phys_block_size = next_phys_block_size_and_flags;
-            debug_assert_eq!(
-                next_phys_block_size,
-                next_phys_block_size_and_flags & SIZE_SIZE_MASK
-            );
-            self.unlink_free_block(next_phys_block.cast(), next_phys_block_size);
-        }
-
-        // Move the existing data into the new memory block.
-        core::ptr::copy(
+        core::ptr::copy_nonoverlapping(
             ptr.as_ptr(),
             new_ptr.as_ptr(),
-            min_usize(new_layout.size(), old_size - overhead),
+            min_usize(old_size, new_layout.size()),
         );
 
-        // We'll replace `prev_phys_block` with a new used block.
-        let mut new_block = prev_phys_block.cast::<UsedBlockHdr>();
-
-        if new_size == moving_clearance {
-            // The allocation completely fills this free block.
-            // Update `prev_phys_block` accordingly
-            moving_clearance_end.as_mut().prev_phys_block = Some(new_block.cast());
-        } else {
-            // The allocation partially fills this free block. Create a new
-            // free block header at `new_block + new_size..new_block
-            // + moving_clearance`.
-            let mut new_free_block: NonNull<FreeBlockHdr> =
-                NonNull::new_unchecked(new_block.cast::<u8>().as_ptr().add(new_size)).cast();
-            let mut new_free_block_size = moving_clearance - new_size;
-
-            // If the following block (`moving_clearance_end`) is a free block...
-            let moving_clearance_end_size_and_flags = moving_clearance_end.as_ref().size;
-            if (moving_clearance_end_size_and_flags & SIZE_USED) == 0 {
-                let moving_clearance_end_size = moving_clearance_end_size_and_flags;
-                debug_assert_eq!(
-                    moving_clearance_end_size,
-                    moving_clearance_end_size_and_flags & SIZE_SIZE_MASK
-                );
-
-                // Then we should merge this existing free block (`moving_clearance_end`)
-                // into the new one (`new_free_block`).
-                self.unlink_free_block(moving_clearance_end.cast(), moving_clearance_end_size);
-                new_free_block_size += moving_clearance_end_size_and_flags;
-
-                let mut next_next_phys_block = moving_clearance_end.as_ref().next_phys_block();
-                next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
-            } else {
-                // We can't merge a used block (`moving_clearance_end`) and
-                // a free block (`new_free_block`).
-                moving_clearance_end.as_mut().prev_phys_block = Some(new_free_block.cast());
-            }
-
-            new_free_block.as_mut().common = BlockHdr {
-                size: new_free_block_size,
-                prev_phys_block: Some(new_block.cast()),
-            };
-            self.link_free_block(new_free_block, new_free_block_size);
-        }
-
-        // Turn `new_block` into a used memory block and initialize the used block
-        // header. `prev_phys_block` is already set.
-        new_block.as_mut().common.size = new_size | SIZE_USED;
-
-        // Place a header pointer (used by `used_block_hdr_for_allocation`)
-        if new_layout.align() >= GRANULARITY {
-            (*UsedBlockPad::get_for_allocation(new_ptr)).block_hdr = new_block;
-        }
+        // Deallocate the old memory block.
+        self.deallocate(ptr, new_layout.align());
 
         Some(new_ptr)
     }
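
A note on the changed copy length: the removed fallback path asserted
`new_layout.size() >= old_size` and copied `old_size` bytes unconditionally,
which was sound only because shrinking requests were already satisfied
in-place by `reallocate_inplace`. With that subroutine gone, `reallocate`
always takes the allocate-copy-deallocate path, so the copy is clamped to
`min_usize(old_size, new_layout.size())`; `copy_nonoverlapping` remains valid
because the old and new blocks never alias. A minimal sketch of the same
pattern, using the global allocator in place of the TLSF pool
(`realloc_fallback` is a hypothetical name, not part of this crate):

    use core::alloc::Layout;
    use std::alloc::{alloc, dealloc};

    /// Allocate-copy-deallocate fallback, mirroring the new `reallocate` body.
    /// The copy length is clamped so that a shrinking reallocation does not
    /// write past the end of the new block.
    unsafe fn realloc_fallback(
        ptr: *mut u8,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Option<*mut u8> {
        // Allocate a whole new memory block
        let new_ptr = alloc(new_layout);
        if new_ptr.is_null() {
            return None;
        }

        // Move the existing data into the new location. The blocks are
        // distinct, so a non-overlapping copy suffices.
        core::ptr::copy_nonoverlapping(ptr, new_ptr, old_layout.size().min(new_layout.size()));

        // Deallocate the old memory block.
        dealloc(ptr, old_layout);

        Some(new_ptr)
    }

    fn main() {
        unsafe {
            let old_layout = Layout::from_size_align(64, 8).unwrap();
            let new_layout = Layout::from_size_align(16, 8).unwrap();
            let p = alloc(old_layout);
            assert!(!p.is_null());
            p.write_bytes(0xAB, 64);
            // Shrinking: only the first 16 bytes are preserved.
            let q = realloc_fallback(p, old_layout, new_layout).unwrap();
            assert_eq!(*q, 0xAB);
            dealloc(q, new_layout);
        }
    }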
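The removed in-place paths all round block sizes up to the allocation
granularity with checked arithmetic, `(overhead + payload + GRANULARITY - 1)
& !(GRANULARITY - 1)`, failing through `const_try!` on overflow instead of
wrapping; the moving path uses the same power-of-two mask trick to align the
payload pointer. A small self-contained illustration of that round-up,
assuming an example `GRANULARITY` of 16 rather than the crate's real
target-dependent constant:

    /// Example granularity; rlsf derives the real value from the target's
    /// pointer width, so this constant is only for illustration.
    const GRANULARITY: usize = 16;

    /// Round `overhead + payload` up to a multiple of `GRANULARITY`,
    /// reporting overflow as `None` like the `const_try!`-based original.
    fn rounded_block_size(overhead: usize, payload: usize) -> Option<usize> {
        // `!(GRANULARITY - 1)` clears the low bits; this only works because
        // `GRANULARITY` is a power of two.
        debug_assert!(GRANULARITY.is_power_of_two());
        let size = overhead.checked_add(payload)?;
        Some(size.checked_add(GRANULARITY - 1)? & !(GRANULARITY - 1))
    }

    fn main() {
        // 8 bytes of header overhead + 25 payload bytes = 33, rounded up to 48.
        assert_eq!(rounded_block_size(8, 25), Some(48));
        // Already-aligned sizes are unchanged.
        assert_eq!(rounded_block_size(16, 16), Some(32));
        // Overflow is reported instead of wrapping.
        assert_eq!(rounded_block_size(usize::MAX, 1), None);
    }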