172
172
/* Convert a root buffer index into a pointer to the corresponding entry. */
#define GC_IDX2PTR(idx)      (GC_G(buf) + (idx))
173
173
/* Convert a pointer to a root buffer entry into its index. */
#define GC_PTR2IDX(ptr)      ((ptr) - GC_G(buf))
174
174
175
+ /* Get the value to be placed in an unused buffer entry with the specified next unused list index */
175
176
/* Encode the index of the next unused entry as the value stored in an unused
 * root buffer entry; the GC_UNUSED flag is set in the low bits. */
#define GC_IDX2LIST(idx)     ((void*)(uintptr_t)(((idx) * sizeof(void*)) | GC_UNUSED))
177
+ /* Get the index of the next item in the unused list from the given root buffer entry. */
176
178
/* Decode the next unused-list index from an unused root buffer entry
 * (inverse of GC_IDX2LIST; the division discards the low flag bits). */
#define GC_LIST2IDX(list)    (((uint32_t)(uintptr_t)(list)) / sizeof(void*))
177
179
178
180
/* GC buffers */
228
230
} while (0)
229
231
230
232
/* unused buffers */
233
+
234
+ /* Are there any unused root buffer entries? */
231
235
/* True if the unused list contains at least one reusable root buffer entry. */
#define GC_HAS_UNUSED() \
	(GC_G(unused) != GC_INVALID)
237
+
238
+ /* Get the next unused entry and remove it from the list */
233
239
/* Pop the next unused entry's index off the unused list (wraps gc_fetch_unused). */
#define GC_FETCH_UNUSED() \
	gc_fetch_unused()
241
+
242
+ /* Add a root buffer entry to the unused list */
235
243
/* Push a root buffer entry onto the unused list (wraps gc_link_unused). */
#define GC_LINK_UNUSED(root) \
	gc_link_unused(root)
237
245
@@ -249,7 +257,18 @@ typedef struct _gc_root_buffer {
249
257
} gc_root_buffer ;
250
258
251
259
typedef struct _zend_gc_globals {
252
- gc_root_buffer * buf ; /* preallocated arrays of buffers */
260
+ /*
261
+ * The root buffer, which stores possible roots of reference cycles. It is
262
+ * also used to store garbage to be collected at the end of a run.
263
+ * A single array which is reallocated as necessary.
264
+ *
265
+ * The lower two bits in each entry are used for flags and need to be masked
266
+ * out to reconstruct a pointer.
267
+ *
268
+ * When an object in the root buffer is removed, the non-flag bits of the
269
+ * unused entry are used to store the index of the next entry in the unused
270
+ * list. */
271
+ gc_root_buffer * buf ;
253
272
254
273
bool gc_enabled ;
255
274
bool gc_active ; /* GC currently running, forbid nested GC */
@@ -262,13 +281,13 @@ typedef struct _zend_gc_globals {
262
281
uint32_t buf_size ; /* size of the GC buffer */
263
282
uint32_t num_roots ; /* number of roots in GC buffer */
264
283
265
- uint32_t gc_runs ;
266
- uint32_t collected ;
284
+ uint32_t gc_runs ; /* number of GC runs since reset */
285
+ uint32_t collected ; /* number of collected objects since reset */
267
286
268
- zend_hrtime_t activated_at ;
269
- zend_hrtime_t collector_time ;
270
- zend_hrtime_t dtor_time ;
271
- zend_hrtime_t free_time ;
287
+ zend_hrtime_t activated_at ; /* the timestamp of the last reset */
288
+ zend_hrtime_t collector_time ; /* time spent running GC (ns) */
289
+ zend_hrtime_t dtor_time ; /* time spent calling destructors (ns) */
290
+ zend_hrtime_t free_time ; /* time spent destroying objects and freeing memory (ns) */
272
291
273
292
uint32_t dtor_idx ; /* root buffer index */
274
293
uint32_t dtor_end ;
@@ -313,6 +332,8 @@ static zend_gc_globals gc_globals;
313
332
314
333
typedef struct _gc_stack gc_stack ;
315
334
335
+ /*
336
+ * The stack used for graph traversal is stored as a linked list of segments */
316
337
struct _gc_stack {
317
338
gc_stack * prev ;
318
339
gc_stack * next ;
@@ -375,6 +396,12 @@ static void gc_stack_free(gc_stack *stack)
375
396
}
376
397
}
377
398
399
+ /*
400
+ * Map a full index to a compressed index.
401
+ *
402
+ * The root buffer can have up to 2^30 entries, but we only have 20 bits to
403
+ * store the index. So we use bit 1<<19 as a compression flag and use the other
404
+ * 19 bits to store the index modulo 2^19. */
378
405
static zend_always_inline uint32_t gc_compress (uint32_t idx )
379
406
{
380
407
if (EXPECTED (idx < GC_MAX_UNCOMPRESSED )) {
@@ -383,6 +410,10 @@ static zend_always_inline uint32_t gc_compress(uint32_t idx)
383
410
return (idx % GC_MAX_UNCOMPRESSED ) | GC_MAX_UNCOMPRESSED ;
384
411
}
385
412
413
+ /*
414
+ * Find the root buffer entry given a pointer and a compressed index.
415
+ * Iterate through the root buffer in steps of 2^19 until the pointer
416
+ * matches. */
386
417
static zend_always_inline gc_root_buffer * gc_decompress (zend_refcounted * ref , uint32_t idx )
387
418
{
388
419
gc_root_buffer * root = GC_IDX2PTR (idx );
@@ -401,6 +432,9 @@ static zend_always_inline gc_root_buffer* gc_decompress(zend_refcounted *ref, ui
401
432
}
402
433
}
403
434
435
+ /*
436
+ * Get the index of the next unused root buffer entry, and remove it from the
437
+ * unused list. GC_HAS_UNUSED() must be true before calling this. */
404
438
static zend_always_inline uint32_t gc_fetch_unused (void )
405
439
{
406
440
uint32_t idx ;
@@ -414,6 +448,7 @@ static zend_always_inline uint32_t gc_fetch_unused(void)
414
448
return idx ;
415
449
}
416
450
451
+ /* Add a root buffer entry to the unused list */
417
452
static zend_always_inline void gc_link_unused (gc_root_buffer * root )
418
453
{
419
454
root -> ref = GC_IDX2LIST (GC_G (unused ));
@@ -463,13 +498,15 @@ static void gc_trace_ref(zend_refcounted *ref) {
463
498
}
464
499
#endif
465
500
501
+ /* Mark a root buffer entry unused */
466
502
static zend_always_inline void gc_remove_from_roots (gc_root_buffer * root )
467
503
{
468
504
GC_LINK_UNUSED (root );
469
505
GC_G (num_roots )-- ;
470
506
GC_BENCH_DEC (root_buf_length );
471
507
}
472
508
509
+ /* Destroy the root buffer */
473
510
static void root_buffer_dtor (zend_gc_globals * gc_globals )
474
511
{
475
512
if (gc_globals -> buf ) {
@@ -565,6 +602,8 @@ void gc_reset(void)
565
602
GC_G (activated_at ) = zend_hrtime ();
566
603
}
567
604
605
+ /* Enable/disable the garbage collector.
606
+ * Initialize globals if necessary. */
568
607
ZEND_API bool gc_enable (bool enable )
569
608
{
570
609
bool old_enabled = GC_G (gc_enabled );
@@ -584,6 +623,7 @@ ZEND_API bool gc_enabled(void)
584
623
return GC_G (gc_enabled );
585
624
}
586
625
626
+ /* Protect the GC root buffer (prevent additions) */
587
627
ZEND_API bool gc_protect (bool protect )
588
628
{
589
629
bool old_protected = GC_G (gc_protected );
@@ -596,6 +636,7 @@ ZEND_API bool gc_protected(void)
596
636
return GC_G (gc_protected );
597
637
}
598
638
639
+ /* Reallocate the GC root buffer */
599
640
static void gc_grow_root_buffer (void )
600
641
{
601
642
size_t new_size ;
@@ -621,6 +662,7 @@ static void gc_grow_root_buffer(void)
621
662
GC_G (buf_size ) = new_size ;
622
663
}
623
664
665
+ /* Adjust the GC activation threshold given the number of objects collected by the last run */
624
666
static void gc_adjust_threshold (int count )
625
667
{
626
668
uint32_t new_threshold ;
@@ -651,6 +693,7 @@ static void gc_adjust_threshold(int count)
651
693
}
652
694
}
653
695
696
+ /* Add an object as a possible root, and perform a GC run unless one is active already. */
654
697
static zend_never_inline void ZEND_FASTCALL gc_possible_root_when_full (zend_refcounted * ref )
655
698
{
656
699
uint32_t idx ;
@@ -695,6 +738,8 @@ static zend_never_inline void ZEND_FASTCALL gc_possible_root_when_full(zend_refc
695
738
GC_BENCH_PEAK (root_buf_peak , root_buf_length );
696
739
}
697
740
741
+ /* Add a possible root object to the buffer.
742
+ * Maybe perform a GC run. */
698
743
ZEND_API void ZEND_FASTCALL gc_possible_root (zend_refcounted * ref )
699
744
{
700
745
uint32_t idx ;
@@ -731,6 +776,7 @@ ZEND_API void ZEND_FASTCALL gc_possible_root(zend_refcounted *ref)
731
776
GC_BENCH_PEAK (root_buf_peak , root_buf_length );
732
777
}
733
778
779
+ /* Add an extra root during a GC run */
734
780
static void ZEND_FASTCALL gc_extra_root (zend_refcounted * ref )
735
781
{
736
782
uint32_t idx ;
@@ -764,12 +810,14 @@ static void ZEND_FASTCALL gc_extra_root(zend_refcounted *ref)
764
810
GC_BENCH_PEAK (root_buf_peak , root_buf_length );
765
811
}
766
812
813
+ /* Remove an object from the root buffer given its compressed index */
767
814
static zend_never_inline void ZEND_FASTCALL gc_remove_compressed (zend_refcounted * ref , uint32_t idx )
768
815
{
769
816
gc_root_buffer * root = gc_decompress (ref , idx );
770
817
gc_remove_from_roots (root );
771
818
}
772
819
820
+ /* Remove an object from the root buffer */
773
821
ZEND_API void ZEND_FASTCALL gc_remove_from_buffer (zend_refcounted * ref )
774
822
{
775
823
gc_root_buffer * root ;
@@ -793,6 +841,10 @@ ZEND_API void ZEND_FASTCALL gc_remove_from_buffer(zend_refcounted *ref)
793
841
gc_remove_from_roots (root );
794
842
}
795
843
844
+ /* Traverse the graph of objects referred to by ref. Change grey objects back
845
+ * to black, and restore their reference counts. See ScanBlack() in Bacon & Rajan.
846
+ * To implement a depth-first search, discovered objects are added to a stack which
847
+ * is processed iteratively. */
796
848
static void gc_scan_black (zend_refcounted * ref , gc_stack * stack )
797
849
{
798
850
HashTable * ht ;
@@ -992,6 +1044,8 @@ static void gc_scan_black(zend_refcounted *ref, gc_stack *stack)
992
1044
}
993
1045
}
994
1046
1047
+ /* Traverse the graph of objects referred to by ref. Decrement the reference
1048
+ * counts and mark visited objects grey. See MarkGray() in Bacon & Rajan. */
995
1049
static void gc_mark_grey (zend_refcounted * ref , gc_stack * stack )
996
1050
{
997
1051
HashTable * ht ;
@@ -1204,6 +1258,8 @@ static void gc_compact(void)
1204
1258
}
1205
1259
}
1206
1260
1261
+ /* For all roots marked purple, traverse the graph, marking referred objects grey.
1262
+ * See MarkRoots() in Bacon & Rajan. */
1207
1263
static void gc_mark_roots (gc_stack * stack )
1208
1264
{
1209
1265
gc_root_buffer * current , * last ;
@@ -1223,6 +1279,10 @@ static void gc_mark_roots(gc_stack *stack)
1223
1279
}
1224
1280
}
1225
1281
1282
+ /* Traverse the reference graph of ref. Evaluate grey nodes and mark them
1283
+ * black (to keep) or white (to free). Note that nodes initially marked white
1284
+ * may later become black if they are visited from a live node.
1285
+ * See Scan() in Bacon & Rajan. */
1226
1286
static void gc_scan (zend_refcounted * ref , gc_stack * stack )
1227
1287
{
1228
1288
HashTable * ht ;
@@ -1376,6 +1436,7 @@ static void gc_scan(zend_refcounted *ref, gc_stack *stack)
1376
1436
}
1377
1437
}
1378
1438
1439
+ /* Scan all roots, coloring grey nodes black or white */
1379
1440
static void gc_scan_roots (gc_stack * stack )
1380
1441
{
1381
1442
uint32_t idx , end ;
@@ -1409,6 +1470,8 @@ static void gc_scan_roots(gc_stack *stack)
1409
1470
}
1410
1471
}
1411
1472
1473
+ /* Add an object to the buffer with the garbage flag, so that it will be
1474
+ * destroyed and freed when the scan is complete. */
1412
1475
static void gc_add_garbage (zend_refcounted * ref )
1413
1476
{
1414
1477
uint32_t idx ;
@@ -1434,6 +1497,7 @@ static void gc_add_garbage(zend_refcounted *ref)
1434
1497
GC_G (num_roots )++ ;
1435
1498
}
1436
1499
1500
+ /* Traverse the reference graph from ref, marking any white objects as garbage. */
1437
1501
static int gc_collect_white (zend_refcounted * ref , uint32_t * flags , gc_stack * stack )
1438
1502
{
1439
1503
int count = 0 ;
@@ -1622,6 +1686,7 @@ static int gc_collect_white(zend_refcounted *ref, uint32_t *flags, gc_stack *sta
1622
1686
return count ;
1623
1687
}
1624
1688
1689
+ /* Traverse the reference graph from all roots, marking white nodes as garbage. */
1625
1690
static int gc_collect_roots (uint32_t * flags , gc_stack * stack )
1626
1691
{
1627
1692
uint32_t idx , end ;
@@ -1808,6 +1873,7 @@ static ZEND_COLD ZEND_NORETURN void gc_start_destructor_fiber_error(void)
1808
1873
zend_error_noreturn (E_ERROR , "Unable to start destructor fiber" );
1809
1874
}
1810
1875
1876
+ /* Call destructors for garbage in the buffer. */
1811
1877
static zend_always_inline zend_result gc_call_destructors (uint32_t idx , uint32_t end , zend_fiber * fiber )
1812
1878
{
1813
1879
gc_root_buffer * current ;
@@ -1910,6 +1976,7 @@ static zend_never_inline void gc_call_destructors_in_fiber(uint32_t end)
1910
1976
}
1911
1977
}
1912
1978
1979
+ /* Perform a garbage collection run. The default implementation of gc_collect_cycles. */
1913
1980
ZEND_API int zend_gc_collect_cycles (void )
1914
1981
{
1915
1982
int total_count = 0 ;
0 commit comments