@@ -80,7 +80,7 @@ static ALWAYS_INLINE char small_string_at(struct object* obj, uword index) {
   // +1 for (length | tag) byte
   return ((uword)obj >> ((index + 1) * kBitsPerByte)) & 0xFF;
 }
-struct gc_obj* as_heap_object(struct object* obj) {
+static ALWAYS_INLINE struct gc_obj* as_heap_object(struct object* obj) {
   assert(is_heap_object(obj));
   assert(kHeapObjectTag == 1);
   return (struct gc_obj*)((uword)obj - 1);
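For context on the small-string trick above: the whole string lives in the tagged word itself, with the low byte holding the length and tag bits and character i at byte i + 1. A minimal standalone sketch of that layout (the 0x05 length/tag byte below is made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    enum { kBitsPerByte = 8 };

    static char small_string_at(uintptr_t obj, unsigned index) {
      // +1 to skip the low (length | tag) byte, as in the diff
      return (obj >> ((index + 1) * kBitsPerByte)) & 0xFF;
    }

    int main(void) {
      // Hypothetical word: 'h' in byte 1, 'i' in byte 2, 0x05 as length/tag
      uintptr_t obj = ((uintptr_t)'i' << 16) | ((uintptr_t)'h' << 8) | 0x05;
      assert(small_string_at(obj, 0) == 'h');
      assert(small_string_at(obj, 1) == 'i');
      return 0;
    }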
@@ -128,7 +128,10 @@ static ALWAYS_INLINE uintptr_t align(uintptr_t val, uintptr_t alignment) {
   return (val + alignment - 1) & ~(alignment - 1);
 }
 static ALWAYS_INLINE uintptr_t align_size(uintptr_t size) {
-  return align(size, sizeof(uintptr_t));
+  return align(size, kObjectAlignment);
+}
+static ALWAYS_INLINE bool is_size_aligned(uword size) {
+  return size == align_size(size);
 }

 #ifdef STATIC_HEAP
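The alignment helpers rely on the usual power-of-two rounding trick: adding alignment - 1 carries any nonzero low bits into the next multiple, and the mask clears them. A minimal standalone sketch, assuming kObjectAlignment is 8 (1 << kPrimaryTagBits, matching the "need 3 bits for tagging" asserts; the constant itself is defined outside this diff):

    #include <assert.h>
    #include <stdint.h>

    enum { kObjectAlignment = 8 };  // assumed value; defined elsewhere

    static uintptr_t align(uintptr_t val, uintptr_t alignment) {
      return (val + alignment - 1) & ~(alignment - 1);
    }

    int main(void) {
      assert(align(13, kObjectAlignment) == 16);  // rounds up
      assert(align(16, kObjectAlignment) == 16);  // aligned input unchanged
      return 0;
    }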
@@ -163,20 +166,23 @@ void init_heap(struct gc_heap* heap, struct space space) {
   heap->from_space = heap->limit = heap->hp + space.size / 2;
 }

-static ALWAYS_INLINE bool is_power_of_two(uword x) { return (x & (x - 1)) == 0; }
-
-static ALWAYS_INLINE bool is_aligned(uword value, uword alignment) {
-  assert(is_power_of_two(alignment));
-  return (value & (alignment - 1)) == 0;
+static ALWAYS_INLINE uintptr_t heap_ptr(struct gc_heap* heap) {
+#if defined(NDEBUG) && defined(__GNUC__)
+  // Clang and GCC support this; TCC does not
+  return (uintptr_t)__builtin_assume_aligned((void*)heap->hp, kObjectAlignment);
+#else
+  assert(is_size_aligned(heap->hp) && "need 3 bits for tagging");
+  return heap->hp;
+#endif
 }

 struct gc_obj* copy(struct gc_heap* heap, struct gc_obj* obj) {
   size_t size = heap_object_size(obj);
-  struct gc_obj* new_obj = (struct gc_obj*)heap->hp;
+  struct gc_obj* new_obj = (struct gc_obj*)heap_ptr(heap);
   memcpy(new_obj, obj, size);
   forward(obj, new_obj);
-  heap->hp += align_size(size);
-  assert(is_aligned(heap->hp, 1 << kPrimaryTagBits) && "need 3 bits for tagging");
+  heap->hp += size;
+  assert(is_size_aligned(heap->hp) && "need 3 bits for tagging");
   return new_obj;
 }

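heap_ptr is a codegen hint, not a behavior change: in NDEBUG builds on GCC and Clang it promises the compiler that heap->hp is kObjectAlignment-aligned, which lets the optimizer drop redundant alignment math in the allocator, while debug builds check the same invariant with an assert. A hedged sketch of the builtin's effect, independent of this codebase:

    #include <stdint.h>

    // GCC/Clang only. The compiler may assume the returned pointer is
    // 8-byte aligned, so it can fold (p & 7) to 0 or use aligned loads.
    uintptr_t low_bits(void* p) {
      void* q = __builtin_assume_aligned(p, 8);
      return (uintptr_t)q & 7;
    }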
@@ -309,28 +315,38 @@ byte obj_tag(struct gc_obj* obj) { return (obj->tag & 0xff); }

 bool obj_has_tag(struct gc_obj* obj, byte tag) { return obj_tag(obj) == tag; }

-static NEVER_INLINE void allocate_slow_path(struct gc_heap* heap, uword size) {
+static NEVER_INLINE ALLOCATOR struct object* allocate_slow_path(struct gc_heap* heap, uword tag, uword size) {
+  // Outlining allocate_slow_path like this helps the compiler generate better
+  // code in callers of allocate such as mklist. For some reason we have to
+  // tail-duplicate allocate, too :(
 #ifndef STATIC_HEAP
   heap_grow(heap);
 #endif
-  // size is already aligned
+  assert(is_size_aligned(size) && "need 3 bits for tagging");
   if (UNLIKELY(heap->limit - heap->hp < size)) {
     fprintf(stderr, "out of memory\n");
     abort();
   }
+  // NOTE: Keep in sync with allocate
+  uintptr_t addr = heap_ptr(heap);
+  uintptr_t new_hp = addr + size;
+  assert(is_size_aligned(new_hp) && "need 3 bits for tagging");
+  heap->hp = new_hp;
+  ((struct gc_obj*)addr)->tag = make_tag(tag, size);
+  return heap_tag(addr);
 }

 static ALWAYS_INLINE ALLOCATOR struct object* allocate(struct gc_heap* heap,
                                                        uword tag, uword size) {
-  uintptr_t addr = heap->hp;
-  uintptr_t new_hp = align_size(addr + size);
-  assert(is_aligned(new_hp, 1 << kPrimaryTagBits) && "need 3 bits for tagging");
+  assert(is_size_aligned(size) && "need 3 bits for tagging");
+  // NOTE: Keep in sync with allocate_slow_path
+  uintptr_t addr = heap_ptr(heap);
+  uintptr_t new_hp = addr + size;
+  assert(is_size_aligned(new_hp) && "need 3 bits for tagging");
   if (UNLIKELY(heap->limit < new_hp)) {
-    allocate_slow_path(heap, size);
-    addr = heap->hp;
-    new_hp = align_size(addr + size);
-    assert(is_aligned(new_hp, 1 << kPrimaryTagBits) && "need 3 bits for tagging");
+    return allocate_slow_path(heap, tag, size);
   }
+  // NOTE: Keep in sync with allocate_slow_path
   heap->hp = new_hp;
   ((struct gc_obj*)addr)->tag = make_tag(tag, size);
   return heap_tag(addr);
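The new allocator shape is the classic inline fast path / outlined slow path split: the hot path is bump, compare, store, and the cold path grows the heap and then repeats the bump itself so the fast path needs no retry loop (the tail duplication the comment mentions). A minimal sketch of the same structure, with made-up names:

    #include <stdint.h>
    #include <stdlib.h>

    struct bump { uintptr_t hp, limit; };

    // Cold path, kept out of line so the fast path stays small.
    static void* refill_and_alloc(struct bump* b, size_t size) {
      // ... grow or collect here ...
      if (b->limit - b->hp < size) abort();  // out of memory
      uintptr_t addr = b->hp;                // duplicated bump sequence
      b->hp = addr + size;
      return (void*)addr;
    }

    static inline void* alloc(struct bump* b, size_t size) {
      uintptr_t addr = b->hp;                // size assumed pre-aligned
      uintptr_t new_hp = addr + size;
      if (b->limit < new_hp) return refill_and_alloc(b, size);
      b->hp = new_hp;
      return (void*)addr;
    }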
@@ -352,11 +368,13 @@ enum {
 #undef ENUM_TAG
 };

+#define HEAP_ALIGNED __attribute__((__aligned__(kObjectAlignment)))
+
 struct list {
   struct gc_obj HEAD;
   struct object* first;
   struct object* rest;
-};
+} HEAP_ALIGNED;

 typedef struct object* (*ClosureFn)(struct object*, struct object*);

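HEAP_ALIGNED pulls its weight because allocate now insists on pre-aligned sizes: for a fixed-size type the constructor presumably passes sizeof(...) directly, and raising the type's alignment makes sizeof itself a multiple of kObjectAlignment, since sizeof is always a multiple of the type's alignment. A quick standalone check of that property, assuming 8-byte alignment:

    #include <assert.h>

    struct padded {
      char bytes[9];  // sizeof would be 9 without the attribute
    } __attribute__((__aligned__(8)));

    int main(void) {
      // The attribute raises alignment to 8, so sizeof rounds up to 16.
      assert(sizeof(struct padded) == 16);
      assert(sizeof(struct padded) % 8 == 0);
      return 0;
    }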
@@ -367,7 +385,7 @@ struct closure {
   ClosureFn fn;
   size_t size;
   struct object* env[];
-};
+}; // Not HEAP_ALIGNED; env is variable size

 struct record_field {
   size_t key;
@@ -378,21 +396,25 @@ struct record {
   struct gc_obj HEAD;
   size_t size;
   struct record_field fields[];
-};
+}; // Not HEAP_ALIGNED; fields is variable size

 struct heap_string {
   struct gc_obj HEAD;
   size_t size;
   char data[];
-};
+}; // Not HEAP_ALIGNED; data is variable size

 struct variant {
   struct gc_obj HEAD;
   size_t tag;
   struct object* value;
-};
+} HEAP_ALIGNED;

-size_t heap_object_size(struct gc_obj* obj) { return obj->tag >> kBitsPerByte; }
+size_t heap_object_size(struct gc_obj* obj) {
+  size_t result = obj->tag >> kBitsPerByte;
+  assert(is_size_aligned(result));
+  return result;
+}

 size_t trace_heap_object(struct gc_obj* obj, struct gc_heap* heap,
                          VisitFn visit) {
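The new assert in heap_object_size checks an invariant established at allocation time: allocate stores the already-aligned size into the tag word via make_tag, and heap_object_size reads it back out. make_tag is outside this diff, but here is a sketch of the encoding implied by obj_tag and heap_object_size:

    #include <assert.h>
    #include <stdint.h>

    enum { kBitsPerByte = 8 };

    // Implied encoding: low byte = type tag, upper bits = object size.
    static uintptr_t make_tag(uintptr_t tag, uintptr_t size) {
      return (size << kBitsPerByte) | (tag & 0xff);
    }

    int main(void) {
      uintptr_t t = make_tag(3, 48);
      assert((t & 0xff) == 3);            // what obj_tag extracts
      assert((t >> kBitsPerByte) == 48);  // what heap_object_size extracts
      return 0;
    }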
@@ -492,8 +514,8 @@ struct closure* as_closure(struct object* obj) {

 struct object* mkclosure(struct gc_heap* heap, ClosureFn fn,
                          size_t num_fields) {
-  struct object* result = allocate(
-      heap, TAG_CLOSURE, sizeof(struct closure) + num_fields * kPointerSize);
+  uword size = align_size(sizeof(struct closure) + num_fields * kPointerSize);
+  struct object* result = allocate(heap, TAG_CLOSURE, size);
   as_closure(result)->fn = fn;
   as_closure(result)->size = num_fields;
   // Assumes the items will be filled in immediately after calling mkclosure so
@@ -530,9 +552,8 @@ struct record* as_record(struct object* obj) {
 }

 struct object* mkrecord(struct gc_heap* heap, size_t num_fields) {
-  struct object* result = allocate(
-      heap, TAG_RECORD,
-      sizeof(struct record) + num_fields * sizeof(struct record_field));
+  uword size = align_size(sizeof(struct record) + num_fields * sizeof(struct record_field));
+  struct object* result = allocate(heap, TAG_RECORD, size);
   as_record(result)->size = num_fields;
   // Assumes the items will be filled in immediately after calling mkrecord so
   // they are not initialized
@@ -576,8 +597,8 @@ struct heap_string* as_heap_string(struct object* obj) {

 struct object* mkstring_uninit_private(struct gc_heap* heap, size_t count) {
   assert(count > kMaxSmallStringLength);  // can't fill in small string later
-  struct object* result =
-      allocate(heap, TAG_STRING, sizeof(struct heap_string) + count);
+  uword size = align_size(sizeof(struct heap_string) + count);
+  struct object* result = allocate(heap, TAG_STRING, size);
   as_heap_string(result)->size = count;
   return result;
 }
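All three variable-size constructors now follow the same pattern: compute the raw payload size, round it up with align_size at the call site, and hand allocate an already-aligned size. Any slack bytes between the payload and the aligned size are never read; they just keep the next bump allocation aligned. A worked example, assuming kObjectAlignment == 8 and sizeof(struct heap_string) == 16 (one header word plus one size word):

    // count = 5  ->  16 + 5 = 21  ->  align_size(21) = 24
    // heap_object_size will report 24; as_heap_string(result)->size stays 5.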