#ifndef ARENA_H
#define ARENA_H

+#include <algorithm>
#include <cstddef>
+#include <cstdint>
#include <sys/types.h>
+#include <utility>

#include "runtime/alloc.h"

extern "C" {

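+//
+// A note on layout (an assumption made explicit here, since
+// initialize_semispace() is not shown in this diff): each semispace is one
+// contiguous HYPERBLOCK_SIZE reservation, aligned to HYPERBLOCK_SIZE, so that
+// mem_block_header() below can recover the header by masking low address bits.
+//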
+size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
+
// An arena can be used to allocate objects that can then be deallocated all at
// once.
class arena {
public:
  arena(char id)
-      : allocation_semispace_id(id) { }
+      : allocation_semispace_id(id) {
+    initialize_semispace();
+  }

  // Allocates the requested number of bytes as a contiguous region and returns a
  // pointer to the first allocated byte.
-  // If called with requested size greater than the maximun single allocation
-  // size, the space is allocated in a general (not garbage collected pool).
  void *kore_arena_alloc(size_t requested);

  // Returns the address of the first byte that belongs in the given arena.
  // Returns 0 if nothing has ever been allocated in that arena.
-  char *arena_start_ptr() const;
+  char *arena_start_ptr() const {
+    return current_addr_ptr ? current_addr_ptr + sizeof(memory_block_header)
+                            : nullptr;
+  }

  // Returns a pointer to a location holding the address of the last allocated
  // byte in the given arena plus 1.
  // This address is 0 if nothing has ever been allocated in that arena.
-  char **arena_end_ptr();
+  char **arena_end_ptr() { return &allocation_ptr; }
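+  // (Returning the location of the pointer rather than its value presumably
+  // lets garbage-collection code advance the allocation point through this
+  // handle while it works.)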

  // Returns the total number of allocatable bytes currently in the arena in its
  // active semispace.
-  size_t arena_size() const;
+  size_t arena_size() const {
+    update_num_blocks();
+    return BLOCK_SIZE * std::max(num_blocks, num_collection_blocks);
+  }

  // Clears the current allocation space by setting its start back to its first
  // block. It is used during garbage collection to effectively collect all of the
@@ -41,15 +52,18 @@ class arena {

  // Resizes the last allocation as long as the resize does not require a new
  // block allocation.
-  // Returns the address of the byte following the last newlly allocated byte when
-  // the resize succeeds, returns 0 otherwise.
-  void *arena_resize_last_alloc(ssize_t increase);
+  // Returns the address of the byte following the last newly allocated byte.
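+  // (With each semispace now a single contiguous region there is no block
+  // boundary to cross, so the resize no longer fails the way the old
+  // block-chained version could.)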
+  void *arena_resize_last_alloc(ssize_t increase) {
+    return (allocation_ptr += increase);
+  }

  // Returns the given arena's current collection semispace ID.
  // Each arena has 2 semispace IDs, one equal to the arena ID and the other equal
  // to the 1's complement of the arena ID. At any time one of these semispaces
  // is used for allocation and the other is used for collection.
-  char get_arena_collection_semispace_id() const;
+  char get_arena_collection_semispace_id() const {
+    return ~allocation_semispace_id;
+  }

  // Exchanges the current allocation and collection semispaces and clears the new
  // current allocation semispace by setting its start back to its first block.
@@ -61,7 +75,7 @@ class arena {
  // by the blocks of that arena. This difference will include blocks containing
  // sentinel bytes. Undefined behavior will result if the pointers belong to
  // different arenas.
-  static ssize_t ptr_diff(char *ptr1, char *ptr2);
+  static ssize_t ptr_diff(char *ptr1, char *ptr2) { return ptr1 - ptr2; }
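+  // (Semispaces are now contiguous, so plain pointer subtraction suffices; the
+  // note above about blocks and sentinel bytes describes the old layout.)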

  // Given a starting pointer to an address allocated in an arena and a size in
  // bytes, this function returns a pointer to an address allocated in the
@@ -72,42 +86,70 @@ class arena {
  // 3rd argument: the address of the last allocated byte in the arena plus 1
  // Return value: the address allocated in the arena after size bytes from the
  // starting pointer, or 0 if this is equal to the 3rd argument.
-  static char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr);
+  static char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) {
+    char *next_ptr = ptr + size;
+    return (next_ptr == arena_end_ptr) ? 0 : next_ptr;
+  }

  // Returns the ID of the semispace where the given address was allocated.
  // The behavior is undefined if called with an address that has not been
  // allocated within an arena.
  static char get_arena_semispace_id_of_object(void *ptr);

private:
-  struct memory_block_header {
-    char *next_block;
+  union memory_block_header {
+    //
+    // Currently the header just holds the semispace id. But we need it to be a
+    // multiple of sizeof(char*) for alignment purposes, so we add a dummy char*.
+    //
    char semispace;
+    char *alignment_dummy;
  };

-  void fresh_block();
-  static memory_block_header *mem_block_header(void *ptr);
+  //
+  // We update the number of 1MB blocks actually written to only when we need
+  // this value or before a garbage collection, rather than trying to determine
+  // when we write to a fresh block.
+  //
+  void update_num_blocks() const {
+    //
+    // Calculate how many 1MB blocks of the current arena we have used.
+    //
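+    // The "- 1 ... + 1" below is ceiling division: allocation_ptr always
+    // points at least sizeof(memory_block_header) past current_addr_ptr, so
+    // the used byte count is rounded up to whole blocks.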
+    size_t num_used_blocks
+        = (allocation_ptr - current_addr_ptr - 1) / BLOCK_SIZE + 1;
+    if (num_used_blocks > num_blocks)
+      num_blocks = num_used_blocks;
+  }
+
+  void initialize_semispace();

-  // helper function for `kore_arena_alloc`. Do not call directly.
-  void *do_alloc_slow(size_t requested);
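+  //
+  // Returns the header of the hyperblock containing ptr, assuming semispaces
+  // are HYPERBLOCK_SIZE-aligned (an invariant initialize_semispace() must
+  // maintain); the "- 1" keeps the mask correct even for a pointer one past
+  // the end of the hyperblock.
+  //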
+  static memory_block_header *mem_block_header(void *ptr) {
+    uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+    return reinterpret_cast<arena::memory_block_header *>(
+        (address - 1) & ~(HYPERBLOCK_SIZE - 1));
+  }

-  char *first_block;            // beginning of first block
-  char *block;                  // where allocations are being made in current block
-  char *block_start;            // start of current block
-  char *block_end;              // 1 past end of current block
-  char *first_collection_block; // beginning of other semispace
-  size_t num_blocks;            // number of blocks in current semispace
-  size_t num_collection_blocks; // number of blocks in other semispace
+  //
+  // Current semispace where allocations are being made.
+  //
+  char *current_addr_ptr; // pointer to start of current address space
+  char *allocation_ptr;   // next available location in current semispace
+  char *tripwire;         // allocating past this requests a garbage collection
+  mutable size_t
+      num_blocks; // notional number of BLOCK_SIZE blocks in current semispace
  char allocation_semispace_id; // id of current semispace
+  //
+  // Semispace where allocations will be made during and after garbage collection.
+  //
+  char *collection_addr_ptr
+      = nullptr; // pointer to start of collection address space
+  size_t num_collection_blocks
+      = 0; // notional number of BLOCK_SIZE blocks in collection semispace
};

// Macro to define a new arena with the given ID. Supports IDs ranging from 0 to
// 127.
#define REGISTER_ARENA(name, id) static thread_local arena name(id)
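+// For example, REGISTER_ARENA(youngspace, 0) (hypothetical name) defines a
+// thread-local arena whose two semispace IDs are 0 and ~0.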

-#define MEM_BLOCK_START(ptr)                                                   \
-  ((char *)(((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1)))
-
#ifdef __MACH__
//
// thread_local disabled for Apple
@@ -120,16 +162,51 @@ extern thread_local bool time_for_collection;
size_t get_gc_threshold();

inline void *arena::kore_arena_alloc(size_t requested) {
-  if (block + requested > block_end) {
-    return do_alloc_slow(requested);
+  if (allocation_ptr + requested >= tripwire) {
+    //
+    // We got close to or past the last location accessed in this address range
+    // so far, depending on the requested size and the tripwire setting. This
+    // triggers a garbage collection when allowed.
+    //
+    time_for_collection = true;
+    tripwire = current_addr_ptr
+               + HYPERBLOCK_SIZE; // won't trigger again until arena swap
  }
-  void *result = block;
-  block += requested;
+  void *result = allocation_ptr;
+  allocation_ptr += requested;
  MEM_LOG(
-      "Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
-      requested, block);
+      "Allocation at %p (size %zd), next alloc at %p\n", result, requested,
+      allocation_ptr);
  return result;
}
+
+inline void arena::arena_clear() {
+  //
+  // We set the allocation pointer to the first available address.
+  //
+  allocation_ptr = arena_start_ptr();
+  //
+  // If the number of blocks we've touched is >= the threshold, we want to
+  // trigger a garbage collection if we get within 1 block of the end of this
+  // area. Otherwise we only want to trigger a garbage collection if we
+  // allocate past the end of this area.
+  //
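+  // ((num_blocks >= get_gc_threshold()) converts to 0 or 1, so the tripwire
+  // sits one block before the end exactly when the threshold is reached.)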
+  tripwire = current_addr_ptr
+             + (num_blocks - (num_blocks >= get_gc_threshold())) * BLOCK_SIZE;
}

+inline void arena::arena_swap_and_clear() {
+  update_num_blocks(); // so we save the correct number of touched blocks
+  std::swap(current_addr_ptr, collection_addr_ptr);
+  std::swap(num_blocks, num_collection_blocks);
+  allocation_semispace_id = ~allocation_semispace_id;
+  if (current_addr_ptr == nullptr) {
+    //
+    // The other semispace hasn't been initialized yet.
+    //
+    initialize_semispace();
+  } else
+    arena_clear();
+}
+}
#endif // ARENA_H