 #include "utils_common.h"
 #include "utils_concurrency.h"
 
+#ifndef NDEBUG
+#define DEBUG_RUN_CHECKS(pool) ba_debug_checks(pool)
+#define DEBUG_SET_VAR(var, value) DO_WHILE_EXPRS((var = value))
+#define DEBUG_INC_VAR(var) DO_WHILE_EXPRS((var++))
+#define DEBUG_DEC_VAR(var) DO_WHILE_EXPRS((var--))
+#else
+#define DEBUG_RUN_CHECKS(pool) DO_WHILE_EMPTY
+#define DEBUG_SET_VAR(var, value) DO_WHILE_EMPTY
+#define DEBUG_INC_VAR(var) DO_WHILE_EMPTY
+#define DEBUG_DEC_VAR(var) DO_WHILE_EMPTY
+#endif /* NDEBUG */
+
 // minimum size of a single pool of the linear base allocator
 #define MINIMUM_LINEAR_POOL_SIZE (ba_os_get_page_size())
 
@@ -27,9 +39,11 @@ typedef struct umf_ba_main_linear_pool_meta_t {
     os_mutex_t lock;
     char *data_ptr;
     size_t size_left;
+    size_t pool_n_allocs; // number of allocations in this pool
 #ifndef NDEBUG
     size_t n_pools;
-#endif /* NDEBUG */
+    size_t global_n_allocs; // global number of allocations in all pools
+#endif /* NDEBUG */
 } umf_ba_main_linear_pool_meta_t;
 
 // the main pool of the linear base allocator (there is only one such pool)
@@ -52,7 +66,8 @@ struct umf_ba_next_linear_pool_t {
     // to be freed in umf_ba_linear_destroy())
     umf_ba_next_linear_pool_t *next_pool;
 
-    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
+    size_t pool_size;     // size of this pool (argument of ba_os_alloc() call)
+    size_t pool_n_allocs; // number of allocations in this pool
 
     // data area of all pools except of the main (the first one) starts here
     char data[];
@@ -94,9 +109,9 @@ umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
     pool->metadata.data_ptr = data_ptr;
     pool->metadata.size_left = size_left;
     pool->next_pool = NULL; // this is the only pool now
-#ifndef NDEBUG
-    pool->metadata.n_pools = 1;
-#endif /* NDEBUG */
+    pool->metadata.pool_n_allocs = 0;
+    DEBUG_SET_VAR(pool->metadata.n_pools, 1);
+    DEBUG_SET_VAR(pool->metadata.global_n_allocs, 0);
 
     // init lock
     os_mutex_t *lock = util_mutex_init(&pool->metadata.lock);
@@ -131,6 +146,7 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
         }
 
         new_pool->pool_size = pool_size;
+        new_pool->pool_n_allocs = 0;
 
         void *data_ptr = &new_pool->data;
         size_t size_left =
@@ -143,23 +159,86 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
         // add the new pool to the list of pools
         new_pool->next_pool = pool->next_pool;
         pool->next_pool = new_pool;
-#ifndef NDEBUG
-        pool->metadata.n_pools++;
-#endif /* NDEBUG */
+        DEBUG_INC_VAR(pool->metadata.n_pools);
     }
 
     assert(pool->metadata.size_left >= aligned_size);
     void *ptr = pool->metadata.data_ptr;
     pool->metadata.data_ptr += aligned_size;
     pool->metadata.size_left -= aligned_size;
-#ifndef NDEBUG
-    ba_debug_checks(pool);
-#endif /* NDEBUG */
+    if (pool->next_pool) {
+        pool->next_pool->pool_n_allocs++;
+    } else {
+        pool->metadata.pool_n_allocs++;
+    }
+    DEBUG_INC_VAR(pool->metadata.global_n_allocs);
+    DEBUG_RUN_CHECKS(pool);
     util_mutex_unlock(&pool->metadata.lock);
 
     return ptr;
 }
 
+// check if ptr belongs to pool
+static inline int pool_contains_ptr(void *pool, size_t pool_size,
+                                    void *data_begin, void *ptr) {
+    return ((char *)ptr >= (char *)data_begin &&
+            (char *)ptr < ((char *)(pool)) + pool_size);
+}
+
+// umf_ba_linear_free() really frees memory only if all allocations from an inactive pool were freed
+// It returns:
+//   0 - ptr belonged to the pool and was freed
+//  -1 - ptr doesn't belong to the pool and wasn't freed
+int umf_ba_linear_free(umf_ba_linear_pool_t *pool, void *ptr) {
+    util_mutex_lock(&pool->metadata.lock);
+    DEBUG_RUN_CHECKS(pool);
+    if (pool_contains_ptr(pool, pool->metadata.pool_size, pool->data, ptr)) {
+        pool->metadata.pool_n_allocs--;
+        DEBUG_DEC_VAR(pool->metadata.global_n_allocs);
+        size_t page_size = ba_os_get_page_size();
+        if ((pool->metadata.pool_n_allocs == 0) && pool->next_pool &&
+            (pool->metadata.pool_size > page_size)) {
+            // we can free the first (main) pool except for the first page containing the metadata
+            void *ptr = (char *)pool + page_size;
+            size_t size = pool->metadata.pool_size - page_size;
+            ba_os_free(ptr, size);
+        }
+        DEBUG_RUN_CHECKS(pool);
+        util_mutex_unlock(&pool->metadata.lock);
+        return 0;
+    }
+
+    umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
+    umf_ba_next_linear_pool_t *prev_pool = NULL;
+    while (next_pool) {
+        if (pool_contains_ptr(next_pool, next_pool->pool_size, next_pool->data,
+                              ptr)) {
+            DEBUG_DEC_VAR(pool->metadata.global_n_allocs);
+            next_pool->pool_n_allocs--;
+            // pool->next_pool is the active pool - we cannot free it
+            if ((next_pool->pool_n_allocs == 0) &&
+                next_pool != pool->next_pool) {
+                assert(prev_pool); // it cannot be the active pool
+                assert(prev_pool->next_pool == next_pool);
+                prev_pool->next_pool = next_pool->next_pool;
+                DEBUG_DEC_VAR(pool->metadata.n_pools);
+                void *ptr = next_pool;
+                size_t size = next_pool->pool_size;
+                ba_os_free(ptr, size);
+            }
+            DEBUG_RUN_CHECKS(pool);
+            util_mutex_unlock(&pool->metadata.lock);
+            return 0;
+        }
+        prev_pool = next_pool;
+        next_pool = next_pool->next_pool;
+    }
+
+    util_mutex_unlock(&pool->metadata.lock);
+    // ptr doesn't belong to the pool and wasn't freed
+    return -1;
+}
+
 void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
     // Do not destroy if we are running in the proxy library,
     // because it may need those resources till
@@ -169,7 +248,12 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
     }
 
 #ifndef NDEBUG
-    ba_debug_checks(pool);
+    DEBUG_RUN_CHECKS(pool);
+    if (pool->metadata.global_n_allocs) {
+        fprintf(stderr, "umf_ba_linear_destroy(): global_n_allocs = %zu\n",
+                pool->metadata.global_n_allocs);
+        assert(pool->metadata.global_n_allocs == 0);
+    }
 #endif /* NDEBUG */
 
     umf_ba_next_linear_pool_t *current_pool;
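
For context, a minimal caller sketch follows; it is not part of the patch. It relies only on the function names and signatures visible in this diff, assumes the declarations are available via an internal header hypothetically named "base_alloc_linear.h", assumes umf_ba_linear_create() returns NULL on failure, and uses 4096 bytes as an arbitrary example pool size.

#include <assert.h>
#include <string.h>

#include "base_alloc_linear.h" // assumed header name for the declarations above

static void linear_pool_example(void) {
    // create a pool of the linear base allocator (example size: 4 KiB)
    umf_ba_linear_pool_t *pool = umf_ba_linear_create(4096);
    if (!pool) {
        return;
    }

    void *ptr = umf_ba_linear_alloc(pool, 64);
    if (ptr) {
        memset(ptr, 0, 64);

        // new in this patch: returns 0 if ptr came from this pool, -1 otherwise;
        // memory is returned to the OS only once all allocations
        // from an inactive pool have been freed
        int ret = umf_ba_linear_free(pool, ptr);
        assert(ret == 0);
    }

    umf_ba_linear_destroy(pool);
}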