@@ -135,32 +135,19 @@ static void tail_mask_set(atomic_t *tail_mask, size_t num_bits, size_t off)
135
135
return ;
136
136
}
137
137
138
- /* If bit mask exceeds a single word then tail may spill to the adjacent word. */
139
138
size_t idx = tail_off / 32 ;
139
+ atomic_t * t_mask = & tail_mask [idx ];
140
140
141
- tail_off = tail_off - 32 * idx ;
142
- if ((tail_off + tail_bits ) <= 32 ) {
143
- /* Tail mask fits in a single word. */
144
- atomic_or (& tail_mask [idx ], BIT_MASK (tail_bits ) << tail_off );
145
- return ;
146
- }
147
-
148
- /* Tail spilled. Remainder is set in the next word. Since number of tail_masks
149
- * match number of words in bitarray we don't need to check if we are exceeding
150
- * the array boundary.
151
- */
152
- atomic_or (& tail_mask [idx ], BIT_MASK (32 - tail_off ) << tail_off );
153
-
154
-
155
- size_t rem_tail = tail_bits - (32 - tail_off );
156
- atomic_t * mask = & tail_mask [idx + 1 ];
141
+ tail_off = tail_off % 32 ;
142
+ while (tail_bits > 0 ) {
143
+ uint32_t bits = MIN (32 - tail_off , tail_bits );
144
+ uint32_t mask = (bits == 32 ) ? UINT32_MAX : (BIT_MASK (bits ) << tail_off );
157
145
158
- while ( rem_tail >= 32 ) {
159
- atomic_or ( mask , UINT32_MAX ) ;
160
- mask ++ ;
161
- rem_tail -= 32 ;
146
+ atomic_or ( t_mask , mask );
147
+ t_mask ++ ;
148
+ tail_off = 0 ;
149
+ tail_bits -= bits ;
162
150
}
163
- atomic_or (mask , BIT_MASK (rem_tail ));
164
151
}
165
152
166
153
/* Function determines how many chunks were used for the allocated buffer. It is
@@ -175,47 +162,37 @@ static void tail_mask_set(atomic_t *tail_mask, size_t num_bits, size_t off)
175
162
*/
176
163
static uint32_t num_bits_get(atomic_t *tail_mask, size_t off)
{
	/* The chunk bit at 'off' itself always counts as one. */
	uint32_t num_bits = 1;
	/* Tail bits (if any) start immediately after the allocation's
	 * first bit.
	 */
	size_t tail_off = off + 1;
	size_t idx = tail_off / 32;
	atomic_t *t_mask;

	/* If 'off' is the very last bit of the bit array there is no tail.
	 * Bail out here: the do/while below dereferences the word before
	 * its bounds check runs (and skips the check entirely when
	 * HEAP_NUM_WORDS == 1), so entering it with idx == HEAP_NUM_WORDS
	 * would read one word past the end of the array.
	 */
	if (idx >= HEAP_NUM_WORDS) {
		return num_bits;
	}

	t_mask = &tail_mask[idx];
	tail_off = tail_off % 32;

	do {
		uint32_t mask = (uint32_t)*t_mask >> tail_off;

		if (mask == UINT32_MAX) {
			/* Only reachable with tail_off == 0 (a shifted value
			 * cannot have all 32 bits set): the whole word belongs
			 * to the tail, so consume and clear it.
			 */
			num_bits += 32;
			atomic_set(t_mask, 0);
		} else {
			/* ~mask != 0 here, so __builtin_ctz() is well defined:
			 * it yields the number of consecutive tail bits set
			 * starting at tail_off.
			 */
			uint32_t bits = __builtin_ctz(~mask);

			if (bits == 0) {
				/* No tail bit adjacent to the current offset:
				 * the tail ended in a previous word (or there
				 * is none).
				 */
				break;
			}

			num_bits += bits;
			/* Clear the consumed tail bits in this word. */
			atomic_and(t_mask, ~(BIT_MASK(bits) << tail_off));

			if (bits + tail_off < 32) {
				/* Tail ended within this word. */
				break;
			}

			/* Run of tail bits reached the word boundary; it may
			 * spill into the next word, which is scanned from
			 * bit 0.
			 */
			tail_off = 0;
		}

		t_mask++;
		/* &tail_mask[HEAP_NUM_WORDS] is a valid one-past-the-end
		 * pointer used only for comparison, never dereferenced.
		 */
	} while ((HEAP_NUM_WORDS > 1) && (t_mask != &tail_mask[HEAP_NUM_WORDS]));

	return num_bits;
}
0 commit comments