@@ -135,32 +135,20 @@ static void tail_mask_set(atomic_t *tail_mask, size_t num_bits, size_t off)
 		return;
 	}
 
-	/* If bit mask exceeds a single word then tail may spill to the adjacent word. */
 	size_t idx = tail_off / 32;
+	atomic_t *t_mask = &tail_mask[idx];
 
-	tail_off = tail_off - 32 * idx;
-	if ((tail_off + tail_bits) <= 32) {
-		/* Tail mask fits in a single word. */
-		atomic_or(&tail_mask[idx], BIT_MASK(tail_bits) << tail_off);
-		return;
-	}
-
-	/* Tail spilled. Remainder is set in the next word. Since number of tail_masks
-	 * match number of words in bitarray we don't need to check if we are exceeding
-	 * the array boundary.
-	 */
-	atomic_or(&tail_mask[idx], BIT_MASK(32 - tail_off) << tail_off);
-
-
-	size_t rem_tail = tail_bits - (32 - tail_off);
-	atomic_t *mask = &tail_mask[idx + 1];
+	tail_off = tail_off % 32;
+	while (tail_bits > 0) {
+		uint32_t rem_bits = MIN(32 - tail_off, tail_bits);
+		uint32_t bits = MIN(tail_bits, rem_bits);
+		uint32_t mask = (bits == 32) ? UINT32_MAX : (BIT_MASK(bits) << tail_off);
 
-	while (rem_tail >= 32) {
-		atomic_or(mask, UINT32_MAX);
-		mask++;
-		rem_tail -= 32;
+		atomic_or(t_mask, mask);
+		t_mask++;
+		tail_off = 0;
+		tail_bits -= bits;
 	}
-	atomic_or(mask, BIT_MASK(rem_tail));
 }
 
 /* Function determines how many chunks were used for the allocated buffer. It is
@@ -189,33 +177,36 @@ static uint32_t num_bits_get(atomic_t *tail_mask, size_t off)
 		return num_bits;
 	}
 
-	/* In multiword bit array we need to check if tail is spilling over to the next word. */
-	size_t idx = off / 32;
-	size_t w_off = off - 32 * idx;
+	size_t tail_off = off + 1;
+	size_t idx = tail_off / 32;
 	atomic_t *t_mask = &tail_mask[idx];
 
-	mask = (*t_mask | BIT(w_off)) >> w_off;
-	num_bits = (~mask == 0) ? 32 : __builtin_ctz(~mask);
-	if (num_bits == 1) {
+	tail_off = tail_off % 32;
+	num_bits = 1;
+	if ((*t_mask & BIT(tail_off)) == 0) {
 		return num_bits;
 	}
 
-	mask = BIT_MASK(num_bits - 1) << (w_off + 1);
-	atomic_and(t_mask, ~mask);
-	if (((w_off + num_bits) == 32) && (idx < (HEAP_NUM_WORDS - 1))) {
-		size_t tmp_bits;
+	do {
+		uint32_t mask = (uint32_t)*t_mask >> tail_off;
+		uint32_t bits = (mask == UINT32_MAX) ? 32 : __builtin_ctz(~mask);
 
-		/* If we are at the end of the one mask we need to check the beginning of the
-		 * next one as there might be remaining part of the tail.
-		 */
-		do {
-			t_mask++;
-			tmp_bits = (*t_mask == UINT32_MAX) ? 32 : __builtin_ctz(~(*t_mask));
-			mask = (tmp_bits == 32) ? UINT32_MAX : BIT_MASK(tmp_bits);
+		num_bits += bits;
+		if (mask == UINT32_MAX) {
+
+			atomic_set(t_mask, 0);
+		} else {
+			mask = BIT_MASK(bits) << tail_off;
 			atomic_and(t_mask, ~mask);
-			num_bits += tmp_bits;
-		} while ((tmp_bits == 32) && (t_mask != &tail_mask[HEAP_NUM_WORDS - 1]));
-	}
+		}
+
+		if (((bits + tail_off) < 32) || (bits == 0)) {
+			return num_bits;
+		}
+
+		tail_off = 0;
+		t_mask++;
+	} while (t_mask != &tail_mask[HEAP_NUM_WORDS]);
 
 	return num_bits;
 }
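
Below is a rough standalone sketch (not part of the patch) of the word-spanning loop both functions now share: it sets a tail that crosses 32-bit word boundaries, then counts the run and clears it. Plain uint32_t words stand in for atomic_t, and BIT, BIT_MASK, MIN and NUM_WORDS are local stand-ins for the macros the real code relies on (Zephyr's util macros and HEAP_NUM_WORDS).

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define NUM_WORDS   4                 /* stand-in for HEAP_NUM_WORDS */
#define BIT(n)      (1UL << (n))
#define BIT_MASK(n) (BIT(n) - 1UL)    /* n low bits set; n == 32 is special-cased below */
#define MIN(a, b)   (((a) < (b)) ? (a) : (b))

/* Set tail_bits bits starting at absolute bit offset tail_off, word by word. */
static void sketch_tail_set(uint32_t *words, uint32_t tail_off, uint32_t tail_bits)
{
	uint32_t *w = &words[tail_off / 32];

	tail_off %= 32;
	while (tail_bits > 0) {
		/* First pass may start mid-word; later passes always start at bit 0. */
		uint32_t bits = MIN(32 - tail_off, tail_bits);
		uint32_t mask = (bits == 32) ? UINT32_MAX : (uint32_t)(BIT_MASK(bits) << tail_off);

		*w |= mask;
		w++;
		tail_off = 0;
		tail_bits -= bits;
	}
}

/* Count the contiguous run of set bits starting at tail_off and clear it. */
static uint32_t sketch_tail_get_clear(uint32_t *words, uint32_t tail_off)
{
	uint32_t *w = &words[tail_off / 32];
	uint32_t num_bits = 0;

	tail_off %= 32;
	do {
		uint32_t val = *w >> tail_off;
		uint32_t bits = (val == UINT32_MAX) ? 32 : (uint32_t)__builtin_ctz(~val);

		num_bits += bits;
		*w &= (bits == 32) ? 0U : (uint32_t)~(BIT_MASK(bits) << tail_off);

		if (((bits + tail_off) < 32) || (bits == 0)) {
			return num_bits;    /* run ended inside this word */
		}
		tail_off = 0;
		w++;                        /* run fills the word; continue in the next one */
	} while (w != &words[NUM_WORDS]);

	return num_bits;
}

int main(void)
{
	uint32_t words[NUM_WORDS] = {0};

	/* A 40-bit tail starting at bit 30 spans three words: 2 + 32 + 6 bits. */
	sketch_tail_set(words, 30, 40);
	printf("counted %" PRIu32 " bits\n", sketch_tail_get_clear(words, 30));
	printf("words: %#" PRIx32 " %#" PRIx32 " %#" PRIx32 "\n", words[0], words[1], words[2]);
	return 0;
}

Setting the 40-bit tail at offset 30 touches three words (2 + 32 + 6 bits), and the counting loop returns 40 and leaves the words zeroed, mirroring the set-then-consume round trip that tail_mask_set() and num_bits_get() perform. The word-by-word loop replaces the special-case branches of the old code: the first iteration handles the partial word at tail_off, and every later iteration starts at bit 0.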