@@ -172,11 +172,6 @@ static inline struct maple_node *mt_alloc_one(gfp_t gfp)
 	return kmem_cache_alloc(maple_node_cache, gfp);
 }
 
-static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
-{
-	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
-}
-
 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
 {
 	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
@@ -1150,6 +1145,19 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 	mas_set_err(mas, -ENOMEM);
 }
 
+static inline void mas_empty_nodes(struct ma_state *mas)
+{
+	mas->node_request = 0;
+	if (mas->sheaf) {
+		mt_return_sheaf(mas->sheaf);
+		mas->sheaf = NULL;
+	}
+
+	if (mas->alloc) {
+		kfree(mas->alloc);
+		mas->alloc = NULL;
+	}
+}
 
 /*
  * mas_free() - Free an encoded maple node
@@ -5208,15 +5216,7 @@ EXPORT_SYMBOL_GPL(mas_preallocate);
 void mas_destroy(struct ma_state *mas)
 {
 	mas->mas_flags &= ~MA_STATE_PREALLOC;
-
-	mas->node_request = 0;
-	if (mas->sheaf)
-		mt_return_sheaf(mas->sheaf);
-	mas->sheaf = NULL;
-
-	if (mas->alloc)
-		kfree(mas->alloc);
-	mas->alloc = NULL;
+	mas_empty_nodes(mas);
 }
 EXPORT_SYMBOL_GPL(mas_destroy);
 
@@ -6241,28 +6241,25 @@ static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
 	struct maple_node *node = mte_to_node(mas->node);
 	struct maple_node *new_node = mte_to_node(new_mas->node);
 	enum maple_type type;
-	unsigned char request, count, i;
+	unsigned char count, i;
 	void __rcu **slots;
 	void __rcu **new_slots;
 	unsigned long val;
 
 	/* Allocate memory for child nodes. */
 	type = mte_node_type(mas->node);
 	new_slots = ma_slots(new_node, type);
-	request = mas_data_end(mas) + 1;
-	count = mt_alloc_bulk(gfp, request, (void **)new_slots);
-	if (unlikely(count < request)) {
-		memset(new_slots, 0, request * sizeof(void *));
-		mas_set_err(mas, -ENOMEM);
+	count = mas->node_request = mas_data_end(mas) + 1;
+	mas_alloc_nodes(mas, gfp);
+	if (unlikely(mas_is_err(mas)))
 		return;
-	}
 
-	/* Restore node type information in slots. */
 	slots = ma_slots(node, type);
 	for (i = 0; i < count; i++) {
 		val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
 		val &= MAPLE_NODE_MASK;
-		((unsigned long *)new_slots)[i] |= val;
+		new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) |
+					    val);
 	}
 }
 
@@ -6316,7 +6313,7 @@ static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
 			/* Only allocate child nodes for non-leaf nodes. */
 			mas_dup_alloc(mas, new_mas, gfp);
 			if (unlikely(mas_is_err(mas)))
-				return;
+				goto empty_mas;
 		} else {
 			/*
 			 * This is the last leaf node and duplication is
@@ -6349,6 +6346,8 @@ static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
 	/* Make them the same height */
 	new_mas->tree->ma_flags = mas->tree->ma_flags;
 	rcu_assign_pointer(new_mas->tree->ma_root, root);
+empty_mas:
+	mas_empty_nodes(mas);
 }
 
 /**
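
A rough sketch of the allocation pattern this change moves mas_dup_alloc() to, using only the helpers visible in the hunks above (mas_alloc_nodes(), mas_pop_node(), mas_empty_nodes()); the function name and simplified slot-filling loop below are hypothetical and not part of the patch:

/*
 * Illustrative only: state how many nodes are needed, fill the request,
 * pop one pre-allocated node per slot, then drop whatever is left over.
 */
static void example_fill_children(struct ma_state *mas, void __rcu **new_slots,
				  unsigned char count, gfp_t gfp)
{
	unsigned char i;

	mas->node_request = count;	/* one node per occupied slot */
	mas_alloc_nodes(mas, gfp);
	if (unlikely(mas_is_err(mas)))
		return;			/* -ENOMEM already recorded in mas */

	for (i = 0; i < count; i++)
		new_slots[i] = ma_mnode_ptr(mas_pop_node(mas));

	/* Return unused nodes and the sheaf, as mas_destroy() now does. */
	mas_empty_nodes(mas);
}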