Commit e3852a1

howlett authored and tehcaster committed
maple_tree: Drop bulk insert support
Bulk insert mode was added to facilitate faster forking, but forking now uses __mt_dup() to duplicate the tree. The addition of sheaves has made the bulk allocations difficult to maintain, since the expected entries would be preallocated into the maple state. A big part of the maple state node allocation was the ability to push nodes back onto the state for later use, which was essential to the bulk insert algorithm.

Remove the mas_expected_entries() and mas_destroy_rebalance() functions, as well as the MA_STATE_BULK and MA_STATE_REBALANCE maple state flags, since they no longer have any users. Drop the associated testing as well.

Signed-off-by: Liam R. Howlett <[email protected]>
Reviewed-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent da577f1 commit e3852a1
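For context, a minimal sketch of the two patterns involved, under stated assumptions: it uses the maple tree helpers MA_STATE(), mas_set_range(), mas_store(), mas_destroy() and __mt_dup(), while struct item and its start/end fields are hypothetical and locking and error handling are elided. The first function shows the bulk-insert flow this commit removes; the second shows the whole-tree duplication that forking relies on instead.

#include <linux/maple_tree.h>
#include <linux/gfp.h>

/* Hypothetical payload type, for illustration only. */
struct item {
	unsigned long start;
	unsigned long end;
};

/* Removed pattern: preallocate for an expected count, then store in order. */
static int bulk_copy_old(struct maple_tree *dst, struct item **src,
			 unsigned long nr)
{
	unsigned long i;
	int ret;
	MA_STATE(mas, dst, 0, 0);

	ret = mas_expected_entries(&mas, nr);	/* interface dropped by this commit */
	if (ret)
		return ret;

	for (i = 0; i < nr; i++) {
		mas_set_range(&mas, src[i]->start, src[i]->end);
		mas_store(&mas, src[i]);	/* consumes the preallocated nodes */
	}

	mas_destroy(&mas);			/* frees any unused preallocations */
	return 0;
}

/* Current approach for fork: duplicate the source tree node by node. */
static int dup_tree_new(struct maple_tree *old, struct maple_tree *new)
{
	return __mt_dup(old, new, GFP_KERNEL);	/* callers then fix up the copied entries */
}

Because __mt_dup() copies the tree structure wholesale, the fork path no longer needs the push-back and preallocation machinery that MA_STATE_BULK depended on.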

File tree: 3 files changed, +4 / -439 lines changed


lib/maple_tree.c

Lines changed: 4 additions & 266 deletions
@@ -83,13 +83,9 @@
 
 /*
  * Maple state flags
- * * MA_STATE_BULK - Bulk insert mode
- * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
  * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
  */
-#define MA_STATE_BULK		1
-#define MA_STATE_REBALANCE	2
-#define MA_STATE_PREALLOC	4
+#define MA_STATE_PREALLOC	1
 
 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
@@ -1031,24 +1027,6 @@ static inline void mas_descend(struct ma_state *mas)
 	mas->node = mas_slot(mas, slots, mas->offset);
 }
 
-/*
- * mte_set_gap() - Set a maple node gap.
- * @mn: The encoded maple node
- * @gap: The offset of the gap to set
- * @val: The gap value
- */
-static inline void mte_set_gap(const struct maple_enode *mn,
-		unsigned char gap, unsigned long val)
-{
-	switch (mte_node_type(mn)) {
-	default:
-		break;
-	case maple_arange_64:
-		mte_to_node(mn)->ma64.gap[gap] = val;
-		break;
-	}
-}
-
 /*
  * mas_ascend() - Walk up a level of the tree.
  * @mas: The maple state
@@ -1878,21 +1856,7 @@ static inline int mab_calc_split(struct ma_state *mas,
 	 * end on a NULL entry, with the exception of the left-most leaf. The
 	 * limitation means that the split of a node must be checked for this condition
 	 * and be able to put more data in one direction or the other.
-	 */
-	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
-		*mid_split = 0;
-		split = b_end - mt_min_slots[bn->type];
-
-		if (!ma_is_leaf(bn->type))
-			return split;
-
-		mas->mas_flags |= MA_STATE_REBALANCE;
-		if (!bn->slot[split])
-			split--;
-		return split;
-	}
-
-	/*
+	 *
 	 * Although extremely rare, it is possible to enter what is known as the 3-way
 	 * split scenario. The 3-way split comes about by means of a store of a range
 	 * that overwrites the end and beginning of two full nodes. The result is a set
@@ -2039,27 +2003,6 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 	}
 }
 
-/*
- * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
- * @mas: The maple state
- * @end: The maple node end
- * @mt: The maple node type
- */
-static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
-		enum maple_type mt)
-{
-	if (!(mas->mas_flags & MA_STATE_BULK))
-		return;
-
-	if (mte_is_root(mas->node))
-		return;
-
-	if (end > mt_min_slots[mt]) {
-		mas->mas_flags &= ~MA_STATE_REBALANCE;
-		return;
-	}
-}
-
 /*
  * mas_store_b_node() - Store an @entry into the b_node while also copying the
  * data from a maple encoded node.
@@ -2109,9 +2052,6 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
 	/* Handle new range ending before old range ends */
 	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
 	if (piv > mas->last) {
-		if (piv == ULONG_MAX)
-			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
-
 		if (offset_end != slot)
 			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
 							  offset_end);
@@ -3011,126 +2951,6 @@ static inline void mas_rebalance(struct ma_state *mas,
 	return mas_spanning_rebalance(mas, &mast, empty_count);
 }
 
-/*
- * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
- * state.
- * @mas: The maple state
- * @end: The end of the left-most node.
- *
- * During a mass-insert event (such as forking), it may be necessary to
- * rebalance the left-most node when it is not sufficient.
- */
-static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
-{
-	enum maple_type mt = mte_node_type(mas->node);
-	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
-	struct maple_enode *eparent, *old_eparent;
-	unsigned char offset, tmp, split = mt_slots[mt] / 2;
-	void __rcu **l_slots, **slots;
-	unsigned long *l_pivs, *pivs, gap;
-	bool in_rcu = mt_in_rcu(mas->tree);
-	unsigned char new_height = mas_mt_height(mas);
-
-	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
-
-	l_mas = *mas;
-	mas_prev_sibling(&l_mas);
-
-	/* set up node. */
-	if (in_rcu) {
-		newnode = mas_pop_node(mas);
-	} else {
-		newnode = &reuse;
-	}
-
-	node = mas_mn(mas);
-	newnode->parent = node->parent;
-	slots = ma_slots(newnode, mt);
-	pivs = ma_pivots(newnode, mt);
-	left = mas_mn(&l_mas);
-	l_slots = ma_slots(left, mt);
-	l_pivs = ma_pivots(left, mt);
-	if (!l_slots[split])
-		split++;
-	tmp = mas_data_end(&l_mas) - split;
-
-	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
-	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
-	pivs[tmp] = l_mas.max;
-	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
-	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
-
-	l_mas.max = l_pivs[split];
-	mas->min = l_mas.max + 1;
-	old_eparent = mt_mk_node(mte_parent(l_mas.node),
-				 mas_parent_type(&l_mas, l_mas.node));
-	tmp += end;
-	if (!in_rcu) {
-		unsigned char max_p = mt_pivots[mt];
-		unsigned char max_s = mt_slots[mt];
-
-		if (tmp < max_p)
-			memset(pivs + tmp, 0,
-			       sizeof(unsigned long) * (max_p - tmp));
-
-		if (tmp < mt_slots[mt])
-			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
-
-		memcpy(node, newnode, sizeof(struct maple_node));
-		ma_set_meta(node, mt, 0, tmp - 1);
-		mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
-			      l_pivs[split]);
-
-		/* Remove data from l_pivs. */
-		tmp = split + 1;
-		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
-		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
-		ma_set_meta(left, mt, 0, split);
-		eparent = old_eparent;
-
-		goto done;
-	}
-
-	/* RCU requires replacing both l_mas, mas, and parent. */
-	mas->node = mt_mk_node(newnode, mt);
-	ma_set_meta(newnode, mt, 0, tmp);
-
-	new_left = mas_pop_node(mas);
-	new_left->parent = left->parent;
-	mt = mte_node_type(l_mas.node);
-	slots = ma_slots(new_left, mt);
-	pivs = ma_pivots(new_left, mt);
-	memcpy(slots, l_slots, sizeof(void *) * split);
-	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
-	ma_set_meta(new_left, mt, 0, split);
-	l_mas.node = mt_mk_node(new_left, mt);
-
-	/* replace parent. */
-	offset = mte_parent_slot(mas->node);
-	mt = mas_parent_type(&l_mas, l_mas.node);
-	parent = mas_pop_node(mas);
-	slots = ma_slots(parent, mt);
-	pivs = ma_pivots(parent, mt);
-	memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
-	rcu_assign_pointer(slots[offset], mas->node);
-	rcu_assign_pointer(slots[offset - 1], l_mas.node);
-	pivs[offset - 1] = l_mas.max;
-	eparent = mt_mk_node(parent, mt);
-done:
-	gap = mas_leaf_max_gap(mas);
-	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
-	gap = mas_leaf_max_gap(&l_mas);
-	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
-	mas_ascend(mas);
-
-	if (in_rcu) {
-		mas_replace_node(mas, old_eparent, new_height);
-		mas_adopt_children(mas, mas->node);
-	}
-
-	mas_update_gap(mas);
-}
-
 /*
  * mas_split_final_node() - Split the final node in a subtree operation.
  * @mast: the maple subtree state
@@ -3837,8 +3657,6 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
 
 	if (mas->last == wr_mas->end_piv)
 		offset_end++; /* don't copy this offset */
-	else if (unlikely(wr_mas->r_max == ULONG_MAX))
-		mas_bulk_rebalance(mas, mas->end, wr_mas->type);
 
 	/* set up node. */
 	if (in_rcu) {
@@ -4255,7 +4073,7 @@ static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
 	new_end = mas_wr_new_end(wr_mas);
 	/* Potential spanning rebalance collapsing a node */
 	if (new_end < mt_min_slots[wr_mas->type]) {
-		if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK))
+		if (!mte_is_root(mas->node))
 			return wr_rebalance;
 		return wr_node_store;
 	}
@@ -5562,25 +5380,7 @@ void mas_destroy(struct ma_state *mas)
 	struct maple_alloc *node;
 	unsigned long total;
 
-	/*
-	 * When using mas_for_each() to insert an expected number of elements,
-	 * it is possible that the number inserted is less than the expected
-	 * number. To fix an invalid final node, a check is performed here to
-	 * rebalance the previous node with the final node.
-	 */
-	if (mas->mas_flags & MA_STATE_REBALANCE) {
-		unsigned char end;
-		if (mas_is_err(mas))
-			mas_reset(mas);
-		mas_start(mas);
-		mtree_range_walk(mas);
-		end = mas->end + 1;
-		if (end < mt_min_slot_count(mas->node) - 1)
-			mas_destroy_rebalance(mas, end);
-
-		mas->mas_flags &= ~MA_STATE_REBALANCE;
-	}
-	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+	mas->mas_flags &= ~MA_STATE_PREALLOC;
 
 	total = mas_allocated(mas);
 	while (total) {
@@ -5600,68 +5400,6 @@ void mas_destroy(struct ma_state *mas)
 }
 EXPORT_SYMBOL_GPL(mas_destroy);
 
-/*
- * mas_expected_entries() - Set the expected number of entries that will be inserted.
- * @mas: The maple state
- * @nr_entries: The number of expected entries.
- *
- * This will attempt to pre-allocate enough nodes to store the expected number
- * of entries. The allocations will occur using the bulk allocator interface
- * for speed. Please call mas_destroy() on the @mas after inserting the entries
- * to ensure any unused nodes are freed.
- *
- * Return: 0 on success, -ENOMEM if memory could not be allocated.
- */
-int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
-{
-	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
-	struct maple_enode *enode = mas->node;
-	int nr_nodes;
-	int ret;
-
-	/*
-	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
-	 * forking a process and duplicating the VMAs from one tree to a new
-	 * tree. When such a situation arises, it is known that the new tree is
-	 * not going to be used until the entire tree is populated. For
-	 * performance reasons, it is best to use a bulk load with RCU disabled.
-	 * This allows for optimistic splitting that favours the left and reuse
-	 * of nodes during the operation.
-	 */
-
-	/* Optimize splitting for bulk insert in-order */
-	mas->mas_flags |= MA_STATE_BULK;
-
-	/*
-	 * Avoid overflow, assume a gap between each entry and a trailing null.
-	 * If this is wrong, it just means allocation can happen during
-	 * insertion of entries.
-	 */
-	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
-	if (!mt_is_alloc(mas->tree))
-		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
-
-	/* Leaves; reduce slots to keep space for expansion */
-	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
-	/* Internal nodes */
-	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
-	/* Add working room for split (2 nodes) + new parents */
-	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
-
-	/* Detect if allocations run out */
-	mas->mas_flags |= MA_STATE_PREALLOC;
-
-	if (!mas_is_err(mas))
-		return 0;
-
-	ret = xa_err(mas->node);
-	mas->node = enode;
-	mas_destroy(mas);
-	return ret;
-
-}
-EXPORT_SYMBOL_GPL(mas_expected_entries);
-
 static void mas_may_activate(struct ma_state *mas)
 {
 	if (!mas->node) {
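As a worked example of the sizing logic removed above (illustrative numbers; it assumes the 64-bit slot counts MAPLE_RANGE64_SLOTS == 16 and MAPLE_ARANGE64_SLOTS == 10, and an allocation tree), a call such as mas_expected_entries(&mas, 1000) would have requested:

	nr_nodes = max(1000, 2 * 1000 + 1)    = 2001
	leaves   = DIV_ROUND_UP(2001, 16 - 2) = 143
	internal = DIV_ROUND_UP(143, 10 - 2)  = 18
	total    = 143 + 18 + 3               = 164 nodes preallocated up front

It is this up-front bulk request held in the maple state that the commit message describes as difficult to maintain once sheaves were added.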
