
Commit 1aaf5cb

bcachefs: Fix btree_trans list ordering

Author: Kent Overstreet

The debug code relies on btree_trans_list being ordered so that it can
resume on subsequent calls or lock restarts. However, it was using
trans->locking_wait.task->pid, which is incorrect since btree_trans
objects are cached and reused, typically by different tasks. Fix this by
switching to pointer order, and also sort the list lazily when required,
speeding up the bch2_trans_get() fastpath.

Signed-off-by: Kent Overstreet <[email protected]>

1 parent de611ab commit 1aaf5cb
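For context, the "pointer order" the commit message refers to is ordering list entries by the address of the btree_trans object itself, which stays fixed while the object is on the list; a pid read back out of a cached, reused btree_trans does not. A minimal userspace sketch of the comparator idea (cmp_int below is a stand-in modeled on bcachefs's three-way-compare helper, and the uintptr_t casts are my addition for portable userspace code):

#include <stdint.h>

/* Stand-in for the kernel's cmp_int(): three-way compare yielding -1, 0 or 1. */
#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))

/*
 * Order two nodes by address. An object's address cannot change out from
 * under an iterator while it sits on the list, unlike the pid of whichever
 * task currently owns a cached, reused transaction object.
 */
static int ptr_order_cmp(const void *l, const void *r)
{
        return cmp_int((uintptr_t) l, (uintptr_t) r);
}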

File tree: 2 files changed (+34, -11 lines)


fs/bcachefs/btree_iter.c

Lines changed: 2 additions & 7 deletions
@@ -3149,15 +3149,10 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
                         BUG_ON(pos_task &&
                                pid == pos_task->pid &&
                                pos->locked);
-
-                        if (pos_task && pid < pos_task->pid) {
-                                list_add_tail(&trans->list, &pos->list);
-                                goto list_add_done;
-                        }
                 }
         }
-        list_add_tail(&trans->list, &c->btree_trans_list);
-list_add_done:
+
+        list_add(&trans->list, &c->btree_trans_list);
         seqmutex_unlock(&c->btree_trans_lock);
 got_trans:
         trans->c = c;
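The net effect on the fastpath: the removed code walked the list under the lock looking for a pid-ordered insertion point, while the new code performs one constant-time list_add() at the head and leaves ordering to the debugfs reader below. A self-contained sketch of that O(1) head insert, using a simplified stand-in for struct list_head rather than the real <linux/list.h> definitions:

#include <stdio.h>

/* Simplified circular doubly-linked list, after the pattern of <linux/list.h>. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Insert 'new' right after 'head': four pointer writes, O(1). */
static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

int main(void)
{
        static struct list_head list = LIST_HEAD_INIT(list);
        struct list_head a, b, c;

        /* No walk to find an ordered slot, as the removed code did. */
        list_add(&a, &list);
        list_add(&b, &list);
        list_add(&c, &list);

        /* Head insertion yields reverse insertion order: c, b, a. */
        for (struct list_head *pos = list.next; pos != &list; pos = pos->next)
                printf("%p\n", (void *) pos);
        return 0;
}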

fs/bcachefs/debug.c

Lines changed: 32 additions & 4 deletions
@@ -568,6 +568,32 @@ static const struct file_operations cached_btree_nodes_ops = {
         .read = bch2_cached_btree_nodes_read,
 };
 
+typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
+
+static void list_sort(struct list_head *head, list_cmp_fn cmp)
+{
+        struct list_head *pos;
+
+        list_for_each(pos, head)
+                while (!list_is_last(pos, head) &&
+                       cmp(pos, pos->next) > 0) {
+                        struct list_head *pos2, *next = pos->next;
+
+                        list_del(next);
+                        list_for_each(pos2, head)
+                                if (cmp(next, pos2) < 0)
+                                        goto pos_found;
+                        BUG();
+pos_found:
+                        list_add_tail(next, pos2);
+                }
+}
+
+static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
+{
+        return cmp_int(l, r);
+}
+
 static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
                                             size_t size, loff_t *ppos)
 {
@@ -581,12 +607,14 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
         i->ret = 0;
 restart:
         seqmutex_lock(&c->btree_trans_lock);
-        list_for_each_entry(trans, &c->btree_trans_list, list) {
-                struct task_struct *task = READ_ONCE(trans->locking_wait.task);
+        list_sort(&c->btree_trans_list, list_ptr_order_cmp);
 
-                if (!task || task->pid <= i->iter)
+        list_for_each_entry(trans, &c->btree_trans_list, list) {
+                if ((ulong) trans <= i->iter)
                         continue;
 
+                i->iter = (ulong) trans;
+
                 if (!closure_get_not_zero(&trans->ref))
                         continue;
 
@@ -596,7 +624,7 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
 
                 prt_printf(&i->buf, "backtrace:\n");
                 printbuf_indent_add(&i->buf, 2);
-                bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
+                bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
                 printbuf_indent_sub(&i->buf, 2);
                 prt_newline(&i->buf);
 
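The list_sort() added above is a small in-place insertion sort: whenever an entry compares greater than its successor, the successor is unlinked and rescanned in from the front, and since cmp(pos, pos->next) > 0 guarantees the inner scan finds a slot no later than pos itself, the BUG() is unreachable. A self-contained userspace check of the same algorithm (the list helpers are simplified stand-ins for <linux/list.h>, BUG() becomes assert(0), and the uintptr_t casts in the comparator are my addition):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the <linux/list.h> pieces list_sort() needs. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))

static int list_is_last(const struct list_head *node, const struct list_head *head)
{
        return node->next == head;
}

/* Insert 'new' right after 'head'. */
static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

/* Insert 'new' right before 'head' (at the tail relative to it). */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);

/* The patch's insertion sort, verbatim except BUG() -> assert(0). */
static void list_sort(struct list_head *head, list_cmp_fn cmp)
{
        struct list_head *pos;

        list_for_each(pos, head)
                while (!list_is_last(pos, head) &&
                       cmp(pos, pos->next) > 0) {
                        struct list_head *pos2, *next = pos->next;

                        list_del(next);
                        list_for_each(pos2, head)
                                if (cmp(next, pos2) < 0)
                                        goto pos_found;
                        assert(0);      /* unreachable: pos orders after next */
pos_found:
                        list_add_tail(next, pos2);
                }
}

static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
{
        return cmp_int((uintptr_t) l, (uintptr_t) r);
}

int main(void)
{
        static struct list_head head = LIST_HEAD_INIT(head);
        struct list_head nodes[8];
        struct list_head *pos;

        /* Head insertion, as in the patched __bch2_trans_get(), leaves the
         * list in descending address order, the worst case for this sort. */
        for (int i = 0; i < 8; i++)
                list_add(&nodes[i], &head);

        list_sort(&head, list_ptr_order_cmp);

        /* Verify ascending address order. */
        list_for_each(pos, &head)
                assert(list_is_last(pos, &head) ||
                       list_ptr_order_cmp(pos, pos->next) < 0);
        printf("sorted\n");
        return 0;
}

The sort is quadratic in the worst case, but once the list has been sorted, a later read only has to walk the few entries that __bch2_trans_get() has since pushed onto the head into place, so the common case stays close to a single linear pass while keeping the allocation fastpath O(1).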