Skip to content

Commit 27861fc

Browse files
kkdwivedi authored and anakryiko committed
bpf: Drop rqspinlock usage in ringbuf
We noticed potential lock-ups and delays in progs running in NMI context with the rqspinlock changes, which suggests more improvements need to be made before we can support ring buffer updates in such a context safely. Revert the change for now.

Fixes: a650d38 ("bpf: Convert ringbuf map to rqspinlock")
Reported-by: Josef Bacik <[email protected]>
Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/bpf/[email protected]
1 parent e4414b0 commit 27861fc

File tree

1 file changed

+10
-7
lines changed

1 file changed

+10
-7
lines changed

kernel/bpf/ringbuf.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
#include <linux/kmemleak.h>
1212
#include <uapi/linux/btf.h>
1313
#include <linux/btf_ids.h>
14-
#include <asm/rqspinlock.h>
1514

1615
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
1716

@@ -30,7 +29,7 @@ struct bpf_ringbuf {
3029
u64 mask;
3130
struct page **pages;
3231
int nr_pages;
33-
rqspinlock_t spinlock ____cacheline_aligned_in_smp;
32+
raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
3433
/* For user-space producer ring buffers, an atomic_t busy bit is used
3534
* to synchronize access to the ring buffers in the kernel, rather than
3635
* the spinlock that is used for kernel-producer ring buffers. This is
@@ -174,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
174173
if (!rb)
175174
return NULL;
176175

177-
raw_res_spin_lock_init(&rb->spinlock);
176+
raw_spin_lock_init(&rb->spinlock);
178177
atomic_set(&rb->busy, 0);
179178
init_waitqueue_head(&rb->waitq);
180179
init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -417,8 +416,12 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
417416

418417
cons_pos = smp_load_acquire(&rb->consumer_pos);
419418

420-
if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
421-
return NULL;
419+
if (in_nmi()) {
420+
if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
421+
return NULL;
422+
} else {
423+
raw_spin_lock_irqsave(&rb->spinlock, flags);
424+
}
422425

423426
pend_pos = rb->pending_pos;
424427
prod_pos = rb->producer_pos;
@@ -443,7 +446,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
443446
*/
444447
if (new_prod_pos - cons_pos > rb->mask ||
445448
new_prod_pos - pend_pos > rb->mask) {
446-
raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
449+
raw_spin_unlock_irqrestore(&rb->spinlock, flags);
447450
return NULL;
448451
}
449452

@@ -455,7 +458,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
455458
/* pairs with consumer's smp_load_acquire() */
456459
smp_store_release(&rb->producer_pos, new_prod_pos);
457460

458-
raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
461+
raw_spin_unlock_irqrestore(&rb->spinlock, flags);
459462

460463
return (void *)hdr + BPF_RINGBUF_HDR_SZ;
461464
}

0 commit comments

Comments (0)