
Commit 83c9030

mykyta5 authored and Alexei Starovoitov committed
bpf: Simplify bpf_timer_cancel()
Remove the lock from the bpf_timer_cancel() helper. The lock does not protect against concurrent modification of the bpf_async_cb data fields, as those are modified in the callback without locking. Use guard(rcu)() instead of a pair of explicit lock()/unlock() calls.

Acked-by: Kumar Kartikeya Dwivedi <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Mykyta Yatsenko <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 8bb1e32 commit 83c9030
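A quick illustration of why the error paths in the diff below can simply return: guard(rcu)() comes from the kernel's scope-based cleanup support (include/linux/cleanup.h), which is built on the compiler's cleanup attribute, so rcu_read_unlock() runs automatically whenever the guarded scope is left. The userspace sketch below mimics that mechanism with mock lock/unlock functions; the mock_* names and the macro are illustrative only, not the kernel implementation.

/* Minimal userspace sketch of the scope-based guard pattern behind
 * guard(rcu)(). The mock_* names are stand-ins for illustration only.
 */
#include <stdio.h>

static void mock_rcu_read_lock(void)   { puts("rcu_read_lock()"); }
static void mock_rcu_read_unlock(void) { puts("rcu_read_unlock()"); }

/* Called automatically when the guard variable goes out of scope. */
static void mock_rcu_guard_exit(int *unused)
{
	(void)unused;
	mock_rcu_read_unlock();
}

/* Rough stand-in for guard(rcu)(): lock now, unlock on every exit path. */
#define mock_guard_rcu() \
	__attribute__((cleanup(mock_rcu_guard_exit))) \
	int mock_rcu_guard = (mock_rcu_read_lock(), 0)

static int lookup(int key)
{
	mock_guard_rcu();

	if (key < 0)
		return -1;	/* early return still runs the unlock */
	return key * 2;
}

int main(void)
{
	printf("lookup(-1) = %d\n", lookup(-1));
	printf("lookup(21) = %d\n", lookup(21));
	return 0;
}

Because the unlock is tied to scope exit, the early returns in bpf_timer_cancel() no longer need a shared "out:" label to undo the locking by hand.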

File tree

1 file changed: +11, -16 lines


kernel/bpf/helpers.c

Lines changed: 11 additions & 16 deletions
@@ -1471,30 +1471,28 @@ static const struct bpf_func_proto bpf_timer_start_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
+BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
 {
 	struct bpf_hrtimer *t, *cur_t;
 	bool inc = false;
 	int ret = 0;
 
 	if (in_nmi())
 		return -EOPNOTSUPP;
-	rcu_read_lock();
-	__bpf_spin_lock_irqsave(&timer->lock);
-	t = timer->timer;
-	if (!t) {
-		ret = -EINVAL;
-		goto out;
-	}
+
+	guard(rcu)();
+
+	t = READ_ONCE(async->timer);
+	if (!t)
+		return -EINVAL;
 
 	cur_t = this_cpu_read(hrtimer_running);
 	if (cur_t == t) {
 		/* If bpf callback_fn is trying to bpf_timer_cancel()
 		 * its own timer the hrtimer_cancel() will deadlock
 		 * since it waits for callback_fn to finish.
 		 */
-		ret = -EDEADLK;
-		goto out;
+		return -EDEADLK;
 	}
 
 	/* Only account in-flight cancellations when invoked from a timer
@@ -1517,20 +1515,17 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
 		 * cancelling and waiting for it synchronously, since it might
 		 * do the same. Bail!
 		 */
-		ret = -EDEADLK;
-		goto out;
+		atomic_dec(&t->cancelling);
+		return -EDEADLK;
 	}
 drop:
 	bpf_async_update_prog_callback(&t->cb, NULL, NULL);
-out:
-	__bpf_spin_unlock_irqrestore(&timer->lock);
 	/* Cancel the timer and wait for associated callback to finish
 	 * if it was running.
 	 */
-	ret = ret ?: hrtimer_cancel(&t->timer);
+	ret = hrtimer_cancel(&t->timer);
 	if (inc)
 		atomic_dec(&t->cancelling);
-	rcu_read_unlock();
 	return ret;
 }
 
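A side note on the t = READ_ONCE(async->timer) read in the first hunk: with the spin lock gone, the timer pointer is loaded without serialization against writers, and READ_ONCE() keeps the compiler from tearing or re-fetching that load while RCU keeps the object alive. A rough userspace analogue of that lockless read, using a relaxed C11 atomic load in place of the kernel macro (the names below are illustrative stand-ins, not kernel API):

/* Userspace sketch of a lockless pointer read, approximating what
 * READ_ONCE() gives the kernel code above: a single, non-torn load
 * the compiler cannot re-fetch. Names are illustrative, not kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_timer {
	int id;
};

/* A writer publishes or clears this pointer; readers never take a lock. */
static _Atomic(struct fake_timer *) armed_timer;

static int cancel_if_armed(void)
{
	/* Relaxed atomic load: the closest portable analogue of READ_ONCE(). */
	struct fake_timer *t = atomic_load_explicit(&armed_timer,
						    memory_order_relaxed);
	if (!t)
		return -1;	/* nothing armed, cf. the -EINVAL path above */

	printf("cancelling timer %d\n", t->id);
	return 0;
}

int main(void)
{
	static struct fake_timer timer = { .id = 42 };

	cancel_if_armed();	/* pointer still NULL, returns -1 */
	atomic_store_explicit(&armed_timer, &timer, memory_order_release);
	cancel_if_armed();	/* now finds and "cancels" the timer */
	return 0;
}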
