
Commit e4add24

mhiramat authored and Ingo Molnar committed
kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic
optimize_kprobe() and unoptimize_kprobe() cancel if a given kprobe is already on the optimizing_list or unoptimizing_list. However, since the following commit:

  f66c044 ("kprobes: Set unoptimized flag after unoptimizing code")

modified the update timing of KPROBE_FLAG_OPTIMIZED, this no longer works as expected.

An optimized_kprobe can be in the following states:

- [optimizing]: before inserting the jump instruction; op.kp->flags has KPROBE_FLAG_OPTIMIZED and op->list is not empty.
- [optimized]: jump inserted; op.kp->flags has KPROBE_FLAG_OPTIMIZED and op->list is empty.
- [unoptimizing]: before removing the jump instruction (including an unused optprobe); op.kp->flags has KPROBE_FLAG_OPTIMIZED and op->list is not empty.
- [unoptimized]: jump removed; op.kp->flags does not have KPROBE_FLAG_OPTIMIZED and op->list is empty.

The current code mistakenly expects the [unoptimizing] state not to have KPROBE_FLAG_OPTIMIZED, which can lead to incorrect results.

To fix this, introduce optprobe_queued_unopt() to distinguish the [optimizing] and [unoptimizing] states, and fix the logic in optimize_kprobe() and unoptimize_kprobe().

[ mingo: Cleaned up the changelog and the code a bit. ]

Signed-off-by: Masami Hiramatsu <[email protected]>
Reviewed-by: Steven Rostedt (VMware) <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Fixes: f66c044 ("kprobes: Set unoptimized flag after unoptimizing code")
Link: https://lkml.kernel.org/r/157840814418.7181.13478003006386303481.stgit@devnote2
Signed-off-by: Ingo Molnar <[email protected]>
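For illustration only (not part of this commit): a minimal sketch of how the four states listed above can be told apart once this change is in place, using kprobe_optimized(), list_empty() and the new optprobe_queued_unopt() helper from the patch below. The function name optprobe_state() and the returned strings are hypothetical.

/*
 * Illustrative sketch, not from the patch: classify an optimized_kprobe
 * according to the state table in the changelog.  kprobe_optimized()
 * tests KPROBE_FLAG_OPTIMIZED; optprobe_queued_unopt() is the helper
 * this commit introduces.  optprobe_state() is a hypothetical name.
 */
static const char *optprobe_state(struct optimized_kprobe *op)
{
	if (!kprobe_optimized(&op->kp))
		return "[unoptimized]";		/* jump removed, flag cleared */
	if (list_empty(&op->list))
		return "[optimized]";		/* jump inserted, not queued */
	if (optprobe_queued_unopt(op))
		return "[unoptimizing]";	/* flag still set, queued on unoptimizing_list */
	return "[optimizing]";			/* flag set, queued on optimizing_list */
}

The point of the helper is visible here: [optimizing] and [unoptimizing] look identical from the flag and list-emptiness alone, so only a membership check against unoptimizing_list can separate them.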
1 parent 2040cf9 commit e4add24

1 file changed (+43, -24 lines)


kernel/kprobes.c

Lines changed: 43 additions & 24 deletions
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
+	if (!kprobe_optimized(p))
 		return;
-	}
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
+
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}

0 commit comments
