@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
+	if (!kprobe_optimized(p))
 		return;
-	}
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
+
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}
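
The key mechanism in this change is the explicit membership test: optprobe_queued_unopt() walks unoptimizing_list and compares each entry's address against op, which lets optimize_kprobe() and unoptimize_kprobe() distinguish "queued for unoptimizing" from "queued for optimizing" even though both states reuse the same op->list node. Below is a minimal userspace sketch of that pattern, assuming a hand-rolled circular doubly linked list in place of the kernel's <linux/list.h>; the names fake_op, queued() and unopt_queue are invented for illustration and are not part of the patch.

/* Standalone sketch (userspace, not kernel code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

/* Insert n just before head h (i.e. at the tail of the queue). */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Hypothetical stand-in for struct optimized_kprobe. */
struct fake_op {
	int id;
	struct list_head list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Membership test: walk the queue and compare pointers, as optprobe_queued_unopt() does. */
static bool queued(struct fake_op *op, struct list_head *queue)
{
	struct list_head *pos;

	for (pos = queue->next; pos != queue; pos = pos->next) {
		if (container_of(pos, struct fake_op, list) == op)
			return true;
	}
	return false;
}

int main(void)
{
	struct list_head unopt_queue;
	struct fake_op a = { .id = 1 }, b = { .id = 2 };

	list_init(&unopt_queue);
	list_init(&a.list);
	list_init(&b.list);
	list_add_tail(&a.list, &unopt_queue);

	printf("a queued: %d\n", queued(&a, &unopt_queue)); /* prints 1 */
	printf("b queued: %d\n", queued(&b, &unopt_queue)); /* prints 0 */
	return 0;
}

As the diff shows, relying on !list_empty(&op->list) alone is not enough, because a non-empty op->list only says the probe is on some queue; the pointer walk is what tells the two queues apart.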