@@ -666,6 +666,27 @@ EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
 
 __bpf_kfunc_start_defs();
 
+static void bpf_prog_report_rqspinlock_violation(const char *str, void *lock, bool irqsave)
+{
+        struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+        struct bpf_stream_stage ss;
+        struct bpf_prog *prog;
+
+        prog = bpf_prog_find_from_stack();
+        if (!prog)
+                return;
+        bpf_stream_stage(ss, prog, BPF_STDERR, ({
+                bpf_stream_printk(ss, "ERROR: %s for bpf_res_spin_lock%s\n", str, irqsave ? "_irqsave" : "");
+                bpf_stream_printk(ss, "Attempted lock = 0x%px\n", lock);
+                bpf_stream_printk(ss, "Total held locks = %d\n", rqh->cnt);
+                for (int i = 0; i < min(RES_NR_HELD, rqh->cnt); i++)
+                        bpf_stream_printk(ss, "Held lock[%2d] = 0x%px\n", i, rqh->locks[i]);
+                bpf_stream_dump_stack(ss);
+        }));
+}
+
+#define REPORT_STR(ret) ({ (ret) == -ETIMEDOUT ? "Timeout detected" : "AA or ABBA deadlock detected"; })
+
 __bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
 {
         int ret;
@@ -676,6 +697,7 @@ __bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
         preempt_disable();
         ret = res_spin_lock((rqspinlock_t *)lock);
         if (unlikely(ret)) {
+                bpf_prog_report_rqspinlock_violation(REPORT_STR(ret), lock, false);
                 preempt_enable();
                 return ret;
         }
@@ -698,6 +720,7 @@ __bpf_kfunc int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock, unsign
         local_irq_save(flags);
         ret = res_spin_lock((rqspinlock_t *)lock);
         if (unlikely(ret)) {
+                bpf_prog_report_rqspinlock_violation(REPORT_STR(ret), lock, true);
                 local_irq_restore(flags);
                 preempt_enable();
                 return ret;
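
For readers unfamiliar with the reporting pattern, here is a minimal standalone sketch of the same idea in plain userspace C. It is not kernel code: `held_locks`, `NR_HELD`, `report_violation()` and `try_lock()` are illustrative stand-ins, and where the sketch uses `printf()` the patch writes to the BPF program's BPF_STDERR stream with `bpf_stream_printk()` and additionally dumps a stack trace. The shape is the same: the failed acquisition path hands the string chosen by `REPORT_STR()` plus the attempted lock to a reporter, which walks the table of locks already held on the CPU.

```c
/*
 * Standalone userspace sketch of the error-path reporting pattern above.
 * All names here are illustrative stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <stdio.h>

#define NR_HELD 16	/* arbitrary bound for this sketch */

struct held_locks {
	int cnt;
	void *locks[NR_HELD];
};

/* The kernel keeps this table per CPU; one global instance is enough here. */
static struct held_locks held;

/* Same selection logic as the REPORT_STR() macro in the patch. */
#define REPORT_STR(ret) \
	((ret) == -ETIMEDOUT ? "Timeout detected" : "AA or ABBA deadlock detected")

static void report_violation(const char *str, void *lock)
{
	printf("ERROR: %s for res_spin_lock\n", str);
	printf("Attempted lock = %p\n", lock);
	printf("Total held locks = %d\n", held.cnt);
	for (int i = 0; i < held.cnt && i < NR_HELD; i++)
		printf("Held lock[%2d] = %p\n", i, held.locks[i]);
}

/* Stand-in lock that always fails with a deadlock error. */
static int try_lock(void *lock)
{
	(void)lock;
	return -EDEADLK;
}

int main(void)
{
	int lock_a, lock_b, ret;

	/* Pretend lock_a is already held on this CPU. */
	held.cnt = 1;
	held.locks[0] = &lock_a;

	/*
	 * A failed acquisition triggers the report, mirroring the error
	 * path added to bpf_res_spin_lock() in the diff.
	 */
	ret = try_lock(&lock_b);
	if (ret)
		report_violation(REPORT_STR(ret), &lock_b);
	return 0;
}
```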