@@ -3952,10 +3952,36 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
+static inline short task_wait_context(struct task_struct *curr)
+{
+	/*
+	 * Set appropriate wait type for the context; for IRQs we have to take
+	 * into account force_irqthread as that is implied by PREEMPT_RT.
+	 */
+	if (curr->hardirq_context) {
+		/*
+		 * Check if force_irqthreads will run us threaded.
+		 */
+		if (curr->hardirq_threaded || curr->irq_config)
+			return LD_WAIT_CONFIG;
+
+		return LD_WAIT_SPIN;
+	} else if (curr->softirq_context) {
+		/*
+		 * Softirqs are always threaded.
+		 */
+		return LD_WAIT_CONFIG;
+	}
+
+	return LD_WAIT_MAX;
+}
+
 static int
 print_lock_invalid_wait_context(struct task_struct *curr,
 				struct held_lock *hlock)
 {
+	short curr_inner;
+
 	if (!debug_locks_off())
 		return 0;
 	if (debug_locks_silent)
@@ -3971,6 +3997,10 @@ print_lock_invalid_wait_context(struct task_struct *curr,
 	print_lock(hlock);
 
 	pr_warn("other info that might help us debug this:\n");
+
+	curr_inner = task_wait_context(curr);
+	pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
+
 	lockdep_print_held_locks(curr);
 
 	pr_warn("stack backtrace:\n");
@@ -4017,26 +4047,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 	}
 	depth++;
 
-	/*
-	 * Set appropriate wait type for the context; for IRQs we have to take
-	 * into account force_irqthread as that is implied by PREEMPT_RT.
-	 */
-	if (curr->hardirq_context) {
-		/*
-		 * Check if force_irqthreads will run us threaded.
-		 */
-		if (curr->hardirq_threaded || curr->irq_config)
-			curr_inner = LD_WAIT_CONFIG;
-		else
-			curr_inner = LD_WAIT_SPIN;
-	} else if (curr->softirq_context) {
-		/*
-		 * Softirqs are always threaded.
-		 */
-		curr_inner = LD_WAIT_CONFIG;
-	} else {
-		curr_inner = LD_WAIT_MAX;
-	}
+	curr_inner = task_wait_context(curr);
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
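
For readers outside the kernel tree, here is a minimal userspace sketch of the decision table the new task_wait_context() helper encodes. The struct task_ctx type, the enum values, and main() are illustrative stand-ins, not the kernel's definitions (the real constants live in the lockdep headers); only the branching logic mirrors the patch.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's wait-type constants;
 * the actual values in the kernel may differ. */
enum wait_type { LD_WAIT_SPIN, LD_WAIT_CONFIG, LD_WAIT_MAX };

/* Simplified model of the relevant task_struct fields. */
struct task_ctx {
	int hardirq_context;
	int hardirq_threaded;
	int irq_config;
	int softirq_context;
};

/* Same decision logic as the helper in the patch: hardirq context
 * is the strictest (spinning waits only) unless the handler runs
 * threaded, softirqs are always threaded, and plain task context
 * permits any wait type. */
static short task_wait_context(const struct task_ctx *curr)
{
	if (curr->hardirq_context) {
		if (curr->hardirq_threaded || curr->irq_config)
			return LD_WAIT_CONFIG;

		return LD_WAIT_SPIN;
	} else if (curr->softirq_context) {
		return LD_WAIT_CONFIG;
	}

	return LD_WAIT_MAX;
}

int main(void)
{
	struct task_ctx hardirq  = { .hardirq_context = 1 };
	struct task_ctx threaded = { .hardirq_context = 1, .hardirq_threaded = 1 };
	struct task_ctx softirq  = { .softirq_context = 1 };
	struct task_ctx task     = { 0 };

	printf("hardirq:  %d (expect LD_WAIT_SPIN)\n",   task_wait_context(&hardirq));
	printf("threaded: %d (expect LD_WAIT_CONFIG)\n", task_wait_context(&threaded));
	printf("softirq:  %d (expect LD_WAIT_CONFIG)\n", task_wait_context(&softirq));
	printf("task:     %d (expect LD_WAIT_MAX)\n",    task_wait_context(&task));
	return 0;
}

Factoring the logic into one helper lets print_lock_invalid_wait_context() report the same context classification that check_wait_context() used to compute inline, so the warning output and the check can no longer drift apart.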