@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	pr_warn("-----------------------------------------------------\n");
 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
-		lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
-		lockdep_hardirqs_enabled(curr),
+		lockdep_hardirqs_enabled(),
 		curr->softirqs_enabled);
 	print_lock(next);
 
@@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
-		lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
 		lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
-		lockdep_hardirqs_enabled(curr),
+		lockdep_hardirqs_enabled(),
 		lockdep_softirqs_enabled(curr));
 	print_lock(this);
 
@@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
-	if (unlikely(lockdep_hardirqs_enabled(current))) {
+	if (unlikely(lockdep_hardirqs_enabled())) {
 		/*
 		 * Neither irq nor preemption are disabled here
 		 * so this is racy by nature but losing one hit
@@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	 * Can't allow enabling interrupts while in an interrupt handler,
 	 * that's general bad form and such. Recursion, limited stack etc..
 	 */
-	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current)))
+	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
 		return;
 
 	current->hardirq_chain_key = current->curr_chain_key;
@@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
-	if (lockdep_hardirqs_enabled(curr)) {
+	if (lockdep_hardirqs_enabled()) {
 		/*
 		 * Neither irq nor preemption are disabled here
 		 * so this is racy by nature but losing one hit
@@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (lockdep_hardirqs_enabled(curr)) {
+	if (lockdep_hardirqs_enabled()) {
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
@@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip)
 	 * usage bit for all held locks, if hardirqs are
 	 * enabled too:
 	 */
-	if (lockdep_hardirqs_enabled(curr))
+	if (lockdep_hardirqs_enabled())
 		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
 	lockdep_recursion_finish();
 }
@@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 	 */
 	if (!hlock->trylock) {
 		if (hlock->read) {
-			if (lockdep_hardirq_context(curr))
+			if (lockdep_hardirq_context())
 				if (!mark_lock(curr, hlock,
 						LOCK_USED_IN_HARDIRQ_READ))
 					return 0;
@@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 						LOCK_USED_IN_SOFTIRQ_READ))
 					return 0;
 		} else {
-			if (lockdep_hardirq_context(curr))
+			if (lockdep_hardirq_context())
 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
 					return 0;
 			if (curr->softirq_context)
@@ -3928,7 +3928,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 
 static inline unsigned int task_irq_context(struct task_struct *task)
 {
-	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) +
+	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
 	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
 }
 
@@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr)
 	 * Set appropriate wait type for the context; for IRQs we have to take
 	 * into account force_irqthread as that is implied by PREEMPT_RT.
 	 */
-	if (lockdep_hardirq_context(curr)) {
+	if (lockdep_hardirq_context()) {
 		/*
 		 * Check if force_irqthreads will run us threaded.
 		 */
@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags)
 		return;
 
 	if (irqs_disabled_flags(flags)) {
-		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
 			printk("possible reason: unannotated irqs-off.\n");
 		}
 	} else {
-		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
 			printk("possible reason: unannotated irqs-on.\n");
 		}
 	}
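
The pattern across every hunk is the same: each caller already passed current (or a local curr aliasing it), because a task's hardirq state is only meaningful for the task that is executing right now, so the explicit task_struct argument carried no information. A minimal sketch of what the argumentless helpers amount to, assuming they simply read the running task's state -- these macro bodies are not part of this diff and are shown for illustration only:

/* Illustrative sketch only -- assumed definitions, not taken from this patch.
 * With no task pointer to pass, the helpers can only ever describe the IRQ
 * context of whoever is currently running, which is the only case that was
 * ever valid to query.
 */
#define lockdep_hardirq_context()	(current->hardirq_context)
#define lockdep_hardirqs_enabled()	(current->hardirqs_enabled)

Dropping the argument also removes the temptation to ask about some other task's IRQ state, an answer that could never be read reliably from a different task anyway.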