@@ -785,7 +785,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
 	else
 		printk("%d lock%s held by %s/%d:\n", depth,
-		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+		       str_plural(depth), p->comm, task_pid_nr(p));
 	/*
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
@@ -2067,6 +2067,9 @@ static noinline void print_bfs_bug(int ret)
 	/*
 	 * Breadth-first-search failed, graph got corrupted?
 	 */
+	if (ret == BFS_EQUEUEFULL)
+		pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
 	WARN(1, "lockdep bfs error:%d\n", ret);
 }
 
@@ -6196,25 +6199,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback; must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return;
+		return false;
 
 	if (delayed_free.scheduled)
-		return;
+		return false;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6240,6 +6245,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6251,14 +6257,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	lockdep_unlock();
+	raw_local_irq_restore(flags);
 
 	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	 * If there's a pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 }
 
 /*
@@ -6298,17 +6308,19 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6402,6 +6414,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6410,11 +6423,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6458,6 +6473,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;
 
 	might_sleep();
 
@@ -6478,11 +6494,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
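
Every caller above follows the same pattern: decide under the graph lock whether a deferred-free callback is needed, drop the lock and restore interrupts, and only then queue the RCU callback, so call_rcu() is never invoked with the lockdep lock held. The following is a minimal standalone user-space sketch of that ordering only, not kernel code: prepare_deferred_free(), schedule_deferred_free() and the pthread mutex are hypothetical stand-ins for prepare_call_rcu_zapped(), call_rcu() and the graph lock.

/* Sketch: update bookkeeping under the lock, schedule the callback after unlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_pending;	/* something was zapped and awaits freeing */
static bool callback_scheduled;	/* a deferred-free callback is already queued */

/* Must be called with graph_lock held; only flips state, never blocks. */
static bool prepare_deferred_free(void)
{
	if (!have_pending || callback_scheduled)
		return false;
	callback_scheduled = true;
	return true;
}

/* Stand-in for call_rcu(): must not run under graph_lock. */
static void schedule_deferred_free(void)
{
	printf("deferred free scheduled outside the lock\n");
}

static void zap_and_maybe_schedule(void)
{
	bool need_callback;

	pthread_mutex_lock(&graph_lock);
	have_pending = true;			/* ... zap entries under the lock ... */
	need_callback = prepare_deferred_free();
	pthread_mutex_unlock(&graph_lock);

	/* The callback is queued only after the lock is released. */
	if (need_callback)
		schedule_deferred_free();
}

int main(void)
{
	zap_and_maybe_schedule();
	return 0;
}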