@@ -214,8 +214,9 @@ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
 
 /**
  * nbcon_context_try_acquire_direct - Try to acquire directly
- * @ctxt:	The context of the caller
- * @cur:	The current console state
+ * @ctxt:		The context of the caller
+ * @cur:		The current console state
+ * @is_reacquire:	This acquire is a reacquire
  *
  * Acquire the console when it is released. Also acquire the console when
  * the current owner has a lower priority and the console is in a safe state.
@@ -225,32 +226,38 @@ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
  *
  * Errors:
  *
- *	-EPERM:		A panic is in progress and this is not the panic CPU.
- *			Or the current owner or waiter has the same or higher
- *			priority. No acquire method can be successful in
- *			this case.
+ *	-EPERM:		A panic is in progress and this is neither the panic
+ *			CPU nor is this a reacquire. Or the current owner or
+ *			waiter has the same or higher priority. No acquire
+ *			method can be successful in these cases.
  *
  *	-EBUSY:		The current owner has a lower priority but the console
  *			in an unsafe state. The caller should try using
  *			the handover acquire method.
  */
 static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
-					    struct nbcon_state *cur)
+					    struct nbcon_state *cur, bool is_reacquire)
 {
 	unsigned int cpu = smp_processor_id();
 	struct console *con = ctxt->console;
 	struct nbcon_state new;
 
 	do {
 		/*
-		 * Panic does not imply that the console is owned. However, it
-		 * is critical that non-panic CPUs during panic are unable to
-		 * acquire ownership in order to satisfy the assumptions of
-		 * nbcon_waiter_matches(). In particular, the assumption that
-		 * lower priorities are ignored during panic.
+		 * Panic does not imply that the console is owned. However,
+		 * since all non-panic CPUs are stopped during panic(), it
+		 * is safer to have them avoid gaining console ownership.
+		 *
+		 * If this acquire is a reacquire (and an unsafe takeover
+		 * has not previously occurred) then it is allowed to attempt
+		 * a direct acquire in panic. This gives console drivers an
+		 * opportunity to perform any necessary cleanup if they were
+		 * interrupted by the panic CPU while printing.
		 */
-		if (other_cpu_in_panic())
+		if (other_cpu_in_panic() &&
+		    (!is_reacquire || cur->unsafe_takeover)) {
 			return -EPERM;
+		}
 
 		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
 			return -EPERM;
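
The exemption above is what lets a driver clean up after being interrupted by the panic CPU. A minimal sketch of the driver-side pattern this enables, assuming hypothetical foo_* hardware helpers (only nbcon_enter_unsafe(), nbcon_exit_unsafe() and nbcon_reacquire_nobuf() are real upstream API here):

/* Hypothetical write callback; every foo_* helper is illustrative. */
static void foo_console_write_atomic(struct console *con,
				     struct nbcon_write_context *wctxt)
{
	unsigned int ier;

	/* Mark the register accesses as unsafe for takeover. */
	if (!nbcon_enter_unsafe(wctxt))
		return;	/* ownership already lost; nothing touched yet */

	ier = foo_read_ier(con);	/* save the interrupt mask */
	foo_write_ier(con, 0);		/* quiesce interrupts while printing */

	foo_tx_chars(con, wctxt->outbuf, wctxt->len);

	if (!nbcon_exit_unsafe(wctxt)) {
		/*
		 * Ownership was taken over mid-print. With this change the
		 * reacquire can succeed even during panic (as long as no
		 * unsafe takeover occurred), so the saved interrupt mask
		 * can still be restored below.
		 */
		nbcon_reacquire_nobuf(wctxt);
	}

	foo_write_ier(con, ier);	/* the cleanup the comment refers to */
}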
@@ -301,8 +308,9 @@ static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
 	 * Event #1 implies this context is EMERGENCY.
 	 * Event #2 implies the new context is PANIC.
 	 * Event #3 occurs when panic() has flushed the console.
-	 * Events #4 and #5 are not possible due to the other_cpu_in_panic()
-	 * check in nbcon_context_try_acquire_direct().
+	 * Event #4 occurs when a non-panic CPU reacquires.
+	 * Event #5 is not possible due to the other_cpu_in_panic() check
+	 * in nbcon_context_try_acquire_handover().
 	 */
 
 	return (cur->req_prio == expected_prio);
@@ -431,6 +439,16 @@ static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
 	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
 	WARN_ON_ONCE(!cur->unsafe);
 
+	/*
+	 * Panic does not imply that the console is owned. However, it
+	 * is critical that non-panic CPUs during panic are unable to
+	 * wait for a handover in order to satisfy the assumptions of
+	 * nbcon_waiter_matches(). In particular, the assumption that
+	 * lower priorities are ignored during panic.
+	 */
+	if (other_cpu_in_panic())
+		return -EPERM;
+
 	/* Handover is not possible on the same CPU. */
 	if (cur->cpu == cpu)
 		return -EBUSY;
@@ -558,7 +576,8 @@ static struct printk_buffers panic_nbcon_pbufs;
 
 /**
  * nbcon_context_try_acquire - Try to acquire nbcon console
- * @ctxt:	The context of the caller
+ * @ctxt:		The context of the caller
+ * @is_reacquire:	This acquire is a reacquire
  *
  * Context:	Under @ctxt->con->device_lock() or local_irq_save().
 * Return:	True if the console was acquired. False otherwise.
@@ -568,7 +587,7 @@ static struct printk_buffers panic_nbcon_pbufs;
  * in an unsafe state. Otherwise, on success the caller may assume
  * the console is not in an unsafe state.
  */
-static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
 {
 	unsigned int cpu = smp_processor_id();
 	struct console *con = ctxt->console;
@@ -577,7 +596,7 @@ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
 
 	nbcon_state_read(con, &cur);
 try_again:
-	err = nbcon_context_try_acquire_direct(ctxt, &cur);
+	err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
 	if (err != -EBUSY)
 		goto out;
 
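
For orientation, the surrounding acquire cascade, paraphrased rather than copied from the upstream function body (the buffer/sequence setup on success is omitted):

/*
 * Paraphrased: direct acquire first, then a handover request, then a
 * hostile takeover as the last resort. Only the direct path consults
 * is_reacquire.
 */
static int foo_acquire_cascade(struct nbcon_context *ctxt, bool is_reacquire)
{
	struct nbcon_state cur;
	int err;

	nbcon_state_read(ctxt->console, &cur);
try_again:
	err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
	if (err != -EBUSY)
		return err;

	err = nbcon_context_try_acquire_handover(ctxt, &cur);
	if (err == -EAGAIN)
		goto try_again;		/* console state changed; retry direct */
	if (err != -EBUSY)
		return err;

	return nbcon_context_try_acquire_hostile(ctxt, &cur);
}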
@@ -913,7 +932,7 @@ void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
 {
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
 
-	while (!nbcon_context_try_acquire(ctxt))
+	while (!nbcon_context_try_acquire(ctxt, true))
 		cpu_relax();
 
 	nbcon_write_context_set_buf(wctxt, NULL, 0);
@@ -1101,7 +1120,7 @@ static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
 		cant_migrate();
 	}
 
-	if (!nbcon_context_try_acquire(ctxt))
+	if (!nbcon_context_try_acquire(ctxt, false))
 		goto out;
 
 	/*
@@ -1486,7 +1505,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
 	ctxt->prio = nbcon_get_default_prio();
 	ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
 
-	if (!nbcon_context_try_acquire(ctxt))
+	if (!nbcon_context_try_acquire(ctxt, false))
 		return -EPERM;
 
 	while (nbcon_seq_read(con) < stop_seq) {
@@ -1671,6 +1690,9 @@ bool nbcon_alloc(struct console *con)
 {
 	struct nbcon_state state = { };
 
+	/* Synchronize the kthread start. */
+	lockdep_assert_console_list_lock_held();
+
 	/* The write_thread() callback is mandatory. */
 	if (WARN_ON(!con->write_thread))
 		return false;
@@ -1701,12 +1723,15 @@ bool nbcon_alloc(struct console *con)
 			return false;
 		}
 
-		if (printk_kthreads_running) {
+		if (printk_kthreads_ready && !have_boot_console) {
 			if (!nbcon_kthread_create(con)) {
 				kfree(con->pbufs);
 				con->pbufs = NULL;
 				return false;
 			}
+
+			/* Might be the first kthread. */
+			printk_kthreads_running = true;
 		}
 	}
 
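
A condensed reading of the start condition above; the rationale comments are inferred from the surrounding printk code (and the "Boot consoles share global printk buffers" comment later in this file), not stated in this hunk:

/* Illustrative helper, not upstream code. */
static bool foo_nbcon_kthread_may_start(void)
{
	/*
	 * printk_kthreads_ready: boot has progressed far enough that
	 * printer kthreads can be created and scheduled.
	 *
	 * have_boot_console: a boot console is still registered and
	 * shares the global printk buffers, so threaded printing must
	 * wait until it is unregistered.
	 */
	return printk_kthreads_ready && !have_boot_console;
}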
@@ -1716,14 +1741,30 @@ bool nbcon_alloc(struct console *con)
 /**
  * nbcon_free - Free and cleanup the nbcon console specific data
  * @con:	Console to free/cleanup nbcon data
+ *
+ * Important: @have_nbcon_console must be updated before calling
+ * this function. In particular, it can be set only when there
+ * is still another nbcon console registered.
  */
 void nbcon_free(struct console *con)
 {
 	struct nbcon_state state = { };
 
-	if (printk_kthreads_running)
+	/* Synchronize the kthread stop. */
+	lockdep_assert_console_list_lock_held();
+
+	if (printk_kthreads_running) {
 		nbcon_kthread_stop(con);
 
+		/* Might be the last nbcon console.
+		 *
+		 * Do not rely on printk_kthreads_check_locked(). It is not
+		 * called in some code paths, see nbcon_free() callers.
+		 */
+		if (!have_nbcon_console)
+			printk_kthreads_running = false;
+	}
+
 	nbcon_state_set(con, &state);
 
 	/* Boot consoles share global printk buffers. */
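
The new kernel-doc requirement implies an ordering like the following on the unregister path; foo_any_remaining_nbcon_console() is a stand-in for the caller's real bookkeeping, not an upstream helper:

/* Illustrative unregister path; the ordering is the point. */
static void foo_unregister_nbcon(struct console *con)
{
	console_list_lock();

	hlist_del_init_rcu(&con->node);		/* drop from the console list */

	/*
	 * Update have_nbcon_console before nbcon_free() so that
	 * nbcon_free() can tell whether the last nbcon console (and
	 * therefore the last printer kthread) just went away.
	 */
	have_nbcon_console = foo_any_remaining_nbcon_console();

	nbcon_free(con);	/* may clear printk_kthreads_running */

	console_list_unlock();
}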
@@ -1762,7 +1803,7 @@ bool nbcon_device_try_acquire(struct console *con)
 	ctxt->console = con;
 	ctxt->prio = NBCON_PRIO_NORMAL;
 
-	if (!nbcon_context_try_acquire(ctxt))
+	if (!nbcon_context_try_acquire(ctxt, false))
 		return false;
 
 	if (!nbcon_context_enter_unsafe(ctxt))
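
The call site above is the non-printing driver entry point, which is never a reacquire. A minimal sketch of the driver-side usage under the documented device_lock()/nbcon_device_try_acquire() contract, with foo_program_divisor() standing in for real register writes:

/* Illustrative non-printing hardware access. */
static void foo_set_baud(struct console *con, unsigned int baud)
{
	unsigned long flags;

	con->device_lock(con, &flags);	/* serialize against other contexts */

	if (nbcon_device_try_acquire(con)) {
		foo_program_divisor(con, baud);	/* hypothetical HW access */
		nbcon_device_release(con);
	}

	con->device_unlock(con, flags);
}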