@@ -1043,9 +1043,10 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_a
10431043}
10441044
10451045/*
1046- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
1047- * write_atomic() callback
1046+ * nbcon_emit_one - Print one record for an nbcon console using the
1047+ * specified callback
10481048 * @wctxt: An initialized write context struct to use for this context
1049+ * @use_atomic: True if the write_atomic() callback is to be used
10491050 *
10501051 * Return: True, when a record has been printed and there are still
10511052 * pending records. The caller might want to continue flushing.
@@ -1058,12 +1059,25 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_a
10581059 * This is an internal helper to handle the locking of the console before
10591060 * calling nbcon_emit_next_record().
10601061 */
1061- static bool nbcon_atomic_emit_one (struct nbcon_write_context * wctxt )
1062+ static bool nbcon_emit_one (struct nbcon_write_context * wctxt , bool use_atomic )
10621063{
10631064 struct nbcon_context * ctxt = & ACCESS_PRIVATE (wctxt , ctxt );
1065+ struct console * con = ctxt -> console ;
1066+ unsigned long flags ;
1067+ bool ret = false;
1068+
1069+ if (!use_atomic ) {
1070+ con -> device_lock (con , & flags );
1071+
1072+ /*
1073+ * Ensure this stays on the CPU to make handover and
1074+ * takeover possible.
1075+ */
1076+ cant_migrate ();
1077+ }
10641078
10651079 if (!nbcon_context_try_acquire (ctxt ))
1066- return false ;
1080+ goto out ;
10671081
10681082 /*
10691083 * nbcon_emit_next_record() returns false when the console was
@@ -1073,12 +1087,16 @@ static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
10731087 * The higher priority printing context takes over responsibility
10741088 * to print the pending records.
10751089 */
1076- if (!nbcon_emit_next_record (wctxt , true ))
1077- return false ;
1090+ if (!nbcon_emit_next_record (wctxt , use_atomic ))
1091+ goto out ;
10781092
10791093 nbcon_context_release (ctxt );
10801094
1081- return ctxt -> backlog ;
1095+ ret = ctxt -> backlog ;
1096+ out :
1097+ if (!use_atomic )
1098+ con -> device_unlock (con , flags );
1099+ return ret ;
10821100}
10831101
10841102/**
@@ -1163,30 +1181,8 @@ static int nbcon_kthread_func(void *__console)
11631181
11641182 con_flags = console_srcu_read_flags (con );
11651183
1166- if (console_is_usable (con , con_flags , false)) {
1167- unsigned long lock_flags ;
1168-
1169- con -> device_lock (con , & lock_flags );
1170-
1171- /*
1172- * Ensure this stays on the CPU to make handover and
1173- * takeover possible.
1174- */
1175- cant_migrate ();
1176-
1177- if (nbcon_context_try_acquire (ctxt )) {
1178- /*
1179- * If the emit fails, this context is no
1180- * longer the owner.
1181- */
1182- if (nbcon_emit_next_record (& wctxt , false)) {
1183- nbcon_context_release (ctxt );
1184- backlog = ctxt -> backlog ;
1185- }
1186- }
1187-
1188- con -> device_unlock (con , lock_flags );
1189- }
1184+ if (console_is_usable (con , con_flags , false))
1185+ backlog = nbcon_emit_one (& wctxt , false);
11901186
11911187 console_srcu_read_unlock (cookie );
11921188
@@ -1367,6 +1363,13 @@ enum nbcon_prio nbcon_get_default_prio(void)
13671363 * both the console_lock and the SRCU read lock. Otherwise it
13681364 * is set to false.
13691365 * @cookie: The cookie from the SRCU read lock.
1366+ * @use_atomic: Set true when called in an atomic or unknown context.
1367+ * It affects which nbcon callback will be used: write_atomic()
1368+ * or write_thread().
1369+ *
1370+ * When false, the write_thread() callback is used and would be
1371+ * called in a preemptible context unless disabled by the
1372+ * device_lock. The legacy handover is not allowed in this mode.
13701373 *
13711374 * Context: Any context except NMI.
13721375 * Return: True, when a record has been printed and there are still
@@ -1382,26 +1385,36 @@ enum nbcon_prio nbcon_get_default_prio(void)
13821385 * Essentially it is the nbcon version of console_emit_next_record().
13831386 */
13841387bool nbcon_legacy_emit_next_record (struct console * con , bool * handover ,
1385- int cookie )
1388+ int cookie , bool use_atomic )
13861389{
13871390 struct nbcon_write_context wctxt = { };
13881391 struct nbcon_context * ctxt = & ACCESS_PRIVATE (& wctxt , ctxt );
13891392 unsigned long flags ;
13901393 bool progress ;
13911394
1392- /* Use the same procedure as console_emit_next_record(). */
1393- printk_safe_enter_irqsave (flags );
1394- console_lock_spinning_enable ();
1395- stop_critical_timings ();
1396-
13971395 ctxt -> console = con ;
13981396 ctxt -> prio = nbcon_get_default_prio ();
13991397
1400- progress = nbcon_atomic_emit_one (& wctxt );
1398+ if (use_atomic ) {
1399+ /*
1400+ * In an atomic or unknown context, use the same procedure as
1401+ * in console_emit_next_record(). It allows handover.
1402+ */
1403+ printk_safe_enter_irqsave (flags );
1404+ console_lock_spinning_enable ();
1405+ stop_critical_timings ();
1406+ }
14011407
1402- start_critical_timings ();
1403- * handover = console_lock_spinning_disable_and_check (cookie );
1404- printk_safe_exit_irqrestore (flags );
1408+ progress = nbcon_emit_one (& wctxt , use_atomic );
1409+
1410+ if (use_atomic ) {
1411+ start_critical_timings ();
1412+ * handover = console_lock_spinning_disable_and_check (cookie );
1413+ printk_safe_exit_irqrestore (flags );
1414+ } else {
1415+ /* Non-atomic does not perform legacy spinning handovers. */
1416+ * handover = false;
1417+ }
14051418
14061419 return progress ;
14071420}
0 commit comments