@@ -1043,9 +1043,10 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_a
 }
 
 /*
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- *				write_atomic() callback
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ *			specified callback
  * @wctxt:	An initialized write context struct to use for this context
+ * @use_atomic:	True if the write_atomic() callback is to be used
  *
  * Return:	True, when a record has been printed and there are still
  *		pending records. The caller might want to continue flushing.
@@ -1058,12 +1059,25 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_a
  * This is an internal helper to handle the locking of the console before
  * calling nbcon_emit_next_record().
  */
-static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
 {
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+	struct console *con = ctxt->console;
+	unsigned long flags;
+	bool ret = false;
+
+	if (!use_atomic) {
+		con->device_lock(con, &flags);
+
+		/*
+		 * Ensure this stays on the CPU to make handover and
+		 * takeover possible.
+		 */
+		cant_migrate();
+	}
 
 	if (!nbcon_context_try_acquire(ctxt))
-		return false;
+		goto out;
 
 	/*
 	 * nbcon_emit_next_record() returns false when the console was
@@ -1073,12 +1087,16 @@ static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
 	 * The higher priority printing context takes over responsibility
 	 * to print the pending records.
 	 */
-	if (!nbcon_emit_next_record(wctxt, true))
-		return false;
+	if (!nbcon_emit_next_record(wctxt, use_atomic))
+		goto out;
 
 	nbcon_context_release(ctxt);
 
-	return ctxt->backlog;
+	ret = ctxt->backlog;
+out:
+	if (!use_atomic)
+		con->device_unlock(con, flags);
+	return ret;
 }
 
 /**
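
For readability, here is the unified helper as it reads once both hunks above are applied. This is a reconstruction pieced together from the diff itself; the kerneldoc block and the comment text falling between the two hunks are abbreviated:

static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	unsigned long flags;
	bool ret = false;

	if (!use_atomic) {
		con->device_lock(con, &flags);

		/* Stay on this CPU so handover and takeover remain possible. */
		cant_migrate();
	}

	if (!nbcon_context_try_acquire(ctxt))
		goto out;

	/*
	 * A false return from nbcon_emit_next_record() means the console
	 * was handed over or taken over; the higher priority context now
	 * owns it and prints the pending records.
	 */
	if (!nbcon_emit_next_record(wctxt, use_atomic))
		goto out;

	nbcon_context_release(ctxt);

	ret = ctxt->backlog;
out:
	if (!use_atomic)
		con->device_unlock(con, flags);
	return ret;
}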
@@ -1163,30 +1181,8 @@ static int nbcon_kthread_func(void *__console)
 
 		con_flags = console_srcu_read_flags(con);
 
-		if (console_is_usable(con, con_flags, false)) {
-			unsigned long lock_flags;
-
-			con->device_lock(con, &lock_flags);
-
-			/*
-			 * Ensure this stays on the CPU to make handover and
-			 * takeover possible.
-			 */
-			cant_migrate();
-
-			if (nbcon_context_try_acquire(ctxt)) {
-				/*
-				 * If the emit fails, this context is no
-				 * longer the owner.
-				 */
-				if (nbcon_emit_next_record(&wctxt, false)) {
-					nbcon_context_release(ctxt);
-					backlog = ctxt->backlog;
-				}
-			}
-
-			con->device_unlock(con, lock_flags);
-		}
+		if (console_is_usable(con, con_flags, false))
+			backlog = nbcon_emit_one(&wctxt, false);
 
 		console_srcu_read_unlock(cookie);
 
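
The device_lock()/device_unlock() pair that the kthread previously open-coded, and that nbcon_emit_one() now calls on the non-atomic path, is a per-console callback. A minimal sketch of what a driver-side implementation might look like, assuming a hypothetical my_uart device with its own spinlock; the callback signatures follow the calls in the hunks above, everything else is illustrative and not from this patch:

struct my_uart {
	spinlock_t lock;	/* hypothetical per-device lock */
};

/* Serialize console output against other driver activity. */
static void my_con_device_lock(struct console *con, unsigned long *flags)
{
	struct my_uart *up = con->data;	/* driver data hung off the console */

	spin_lock_irqsave(&up->lock, *flags);
}

static void my_con_device_unlock(struct console *con, unsigned long flags)
{
	struct my_uart *up = con->data;

	spin_unlock_irqrestore(&up->lock, flags);
}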
@@ -1367,6 +1363,13 @@ enum nbcon_prio nbcon_get_default_prio(void)
  *			both the console_lock and the SRCU read lock. Otherwise it
  *			is set to false.
  * @cookie:	The cookie from the SRCU read lock.
+ * @use_atomic:	Set true when called in an atomic or unknown context.
+ *			It affects which nbcon callback will be used: write_atomic()
+ *			or write_thread().
+ *
+ *			When false, the write_thread() callback is used and would be
+ *			called in a preemptible context unless disabled by the
+ *			device_lock. The legacy handover is not allowed in this mode.
  *
  * Context:	Any context except NMI.
  * Return:	True, when a record has been printed and there are still
@@ -1382,26 +1385,36 @@ enum nbcon_prio nbcon_get_default_prio(void)
  * Essentially it is the nbcon version of console_emit_next_record().
  */
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie)
+				   int cookie, bool use_atomic)
 {
 	struct nbcon_write_context wctxt = { };
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
 	unsigned long flags;
 	bool progress;
 
-	/* Use the same procedure as console_emit_next_record(). */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
-	stop_critical_timings();
-
 	ctxt->console = con;
 	ctxt->prio = nbcon_get_default_prio();
 
-	progress = nbcon_atomic_emit_one(&wctxt);
+	if (use_atomic) {
+		/*
+		 * In an atomic or unknown context, use the same procedure as
+		 * in console_emit_next_record(). It allows handover.
+		 */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
+		stop_critical_timings();
+	}
 
-	start_critical_timings();
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+	progress = nbcon_emit_one(&wctxt, use_atomic);
+
+	if (use_atomic) {
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	} else {
+		/* Non-atomic does not perform legacy spinning handovers. */
+		*handover = false;
+	}
 
 	return progress;
 }
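
Since this hunk covers the entire function body, the whole function can be read straight through after the change. A reconstruction from the hunk above:

bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
				   int cookie, bool use_atomic)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	unsigned long flags;
	bool progress;

	ctxt->console = con;
	ctxt->prio = nbcon_get_default_prio();

	if (use_atomic) {
		/*
		 * In an atomic or unknown context, use the same procedure as
		 * in console_emit_next_record(). It allows handover.
		 */
		printk_safe_enter_irqsave(flags);
		console_lock_spinning_enable();
		stop_critical_timings();
	}

	progress = nbcon_emit_one(&wctxt, use_atomic);

	if (use_atomic) {
		start_critical_timings();
		*handover = console_lock_spinning_disable_and_check(cookie);
		printk_safe_exit_irqrestore(flags);
	} else {
		/* Non-atomic does not perform legacy spinning handovers. */
		*handover = false;
	}

	return progress;
}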