@@ -1083,6 +1083,7 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  *                             write_atomic() callback
  * @con:                       The nbcon console to flush
  * @stop_seq:                  Flush up until this record
+ * @allow_unsafe_takeover:     True, to allow unsafe hostile takeovers
  *
  * Return:     0 if @con was flushed up to @stop_seq. Otherwise, error code on
  *             failure.
@@ -1101,7 +1102,8 @@ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
  * returned, it cannot be expected that the unfinalized record will become
  * available.
  */
-static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
+static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+                                            bool allow_unsafe_takeover)
 {
         struct nbcon_write_context wctxt = { };
         struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1110,6 +1112,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
         ctxt->console                   = con;
         ctxt->spinwait_max_us           = 2000;
         ctxt->prio                      = nbcon_get_default_prio();
+        ctxt->allow_unsafe_takeover     = allow_unsafe_takeover;
 
         if (!nbcon_context_try_acquire(ctxt))
                 return -EPERM;
@@ -1140,13 +1143,15 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
  *                                     write_atomic() callback
  * @con:                       The nbcon console to flush
  * @stop_seq:                  Flush up until this record
+ * @allow_unsafe_takeover:     True, to allow unsafe hostile takeovers
  *
  * This will stop flushing before @stop_seq if another context has ownership.
  * That context is then responsible for the flushing. Likewise, if new records
  * are added while this context was flushing and there is no other context
  * to handle the printing, this context must also flush those records.
  */
-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+                                           bool allow_unsafe_takeover)
 {
         unsigned long flags;
         int err;
@@ -1160,7 +1165,7 @@ static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
          */
         local_irq_save(flags);
 
-        err = __nbcon_atomic_flush_pending_con(con, stop_seq);
+        err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
 
         local_irq_restore(flags);
 
@@ -1190,8 +1195,9 @@ static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
  * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
  *                                     write_atomic() callback
  * @stop_seq:                  Flush up until this record
+ * @allow_unsafe_takeover:     True, to allow unsafe hostile takeovers
  */
-static void __nbcon_atomic_flush_pending(u64 stop_seq)
+static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
 {
         struct console *con;
         int cookie;
@@ -1209,7 +1215,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq)
                 if (nbcon_seq_read(con) >= stop_seq)
                         continue;
 
-                nbcon_atomic_flush_pending_con(con, stop_seq);
+                nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
         }
         console_srcu_read_unlock(cookie);
 }
@@ -1225,7 +1231,19 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq)
  */
 void nbcon_atomic_flush_pending(void)
 {
-        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
+        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+}
+
+/**
+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
+ *     write_atomic() callback and allowing unsafe hostile takeovers
+ *
+ * Flush the backlog up through the currently newest record. Unsafe hostile
+ * takeovers will be performed, if necessary.
+ */
+void nbcon_atomic_flush_unsafe(void)
+{
+        __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
 }
 
 /**
@@ -1342,7 +1360,7 @@ void nbcon_device_release(struct console *con)
         if (console_is_usable(con, console_srcu_read_flags(con)) &&
             prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
                 if (!have_boot_console) {
-                        __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+                        __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
                 } else if (!is_printk_legacy_deferred()) {
                         if (console_trylock())
                                 console_unlock();
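
Taken together, the hunks thread a single allow_unsafe_takeover flag from the public entry points down through __nbcon_atomic_flush_pending_con() into the nbcon_context used by nbcon_context_try_acquire(). Only the new nbcon_atomic_flush_unsafe() passes true; every pre-existing caller now passes false explicitly, so behavior outside the unsafe path is unchanged. A minimal sketch of how a last-resort caller might use the new entry point follows; the callsite and the surrounding function name are illustrative assumptions, not part of this diff — only nbcon_atomic_flush_pending() and nbcon_atomic_flush_unsafe() are defined by the patch.

/*
 * Illustrative sketch only: a hypothetical end-of-panic flush sequence.
 * This callsite is not part of the hunks above.
 */
static void example_flush_on_panic(void)
{
        /* First attempt the normal flush, which honors console ownership. */
        nbcon_atomic_flush_pending();

        /*
         * Last resort: flush again while permitting unsafe hostile
         * takeovers. Console ownership is seized even from an owner that
         * has marked its hardware state unsafe, trading driver-state
         * integrity for a chance to emit the final messages.
         */
        nbcon_atomic_flush_unsafe();
}

Passing the flag down as a parameter, rather than toggling a global mode, keeps the unsafe behavior scoped to this one call chain.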