@@ -203,8 +203,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
 	if (unlikely(data->ghcb_active)) {
 		/* GHCB is already in use - save its contents */
 
-		if (unlikely(data->backup_ghcb_active))
-			return NULL;
+		if (unlikely(data->backup_ghcb_active)) {
+			/*
+			 * Backup-GHCB is also already in use. There is no way
+			 * to continue here so just kill the machine. To make
+			 * panic() work, mark GHCBs inactive so that messages
+			 * can be printed out.
+			 */
+			data->ghcb_active        = false;
+			data->backup_ghcb_active = false;
+
+			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+		}
 
 		/* Mark backup_ghcb active before writing to it */
 		data->backup_ghcb_active = true;
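
Reviewer note: this hunk allows at most two concurrent GHCB users per CPU, the live page plus one backup, so a third nested #VC has nowhere to save state and the machine is killed rather than returning NULL to a caller that cannot recover. Below is a minimal user-space model of that bookkeeping, a sketch only: struct ghcb is reduced to an opaque page and panic() is stood in by abort().

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct ghcb { unsigned char page[4096]; };	/* stand-in for the real GHCB */

	struct runtime_data {
		struct ghcb ghcb_page;
		struct ghcb backup_ghcb;
		bool ghcb_active;
		bool backup_ghcb_active;
	};

	struct ghcb_state { struct ghcb *ghcb; };

	static struct ghcb *get_ghcb(struct runtime_data *d, struct ghcb_state *st)
	{
		if (d->ghcb_active) {
			/* Third nesting level: no storage left, so bail out hard. */
			if (d->backup_ghcb_active) {
				d->ghcb_active = d->backup_ghcb_active = false;
				fprintf(stderr, "GHCB and backup GHCB already in use\n");
				abort();
			}
			/* Save the outer user's GHCB so it can be restored on release. */
			d->backup_ghcb_active = true;
			memcpy(&d->backup_ghcb, &d->ghcb_page, sizeof(d->backup_ghcb));
			st->ghcb = &d->backup_ghcb;
		} else {
			st->ghcb = NULL;
			d->ghcb_active = true;
		}
		return &d->ghcb_page;
	}

The matching release side is sketched after the relocated sev_es_put_ghcb() hunk below.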
@@ -221,24 +231,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
 	return ghcb;
 }
 
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
-	struct sev_es_runtime_data *data;
-	struct ghcb *ghcb;
-
-	data = this_cpu_read(runtime_data);
-	ghcb = &data->ghcb_page;
-
-	if (state->ghcb) {
-		/* Restore GHCB from Backup */
-		*ghcb = *state->ghcb;
-		data->backup_ghcb_active = false;
-		state->ghcb = NULL;
-	} else {
-		data->ghcb_active = false;
-	}
-}
-
 
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -323,31 +315,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 	u16 d2;
 	u8  d1;
 
-	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-	if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
-		memcpy(dst, buf, size);
-		return ES_OK;
-	}
-
+	/*
+	 * This function uses __put_user() independent of whether kernel or user
+	 * memory is accessed. This works fine because __put_user() does no
+	 * sanity checks of the pointer being accessed. All that it does is
+	 * to report when the access failed.
+	 *
+	 * Also, this function runs in atomic context, so __put_user() is not
+	 * allowed to sleep. The page-fault handler detects that it is running
+	 * in atomic context and will not try to take mmap_sem and handle the
+	 * fault, so additional pagefault_enable()/disable() calls are not
+	 * needed.
+	 *
+	 * The access can't be done via copy_to_user() here because
+	 * vc_write_mem() must not use string instructions to access unsafe
+	 * memory. The reason is that MOVS is emulated by the #VC handler by
+	 * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whatever of them is the MMIO access. Using string
+	 * instructions here would cause infinite nesting.
+	 */
 	switch (size) {
 	case 1:
 		memcpy(&d1, buf, 1);
-		if (put_user(d1, target))
+		if (__put_user(d1, target))
 			goto fault;
 		break;
 	case 2:
 		memcpy(&d2, buf, 2);
-		if (put_user(d2, target))
+		if (__put_user(d2, target))
 			goto fault;
 		break;
 	case 4:
 		memcpy(&d4, buf, 4);
-		if (put_user(d4, target))
+		if (__put_user(d4, target))
 			goto fault;
 		break;
 	case 8:
 		memcpy(&d8, buf, 8);
-		if (put_user(d8, target))
+		if (__put_user(d8, target))
 			goto fault;
 		break;
 	default:
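
The new comment's "no string instructions" constraint is why the helper does sized single accesses at all: copy_to_user() (like a plain memcpy()) is free to compile down to REP MOVS, and a REP MOVS that touches emulated MMIO would raise another #VC whose emulation would again use string instructions, nesting forever. As a rough stand-alone illustration of the sized-single-access pattern, outside the kernel's uaccess machinery and without the fault reporting that __put_user() provides; vc_read_mem() in the next hunk mirrors it on the load side:

	#include <stdint.h>

	/* Illustrative only: one fixed-size store per case, which the compiler
	 * cannot legally fuse into a string (REP MOVS/STOS) instruction the
	 * way it may for an arbitrary-length memcpy(). */
	static int write_sized(volatile void *target, const void *buf, unsigned int size)
	{
		switch (size) {
		case 1: *(volatile uint8_t  *)target = *(const uint8_t  *)buf; break;
		case 2: *(volatile uint16_t *)target = *(const uint16_t *)buf; break;
		case 4: *(volatile uint32_t *)target = *(const uint32_t *)buf; break;
		case 8: *(volatile uint64_t *)target = *(const uint64_t *)buf; break;
		default: return -1;	/* unsupported access width */
		}
		return 0;
	}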
@@ -378,30 +383,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	u16 d2;
 	u8  d1;
 
-	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-	if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
-		memcpy(buf, src, size);
-		return ES_OK;
-	}
-
+	/*
+	 * This function uses __get_user() independent of whether kernel or user
+	 * memory is accessed. This works fine because __get_user() does no
+	 * sanity checks of the pointer being accessed. All that it does is
+	 * to report when the access failed.
+	 *
+	 * Also, this function runs in atomic context, so __get_user() is not
+	 * allowed to sleep. The page-fault handler detects that it is running
+	 * in atomic context and will not try to take mmap_sem and handle the
+	 * fault, so additional pagefault_enable()/disable() calls are not
+	 * needed.
+	 *
+	 * The access can't be done via copy_from_user() here because
+	 * vc_read_mem() must not use string instructions to access unsafe
+	 * memory. The reason is that MOVS is emulated by the #VC handler by
+	 * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whatever of them is the MMIO access. Using string
+	 * instructions here would cause infinite nesting.
+	 */
 	switch (size) {
 	case 1:
-		if (get_user(d1, s))
+		if (__get_user(d1, s))
 			goto fault;
 		memcpy(buf, &d1, 1);
 		break;
 	case 2:
-		if (get_user(d2, s))
+		if (__get_user(d2, s))
 			goto fault;
 		memcpy(buf, &d2, 2);
 		break;
 	case 4:
-		if (get_user(d4, s))
+		if (__get_user(d4, s))
 			goto fault;
 		memcpy(buf, &d4, 4);
 		break;
 	case 8:
-		if (get_user(d8, s))
+		if (__get_user(d8, s))
 			goto fault;
 		memcpy(buf, &d8, 8);
 		break;
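
Both helpers bail to a fault label that sits outside the hunks shown here. For orientation only, not part of this diff: that label does not handle the fault, it records it in ctxt->fi and returns ES_EXCEPTION, roughly along these lines:

	fault:
		if (user_mode(ctxt->regs))
			error_code |= X86_PF_USER;

		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = error_code;
		ctxt->fi.cr2        = (unsigned long)s;	/* faulting address */

		return ES_EXCEPTION;

That recorded state is what the new X86_TRAP_PF case added to vc_forward_exception() further down replays.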
@@ -461,6 +479,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
 
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+{
+	struct sev_es_runtime_data *data;
+	struct ghcb *ghcb;
+
+	data = this_cpu_read(runtime_data);
+	ghcb = &data->ghcb_page;
+
+	if (state->ghcb) {
+		/* Restore GHCB from Backup */
+		*ghcb = *state->ghcb;
+		data->backup_ghcb_active = false;
+		state->ghcb = NULL;
+	} else {
+		/*
+		 * Invalidate the GHCB so a VMGEXIT instruction issued
+		 * from userspace won't appear to be valid.
+		 */
+		vc_ghcb_invalidate(ghcb);
+		data->ghcb_active = false;
+	}
+}
+
 void noinstr __sev_es_nmi_complete(void)
 {
 	struct ghcb_state state;
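
sev_es_put_ghcb() moves below the #include of sev-shared.c because it now calls vc_ghcb_invalidate(), which is defined in that shared file. Completing the user-space model from the first hunk, the release side either restores the outer user's saved contents or, at the outermost level, scrubs the page before marking it free. The memset() below is a simplifying stand-in for vc_ghcb_invalidate(), which clears the GHCB's exit-code field and valid bitmap rather than the whole page:

	static void put_ghcb(struct runtime_data *d, struct ghcb_state *st)
	{
		if (st->ghcb) {
			/* Inner nesting level: restore the outer user's contents. */
			memcpy(&d->ghcb_page, st->ghcb, sizeof(d->ghcb_page));
			d->backup_ghcb_active = false;
			st->ghcb = NULL;
		} else {
			/*
			 * Outermost level: scrub the page so stale contents can't
			 * make a VMGEXIT issued from userspace look valid.
			 */
			memset(&d->ghcb_page, 0, sizeof(d->ghcb_page));
			d->ghcb_active = false;
		}
	}

Every get_ghcb() is paired with a put_ghcb() on the same ghcb_state, so the active/backup flags unwind in strict LIFO order.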
@@ -1255,6 +1296,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
 	case X86_TRAP_UD:
 		exc_invalid_op(ctxt->regs);
 		break;
+	case X86_TRAP_PF:
+		write_cr2(ctxt->fi.cr2);
+		exc_page_fault(ctxt->regs, error_code);
+		break;
 	case X86_TRAP_AC:
 		exc_alignment_check(ctxt->regs, error_code);
 		break;
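
The write_cr2() must run first because exc_page_fault() reads the faulting address from CR2, just as on a real #PF. A compressed stand-alone model of the record-then-replay dispatch (the fake_* handler is a placeholder, not a kernel API):

	enum { X86_TRAP_UD = 6, X86_TRAP_PF = 14, X86_TRAP_AC = 17 };

	struct fault_info {
		int vector;
		unsigned long error_code;
		unsigned long cr2;		/* faulting linear address */
	};

	/* Placeholder for the write_cr2() + exc_page_fault() pair. */
	static void fake_page_fault(unsigned long error_code, unsigned long cr2)
	{
		/* real path: write_cr2(cr2); exc_page_fault(regs, error_code); */
	}

	static void forward_exception(const struct fault_info *fi)
	{
		switch (fi->vector) {
		case X86_TRAP_PF:
			fake_page_fault(fi->error_code, fi->cr2);
			break;
		/* X86_TRAP_UD, X86_TRAP_AC, ... dispatch the same way */
		}
	}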
@@ -1284,7 +1329,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
  */
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
-	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
 	irqentry_state_t irq_state;
 	struct ghcb_state state;
 	struct es_em_ctxt ctxt;
@@ -1310,16 +1354,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 	 */
 
 	ghcb = sev_es_get_ghcb(&state);
-	if (!ghcb) {
-		/*
-		 * Mark GHCBs inactive so that panic() is able to print the
-		 * message.
-		 */
-		data->ghcb_active = false;
-		data->backup_ghcb_active = false;
-
-		panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
-	}
 
 	vc_ghcb_invalidate(ghcb);
 	result = vc_init_em_ctxt(&ctxt, regs, error_code);