@@ -321,3 +321,313 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
 
 	return ret;
 }
+
+int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+	int ret;
+	unsigned int op8, opcode, rd;
+	struct kvm_run *run = vcpu->run;
+
+	run->mmio.phys_addr = vcpu->arch.badv;
+	vcpu->mmio_needed = 2;	/* signed */
+	op8 = (inst.word >> 24) & 0xff;
+	ret = EMULATE_DO_MMIO;
+
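+	/*
+	 * Bits [31:24] of the instruction word select the opcode family:
+	 * 0x24-0x27 covers ldptr.w/d, 0x28-0x2e the ld.b/h/w/d forms and
+	 * their unsigned variants, and 0x38 the register-indexed ldx forms.
+	 * mmio_needed doubles as the extension mode consumed later by
+	 * kvm_complete_mmio_read(): 2 sign-extends, 1 zero-extends.
+	 */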
+	switch (op8) {
+	case 0x24 ... 0x27:	/* ldptr.w/d process */
+		rd = inst.reg2i14_format.rd;
+		opcode = inst.reg2i14_format.opcode;
+
+		switch (opcode) {
+		case ldptrw_op:
+			run->mmio.len = 4;
+			break;
+		case ldptrd_op:
+			run->mmio.len = 8;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
+		rd = inst.reg2i12_format.rd;
+		opcode = inst.reg2i12_format.opcode;
+
+		switch (opcode) {
+		case ldb_op:
+			run->mmio.len = 1;
+			break;
+		case ldbu_op:
+			vcpu->mmio_needed = 1;	/* unsigned */
+			run->mmio.len = 1;
+			break;
+		case ldh_op:
+			run->mmio.len = 2;
+			break;
+		case ldhu_op:
+			vcpu->mmio_needed = 1;	/* unsigned */
+			run->mmio.len = 2;
+			break;
+		case ldw_op:
+			run->mmio.len = 4;
+			break;
+		case ldwu_op:
+			vcpu->mmio_needed = 1;	/* unsigned */
+			run->mmio.len = 4;
+			break;
+		case ldd_op:
+			run->mmio.len = 8;
+			break;
+		default:
+			ret = EMULATE_FAIL;
+			break;
+		}
+		break;
+	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
+		rd = inst.reg3_format.rd;
+		opcode = inst.reg3_format.opcode;
+
+		switch (opcode) {
+		case ldxb_op:
+			run->mmio.len = 1;
+			break;
+		case ldxbu_op:
+			run->mmio.len = 1;
+			vcpu->mmio_needed = 1;	/* unsigned */
+			break;
+		case ldxh_op:
+			run->mmio.len = 2;
+			break;
+		case ldxhu_op:
+			run->mmio.len = 2;
+			vcpu->mmio_needed = 1;	/* unsigned */
+			break;
+		case ldxw_op:
+			run->mmio.len = 4;
+			break;
+		case ldxwu_op:
+			run->mmio.len = 4;
+			vcpu->mmio_needed = 1;	/* unsigned */
+			break;
+		case ldxd_op:
+			run->mmio.len = 8;
+			break;
+		default:
+			ret = EMULATE_FAIL;
+			break;
+		}
+		break;
+	default:
+		ret = EMULATE_FAIL;
+	}
+
+	if (ret == EMULATE_DO_MMIO) {
+		/* Set for kvm_complete_mmio_read() use */
+		vcpu->arch.io_gpr = rd;
+		run->mmio.is_write = 0;
+		vcpu->mmio_is_write = 0;
+	} else {
+		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+			inst.word, vcpu->arch.pc, vcpu->arch.badv);
+		kvm_arch_vcpu_dump_regs(vcpu);
+		vcpu->mmio_needed = 0;
+	}
+
+	return ret;
+}
+
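+/*
+ * Called once userspace has completed the MMIO read and filled in
+ * run->mmio.data: extend the loaded value according to mmio_needed
+ * (2 = signed, 1 = unsigned), write it to the GPR saved in io_gpr,
+ * and advance the PC past the faulting load.
+ */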
+int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+
+	/* Update with new PC */
+	update_pc(&vcpu->arch);
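+	/*
+	 * Reading run->mmio.data through a signed type sign-extends the
+	 * value into the 64-bit GPR; the unsigned casts zero-extend it.
+	 */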
+	switch (run->mmio.len) {
+	case 1:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(s8 *)run->mmio.data;
+		else
+			*gpr = *(u8 *)run->mmio.data;
+		break;
+	case 2:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(s16 *)run->mmio.data;
+		else
+			*gpr = *(u16 *)run->mmio.data;
+		break;
+	case 4:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(s32 *)run->mmio.data;
+		else
+			*gpr = *(u32 *)run->mmio.data;
+		break;
+	case 8:
+		*gpr = *(s64 *)run->mmio.data;
+		break;
+	default:
+		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
+			run->mmio.len, vcpu->arch.badv);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	return er;
+}
+
+int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+	int ret;
+	unsigned int rd, op8, opcode;
+	unsigned long curr_pc, rd_val = 0;
+	struct kvm_run *run = vcpu->run;
+	void *data = run->mmio.data;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	update_pc(&vcpu->arch);
+
+	op8 = (inst.word >> 24) & 0xff;
+	run->mmio.phys_addr = vcpu->arch.badv;
+	ret = EMULATE_DO_MMIO;
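+	/*
+	 * Decode the store width from the opcode and stage the source
+	 * GPR value in run->mmio.data for userspace to write out.
+	 */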
+	switch (op8) {
+	case 0x24 ... 0x27:	/* stptr.w/d process */
+		rd = inst.reg2i14_format.rd;
+		opcode = inst.reg2i14_format.opcode;
+
+		switch (opcode) {
+		case stptrw_op:
+			run->mmio.len = 4;
+			*(unsigned int *)data = vcpu->arch.gprs[rd];
+			break;
+		case stptrd_op:
+			run->mmio.len = 8;
+			*(unsigned long *)data = vcpu->arch.gprs[rd];
+			break;
+		default:
+			ret = EMULATE_FAIL;
+			break;
+		}
+		break;
+	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
+		rd = inst.reg2i12_format.rd;
+		opcode = inst.reg2i12_format.opcode;
+		rd_val = vcpu->arch.gprs[rd];
+
+		switch (opcode) {
+		case stb_op:
+			run->mmio.len = 1;
+			*(unsigned char *)data = rd_val;
+			break;
+		case sth_op:
+			run->mmio.len = 2;
+			*(unsigned short *)data = rd_val;
+			break;
+		case stw_op:
+			run->mmio.len = 4;
+			*(unsigned int *)data = rd_val;
+			break;
+		case std_op:
+			run->mmio.len = 8;
+			*(unsigned long *)data = rd_val;
+			break;
+		default:
+			ret = EMULATE_FAIL;
+			break;
+		}
+		break;
+	case 0x38:	/* stx.b/h/w/d process */
+		rd = inst.reg3_format.rd;
+		opcode = inst.reg3_format.opcode;
+
+		switch (opcode) {
+		case stxb_op:
+			run->mmio.len = 1;
+			*(unsigned char *)data = vcpu->arch.gprs[rd];
+			break;
+		case stxh_op:
+			run->mmio.len = 2;
+			*(unsigned short *)data = vcpu->arch.gprs[rd];
+			break;
+		case stxw_op:
+			run->mmio.len = 4;
+			*(unsigned int *)data = vcpu->arch.gprs[rd];
+			break;
+		case stxd_op:
+			run->mmio.len = 8;
+			*(unsigned long *)data = vcpu->arch.gprs[rd];
+			break;
+		default:
+			ret = EMULATE_FAIL;
+			break;
+		}
+		break;
+	default:
+		ret = EMULATE_FAIL;
+	}
+
+	if (ret == EMULATE_DO_MMIO) {
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+	} else {
+		/* Rollback PC if emulation was unsuccessful */
+		vcpu->arch.pc = curr_pc;
+		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+			inst.word, vcpu->arch.pc, vcpu->arch.badv);
+		kvm_arch_vcpu_dump_regs(vcpu);
+	}
+
+	return ret;
+}
+
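+/*
+ * Common handler for read and write faults: kvm_handle_mm_fault()
+ * first tries to resolve the fault as ordinary guest memory; a
+ * non-zero return means it could not, so the access is emulated as
+ * MMIO. An instruction fetch fault is instead reflected back to the
+ * guest as an ADEF address error.
+ */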
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+{
+	int ret;
+	larch_inst inst;
+	enum emulation_result er = EMULATE_DONE;
+	struct kvm_run *run = vcpu->run;
+	unsigned long badv = vcpu->arch.badv;
+
+	ret = kvm_handle_mm_fault(vcpu, badv, write);
+	if (ret) {
+		/* Treat as MMIO */
+		inst.word = vcpu->arch.badi;
+		if (write) {
+			er = kvm_emu_mmio_write(vcpu, inst);
+		} else {
+			/* A code fetch fault doesn't count as an MMIO */
+			if (kvm_is_ifetch_fault(&vcpu->arch)) {
+				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
+				return RESUME_GUEST;
+			}
+
+			er = kvm_emu_mmio_read(vcpu, inst);
+		}
+	}
+
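+	/*
+	 * EMULATE_DONE resumes the guest directly, EMULATE_DO_MMIO exits
+	 * to userspace with KVM_EXIT_MMIO, and any failure injects an
+	 * ADEM address error back into the guest.
+	 */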
+	if (er == EMULATE_DONE) {
+		ret = RESUME_GUEST;
+	} else if (er == EMULATE_DO_MMIO) {
+		run->exit_reason = KVM_EXIT_MMIO;
+		ret = RESUME_HOST;
+	} else {
+		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+		ret = RESUME_GUEST;
+	}
+
+	return ret;
+}
+
+static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_handle_rdwr_fault(vcpu, false);
+}
+
+static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_handle_rdwr_fault(vcpu, true);
+}