@@ -141,6 +141,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (rc)
 		return rc;
 
+	/*
+	 * Setup SBI extensions
+	 * NOTE: This must be the last thing to be initialized.
+	 */
+	kvm_riscv_vcpu_sbi_init(vcpu);
+
 	/* Reset VCPU */
 	kvm_riscv_reset_vcpu(vcpu);
 
@@ -471,31 +477,38 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
+static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 {
-	u64 henvcfg = 0;
+	const unsigned long *isa = vcpu->arch.isa;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	if (riscv_isa_extension_available(isa, SVPBMT))
-		henvcfg |= ENVCFG_PBMTE;
+		cfg->henvcfg |= ENVCFG_PBMTE;
 
 	if (riscv_isa_extension_available(isa, SSTC))
-		henvcfg |= ENVCFG_STCE;
+		cfg->henvcfg |= ENVCFG_STCE;
 
 	if (riscv_isa_extension_available(isa, ZICBOM))
-		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
 
 	if (riscv_isa_extension_available(isa, ZICBOZ))
-		henvcfg |= ENVCFG_CBZE;
-
-	csr_write(CSR_HENVCFG, henvcfg);
-#ifdef CONFIG_32BIT
-	csr_write(CSR_HENVCFGH, henvcfg >> 32);
-#endif
+		cfg->henvcfg |= ENVCFG_CBZE;
+
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
+		if (riscv_isa_extension_available(isa, SSAIA))
+			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
+					  SMSTATEEN0_AIA |
+					  SMSTATEEN0_AIA_ISEL;
+		if (riscv_isa_extension_available(isa, SMSTATEEN))
+			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
+	}
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	csr_write(CSR_VSSTATUS, csr->vsstatus);
 	csr_write(CSR_VSIE, csr->vsie);
@@ -506,8 +519,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	csr_write(CSR_VSTVAL, csr->vstval);
 	csr_write(CSR_HVIP, csr->hvip);
 	csr_write(CSR_VSATP, csr->vsatp);
-
-	kvm_riscv_vcpu_update_config(vcpu->arch.isa);
+	csr_write(CSR_HENVCFG, cfg->henvcfg);
+	if (IS_ENABLED(CONFIG_32BIT))
+		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+		if (IS_ENABLED(CONFIG_32BIT))
+			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+	}
 
 	kvm_riscv_gstage_update_hgatp(vcpu);
 
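A side note on the hunk above: the old #ifdef CONFIG_32BIT guard becomes if (IS_ENABLED(CONFIG_32BIT)), which folds to a compile-time constant, so rv64 builds still drop the high-half write while rv32 splits the 64-bit henvcfg value across the HENVCFG/HENVCFGH pair. A minimal sketch of that split, using a hypothetical helper name (write_henvcfg64() is not part of the patch):

/*
 * Illustrative only, not from the patch: on rv32 a 64-bit henvcfg
 * value is programmed through two 32-bit CSRs, on rv64 through one.
 * IS_ENABLED(CONFIG_32BIT) is a constant, so the dead branch is
 * discarded at compile time.
 */
static inline void write_henvcfg64(u64 val)
{
	csr_write(CSR_HENVCFG, (unsigned long)val);
	if (IS_ENABLED(CONFIG_32BIT))
		csr_write(CSR_HENVCFGH, (unsigned long)(val >> 32));
}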
@@ -606,6 +625,32 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+						     smcsr->sstateen0);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+					    vcpu->arch.host_sstateen0);
+}
+
 /*
  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
  * the vCPU is running.
@@ -615,10 +660,12 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
 	__kvm_riscv_switch_to(&vcpu->arch);
 	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
+	kvm_riscv_vcpu_swap_in_host_state(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -627,6 +674,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_trap trap;
 	struct kvm_run *run = vcpu->run;
 
+	if (!vcpu->arch.ran_atleast_once)
+		kvm_riscv_vcpu_setup_config(vcpu);
+
 	/* Mark this VCPU ran at least once */
 	vcpu->arch.ran_atleast_once = true;
 