@@ -17,8 +17,8 @@
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/cacheflush.h>
+#include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_vector.h>
 
 #define CREATE_TRACE_POINTS
@@ -368,10 +368,10 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	/* Read current HVIP and VSIE CSRs */
-	csr->vsie = csr_read(CSR_VSIE);
+	csr->vsie = ncsr_read(CSR_VSIE);
 
 	/* Sync-up HVIP.VSSIP bit changes does by Guest */
-	hvip = csr_read(CSR_HVIP);
+	hvip = ncsr_read(CSR_HVIP);
 	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
 		if (hvip & (1UL << IRQ_VS_SOFT)) {
 			if (!test_and_set_bit(IRQ_VS_SOFT,
@@ -568,26 +568,49 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
-	csr_write(CSR_VSSTATUS, csr->vsstatus);
-	csr_write(CSR_VSIE, csr->vsie);
-	csr_write(CSR_VSTVEC, csr->vstvec);
-	csr_write(CSR_VSSCRATCH, csr->vsscratch);
-	csr_write(CSR_VSEPC, csr->vsepc);
-	csr_write(CSR_VSCAUSE, csr->vscause);
-	csr_write(CSR_VSTVAL, csr->vstval);
-	csr_write(CSR_HEDELEG, cfg->hedeleg);
-	csr_write(CSR_HVIP, csr->hvip);
-	csr_write(CSR_VSATP, csr->vsatp);
-	csr_write(CSR_HENVCFG, cfg->henvcfg);
-	if (IS_ENABLED(CONFIG_32BIT))
-		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
-	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
+		nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
+		nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
+		nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
+		nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
+		nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
+		nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
+		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
+		nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
+		nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
+		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+		if (IS_ENABLED(CONFIG_32BIT))
+			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
+	} else {
+		csr_write(CSR_VSSTATUS, csr->vsstatus);
+		csr_write(CSR_VSIE, csr->vsie);
+		csr_write(CSR_VSTVEC, csr->vstvec);
+		csr_write(CSR_VSSCRATCH, csr->vsscratch);
+		csr_write(CSR_VSEPC, csr->vsepc);
+		csr_write(CSR_VSCAUSE, csr->vscause);
+		csr_write(CSR_VSTVAL, csr->vstval);
+		csr_write(CSR_HEDELEG, cfg->hedeleg);
+		csr_write(CSR_HVIP, csr->hvip);
+		csr_write(CSR_VSATP, csr->vsatp);
+		csr_write(CSR_HENVCFG, cfg->henvcfg);
 		if (IS_ENABLED(CONFIG_32BIT))
-			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
 	}
 
 	kvm_riscv_gstage_update_hgatp(vcpu);
@@ -610,6 +633,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	vcpu->cpu = -1;
@@ -625,15 +649,28 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 					 vcpu->arch.isa);
 	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
 
-	csr->vsstatus = csr_read(CSR_VSSTATUS);
-	csr->vsie = csr_read(CSR_VSIE);
-	csr->vstvec = csr_read(CSR_VSTVEC);
-	csr->vsscratch = csr_read(CSR_VSSCRATCH);
-	csr->vsepc = csr_read(CSR_VSEPC);
-	csr->vscause = csr_read(CSR_VSCAUSE);
-	csr->vstval = csr_read(CSR_VSTVAL);
-	csr->hvip = csr_read(CSR_HVIP);
-	csr->vsatp = csr_read(CSR_VSATP);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
+		csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
+		csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
+		csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
+		csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
+		csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
+		csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
+		csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
+		csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
+	} else {
+		csr->vsstatus = csr_read(CSR_VSSTATUS);
+		csr->vsie = csr_read(CSR_VSIE);
+		csr->vstvec = csr_read(CSR_VSTVEC);
+		csr->vsscratch = csr_read(CSR_VSSCRATCH);
+		csr->vsepc = csr_read(CSR_VSEPC);
+		csr->vscause = csr_read(CSR_VSCAUSE);
+		csr->vstval = csr_read(CSR_VSTVAL);
+		csr->hvip = csr_read(CSR_HVIP);
+		csr->vsatp = csr_read(CSR_VSATP);
+	}
 }
 
 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
@@ -688,7 +725,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
-	csr_write(CSR_HVIP, csr->hvip);
+	ncsr_write(CSR_HVIP, csr->hvip);
 	kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
@@ -735,7 +772,9 @@ static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
 
-	hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+	hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+
+	nsync_csr(-1UL);
 
 	__kvm_riscv_switch_to(&vcpu->arch);
 
@@ -870,8 +909,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trap.sepc = vcpu->arch.guest_context.sepc;
 		trap.scause = csr_read(CSR_SCAUSE);
 		trap.stval = csr_read(CSR_STVAL);
-		trap.htval = csr_read(CSR_HTVAL);
-		trap.htinst = csr_read(CSR_HTINST);
+		trap.htval = ncsr_read(CSR_HTVAL);
+		trap.htinst = ncsr_read(CSR_HTINST);
 
 		/* Syncup interrupts state with HW */
 		kvm_riscv_vcpu_sync_interrupts(vcpu);