@@ -32,6 +32,126 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
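+/*
+ * Save the host's perf counter state. The perfctrl CSRs are written to
+ * zero so that no host event keeps counting while the guest runs;
+ * write_csr_perfctrl*() returns the previous CSR value, so one statement
+ * both stops the counter and records the old control value.
+ */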
+static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_context *context;
+
+	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+	context->perf_cntr[0] = read_csr_perfcntr0();
+	context->perf_cntr[1] = read_csr_perfcntr1();
+	context->perf_cntr[2] = read_csr_perfcntr2();
+	context->perf_cntr[3] = read_csr_perfcntr3();
+	context->perf_ctrl[0] = write_csr_perfctrl0(0);
+	context->perf_ctrl[1] = write_csr_perfctrl1(0);
+	context->perf_ctrl[2] = write_csr_perfctrl2(0);
+	context->perf_ctrl[3] = write_csr_perfctrl3(0);
+}
+
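+/* Reload the host perf counter state saved by kvm_save_host_pmu() */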
+static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_context *context;
+
+	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+	write_csr_perfcntr0(context->perf_cntr[0]);
+	write_csr_perfcntr1(context->perf_cntr[1]);
+	write_csr_perfcntr2(context->perf_cntr[2]);
+	write_csr_perfcntr3(context->perf_cntr[3]);
+	write_csr_perfctrl0(context->perf_ctrl[0]);
+	write_csr_perfctrl1(context->perf_ctrl[1]);
+	write_csr_perfctrl2(context->perf_ctrl[2]);
+	write_csr_perfctrl3(context->perf_ctrl[3]);
+}
+
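+/*
+ * Save the guest's PMU CSRs into the software CSR array. The perfctrl
+ * CSRs are read-and-cleared so that guest events stop counting once
+ * the host owns the hardware again.
+ */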
+static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
+{
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
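+/* Load the guest's PMU CSRs back into hardware from the software copy */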
+static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
+{
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
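+/*
+ * Hand the hardware counters to the vCPU: park the host PMU state,
+ * grant the guest direct PMU CSR access via GCFG.GPERF, then load the
+ * guest PMU CSRs.
+ */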
+static int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+
+	if (!kvm_guest_has_pmu(&vcpu->arch))
+		return -EINVAL;
+
+	kvm_save_host_pmu(vcpu);
+
+	/* Set GCFG.GPERF to the number of counters the guest may use (PM0-PM(num)) */
+	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+	write_csr_gcfg(val);
+
+	kvm_restore_guest_pmu(vcpu);
+
+	return 0;
+}
+
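+/*
+ * Give the hardware counters back to the host on VM exit: save the
+ * guest PMU CSRs, revoke guest PMU access, and reload the host state.
+ */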
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+		return;
+
+	kvm_save_guest_pmu(vcpu);
+
+	/* Disable PMU access from the guest */
+	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+	/*
+	 * Clear KVM_LARCH_PMU if the guest has no PMU event enabled at
+	 * exit time, so that the PMU CSR context need not be switched on
+	 * the next trap into the guest.
+	 */
+	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+	if (!(val & KVM_PMU_EVENT_ENABLED))
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+
+	kvm_restore_host_pmu(vcpu);
+}
+
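+/* Re-request PMU ownership on vCPU load if the vCPU was using it */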
+static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.aux_inuse & KVM_LARCH_PMU)
+		kvm_make_request(KVM_REQ_PMU, vcpu);
+}
+
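+/* On guest entry, take ownership of the counters if KVM_REQ_PMU is pending */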
+static void kvm_check_pmu(struct kvm_vcpu *vcpu)
+{
+	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
+		kvm_own_pmu(vcpu);
+		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
+	}
+}
+
 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	u32 version;
@@ -159,6 +279,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
 		/* Make sure the vcpu mode has been written */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 		kvm_check_vpid(vcpu);
+		kvm_check_pmu(vcpu);
 
 		/*
 		 * Called after function kvm_check_vpid()
@@ -196,6 +317,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Set a default exit reason */
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 
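+	/* Return the hardware counters to the host before interrupts are re-enabled */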
+	kvm_lose_pmu(vcpu);
+
 	guest_timing_exit_irqoff();
 	guest_state_exit_irqoff();
 	local_irq_enable();
@@ -469,6 +592,22 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
 
 	kvm_write_sw_gcsr(csr, id, val);
 
+	/*
+	 * If a PMU CSR was just written and any of the perfctrl CSRs now
+	 * has an event enabled, raise KVM_REQ_PMU so that the hardware
+	 * counters are handed to the guest before the next entry.
+	 */
+	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
+		unsigned long val;
+
+		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+
+		if (val & KVM_PMU_EVENT_ENABLED)
+			kvm_make_request(KVM_REQ_PMU, vcpu);
+	}
+
 	return ret;
 }
 
@@ -513,6 +652,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
 	case LOONGARCH_CPUCFG5:
 		*v = GENMASK(31, 0);
 		return 0;
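+	/* CPUCFG6 bits [14:0] hold the PMU feature fields (PMP/PMNUM/PMBITS/UPM) */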
+	case LOONGARCH_CPUCFG6:
+		if (cpu_has_pmp)
+			*v = GENMASK(14, 0);
+		else
+			*v = 0;
+		return 0;
 	case LOONGARCH_CPUCFG16:
 		*v = GENMASK(16, 0);
 		return 0;
@@ -557,6 +702,17 @@ static int kvm_check_cpucfg(int id, u64 val)
 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
 			return -EINVAL;
 		return 0;
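+	/*
+	 * If userspace advertises a PMU, its counter width must match the
+	 * host's exactly, and it may not claim more counters or user-mode
+	 * counting capability than the host provides.
+	 */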
+	case LOONGARCH_CPUCFG6:
+		if (val & CPUCFG6_PMP) {
+			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
+
+			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+				return -EINVAL;
+			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+				return -EINVAL;
+			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+				return -EINVAL;
+		}
+		return 0;
 	default:
 		/*
 		 * Values for the other CPUCFG IDs are not being further validated
@@ -670,6 +826,9 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 		if (ret)
 			break;
 		vcpu->arch.cpucfg[id] = (u32)v;
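+		/*
+		 * Each counter is a perfctrl/perfcntr CSR pair, so the
+		 * highest valid PMU CSR id is PERFCTRL0 + 2 * pmu_num + 1.
+		 */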
+		if (id == LOONGARCH_CPUCFG6)
+			vcpu->arch.max_pmu_csrid =
+				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
 		break;
 	case KVM_REG_LOONGARCH_LBT:
 		if (!kvm_guest_has_lbt(&vcpu->arch))
@@ -791,7 +950,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
 					 struct kvm_device_attr *attr)
 {
 	switch (attr->attr) {
-	case 2:
+	case LOONGARCH_CPUCFG2:
+	case LOONGARCH_CPUCFG6:
 		return 0;
 	default:
 		return -ENXIO;
@@ -1356,6 +1516,9 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
+	/* Restore hardware PMU CSRs (deferred to kvm_check_pmu() via KVM_REQ_PMU) */
+	kvm_restore_pmu(vcpu);
+
 	/* Don't bother restoring registers multiple times unless necessary */
 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
 		return 0;