@@ -162,6 +162,9 @@ struct mshv_vtl_per_cpu {
 	u64 l1_msr_sfmask;
 	u64 l1_msr_tsc_aux;
 #endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_SEV_GUEST)
+	struct page *secure_avic_page;
+#endif
 };
 
 static struct mutex mshv_vtl_poll_file_lock;
@@ -191,18 +194,28 @@ static struct page *mshv_vtl_cpu_reg_page(int cpu)
 	return *per_cpu_ptr(&mshv_vtl_per_cpu.reg_page, cpu);
 }
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_INTEL_TDX_GUEST)
 
 static struct page *tdx_apic_page(int cpu)
 {
+#if defined(CONFIG_X86_64) && defined(CONFIG_INTEL_TDX_GUEST)
 	return *per_cpu_ptr(&mshv_vtl_per_cpu.tdx_apic_page, cpu);
+#else
+	(void)cpu;
+	return NULL;
+#endif
 }
 
 static struct page *tdx_this_apic_page(void)
 {
+#if defined(CONFIG_X86_64) && defined(CONFIG_INTEL_TDX_GUEST)
 	return *this_cpu_ptr(&mshv_vtl_per_cpu.tdx_apic_page);
+#else
+	return NULL;
+#endif
 }
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_INTEL_TDX_GUEST)
+
 /*
  * For ICR emulation on TDX, we need a fast way to map APICIDs to CPUIDs.
  * Instead of iterating through all CPUs for each target in the ICR destination field
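The comment above motivates replacing a per-target CPU scan with a precomputed APICID-to-CPUID table. A minimal sketch of that idea, with hypothetical names (mshv_tdx_apicid_to_cpuid and its init helper are illustrative, not part of this patch):

static int mshv_tdx_apicid_to_cpuid[MAX_LOCAL_APIC];

/* Fill the table once at init so ICR emulation can do O(1) lookups. */
static void mshv_tdx_build_apicid_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mshv_tdx_apicid_to_cpuid[cpu_physical_id(cpu)] = cpu;
}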
@@ -236,6 +249,26 @@ static int mshv_tdx_set_cpumask_from_apicid(int apicid, struct cpumask *cpu_mask
 }
 #endif
 
+static struct page *snp_secure_avic_page(int cpu)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_SEV_GUEST)
+	return *per_cpu_ptr(&mshv_vtl_per_cpu.secure_avic_page, cpu);
+#else
+	(void)cpu;
+	return NULL;
+#endif
+}
+
+static struct page *mshv_apic_page(int cpu)
+{
+	if (hv_isolation_type_tdx())
+		return tdx_apic_page(cpu);
+	else if (hv_isolation_type_snp())
+		return snp_secure_avic_page(cpu);
+
+	return NULL;
+}
+
 static long __mshv_vtl_ioctl_check_extension(u32 arg)
 {
 	switch (arg) {
@@ -619,12 +652,34 @@ static int mshv_vtl_alloc_context(unsigned int cpu)
 		mshv_write_tdx_apic_page(page_to_phys(tdx_apic_page));
 #endif
 	} else if (hv_isolation_type_snp()) {
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && defined(CONFIG_SEV_GUEST)
 		int ret;
 
 		ret = mshv_configure_vmsa_page(0, &per_cpu->vmsa_page);
 		if (ret < 0)
 			return ret;
+
+		if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC)) {
+			struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			void *secure_avic_page;
+
+			if (!page)
+				return -ENOMEM;
+			secure_avic_page = page_address(page);
+
+			/* Grant VTL0, which runs at VMPL 2, read/write access. */
+			ret = rmpadjust((unsigned long)secure_avic_page,
+					RMP_PG_SIZE_4K, 2 | RMPADJUST_ENABLE_READ | RMPADJUST_ENABLE_WRITE);
+			if (ret) {
+				pr_err("failed to adjust RMP for the secure AVIC page: %d\n", ret);
+				__free_page(page);
+				return -EINVAL;
+			}
+			pr_info("VTL0 secure AVIC page allocated, CPU %d\n", cpu);
+
+			x2apic_savic_init_backing_page(secure_avic_page);
+			per_cpu->secure_avic_page = page;
+		}
 #endif
 	} else if (mshv_vsm_capabilities.intercept_page_available)
 		mshv_vtl_configure_reg_page(per_cpu);
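This hunk only adds the allocation side; no free path for the secure AVIC page appears in the diff. A hypothetical teardown counterpart, assuming that an rmpadjust() attribute carrying only the target VMPL (no RMPADJUST_ENABLE_* bits) revokes the grant made above:

static void mshv_vtl_free_secure_avic_page(struct mshv_vtl_per_cpu *per_cpu)
{
	struct page *page = per_cpu->secure_avic_page;

	if (!page)
		return;

	/* Assumption: VMPL 2 with no permission bits clears the R/W grant. */
	if (rmpadjust((unsigned long)page_address(page), RMP_PG_SIZE_4K, 2))
		pr_warn("failed to revoke VTL0 access to the secure AVIC page\n");

	__free_page(page);
	per_cpu->secure_avic_page = NULL;
}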
@@ -1933,7 +1988,7 @@ mshv_vtl_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 
 static vm_fault_t mshv_vtl_fault(struct vm_fault *vmf)
 {
-	struct page *page;
+	struct page *page = NULL;
 	int cpu = vmf->pgoff & MSHV_PG_OFF_CPU_MASK;
 	int real_off = vmf->pgoff >> MSHV_REAL_OFF_SHIFT;
 
@@ -1965,18 +2020,16 @@ static vm_fault_t mshv_vtl_fault(struct vm_fault *vmf)
 		if (!hv_isolation_type_snp())
 			return VM_FAULT_SIGBUS;
 		page = *per_cpu_ptr(&mshv_vtl_per_cpu.vmsa_page, cpu);
-#ifdef CONFIG_INTEL_TDX_GUEST
 	} else if (real_off == MSHV_APIC_PAGE_OFFSET) {
-		if (!hv_isolation_type_tdx())
-			return VM_FAULT_SIGBUS;
-
-		page = tdx_apic_page(cpu);
-#endif
+		page = mshv_apic_page(cpu);
 #endif
 	} else {
 		return VM_FAULT_NOPAGE;
 	}
 
+	if (!page)
+		return VM_FAULT_SIGBUS;
+
 	get_page(page);
 	vmf->page = page;
 
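The pgoff decoding at the top of mshv_vtl_fault() implies user space encodes both the page type and the CPU into the mmap offset. A hypothetical user-space sketch (vtl_fd and the availability of the MSHV_* constants in a uapi header are assumptions; only the encoding itself comes from the fault handler):

#include <err.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_apic_page(int vtl_fd, int cpu)
{
	/* pgoff = (real_off << MSHV_REAL_OFF_SHIFT) | cpu, mirroring mshv_vtl_fault() */
	off_t pgoff = ((off_t)MSHV_APIC_PAGE_OFFSET << MSHV_REAL_OFF_SHIFT) | cpu;
	void *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
		       MAP_SHARED, vtl_fd, pgoff * getpagesize());

	if (p == MAP_FAILED)
		err(1, "mmap apic page");
	return p;
}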