@@ -190,7 +190,7 @@ static void apf_task_wake_all(void)
 	}
 }
 
-void kvm_async_pf_task_wake(u32 token)
+static void kvm_async_pf_task_wake(u32 token)
 {
 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -241,7 +241,6 @@ void kvm_async_pf_task_wake(u32 token)
 	/* A dummy token might be allocated and ultimately not used. */
 	kfree(dummy);
 }
-EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 
 noinstr u32 kvm_read_and_reset_apf_flags(void)
 {
@@ -933,6 +932,19 @@ static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
 
 static void __init kvm_init_platform(void)
 {
+	u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn());
+	/*
+	 * Note, hardware requires variable MTRR ranges to be power-of-2 sized
+	 * and naturally aligned. But when forcing guest MTRR state, Linux
+	 * doesn't program the forced ranges into hardware. Don't bother doing
+	 * the math to generate a technically-legal range.
+	 */
+	struct mtrr_var_range pci_hole = {
+		.base_lo = tolud | X86_MEMTYPE_UC,
+		.mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V,
+		.mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32,
+	};
+
 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
 	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
 		unsigned long nr_pages;
@@ -982,8 +994,12 @@ static void __init kvm_init_platform(void)
 	kvmclock_init();
 	x86_platform.apic_post_init = kvm_apic_init;
 
-	/* Set WB as the default cache mode for SEV-SNP and TDX */
-	guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+	/*
+	 * Set WB as the default cache mode for SEV-SNP and TDX, with a single
+	 * UC range for the legacy PCI hole, e.g. so that devices that expect
+	 * to get UC/WC mappings don't get surprised with WB.
+	 */
+	guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK);
 }
 
 #if defined(CONFIG_AMD_MEM_ENCRYPT)
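As a rough illustration of the pci_hole mask arithmetic above, here is a small standalone sketch (not part of the patch): it assumes, purely for the example, that low RAM ends at 3 GiB and the guest has 48 physical address bits, and checks that the resulting base/mask pair matches guest physical addresses from tolud up to 4 GiB and nothing above.

/* Standalone userspace sketch, not kernel code: decode the forced UC range. */
#include <stdint.h>
#include <stdio.h>

#define SZ_4G		(1ULL << 32)
#define MTRR_PHYSMASK_V	(1U << 11)	/* "valid" bit in PHYSMASKn */

int main(void)
{
	uint64_t tolud = 0xC0000000ULL;	/* assumed: low RAM ends at 3 GiB */
	unsigned int phys_bits = 48;	/* assumed guest physical width */

	uint32_t mask_lo = (uint32_t)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V;
	uint32_t mask_hi = (uint32_t)(((1ULL << phys_bits) - 1) >> 32);
	uint64_t mask = ((uint64_t)mask_hi << 32) | (mask_lo & ~0xFFFU);
	uint64_t base = tolud;		/* type bits (UC == 0) live below bit 12 */

	/* An address hits the variable range when (addr & mask) == base. */
	printf("0xE0000000 in UC hole:  %d\n", (0xE0000000ULL & mask) == base);  /* 1 */
	printf("0x100000000 in UC hole: %d\n", (0x100000000ULL & mask) == base); /* 0 */
	return 0;
}

With tolud = 3 GiB the range happens to be power-of-2 sized and naturally aligned; as the comment in the hunk notes, the patch does not bother making that true in general because the forced state is never programmed into hardware.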
@@ -1072,16 +1088,6 @@ static void kvm_wait(u8 *ptr, u8 val)
  */
 void __init kvm_spinlock_init(void)
 {
-	/*
-	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
-	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
-	 * preferred over native qspinlock when vCPU is preempted.
-	 */
-	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
-		pr_info("PV spinlocks disabled, no host support\n");
-		return;
-	}
-
 	/*
 	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
 	 * are available.
@@ -1101,6 +1107,16 @@ void __init kvm_spinlock_init(void)
 		goto out;
 	}
 
+	/*
+	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
+	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
+	 * preferred over native qspinlock when vCPU is preempted.
+	 */
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
+		pr_info("PV spinlocks disabled, no host support\n");
+		return;
+	}
+
 	pr_info("PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
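The ordering matters because the two kinds of early exit leave different state behind: in the surrounding function (not shown in these hunks) the out label disables virt_spin_lock_key, while the missing-PV_UNHALT path returns with it still enabled so virt_spin_lock() stays available for preempted vCPUs. A minimal abstract sketch of the reordered flow, using made-up stand-in booleans rather than the kernel's helpers and static key:

/* Abstract sketch only; stand-in flags, not the kernel's helpers. */
#include <stdbool.h>
#include <stdio.h>

static bool virt_spin_lock_key = true;	/* stand-in for the static key */

static void spinlock_init(bool pv_unhalt, bool dedicated_pcpus)
{
	virt_spin_lock_key = true;

	if (dedicated_pcpus) {		/* the checks that "goto out" */
		virt_spin_lock_key = false;
		return;
	}

	if (!pv_unhalt)			/* now evaluated after those checks */
		return;			/* keep virt_spin_lock() available */

	virt_spin_lock_key = false;	/* PV spinlocks take over instead */
}

int main(void)
{
	/* No PV_UNHALT but dedicated pCPUs: with the old ordering this
	 * combination returned early and left the key enabled. */
	spinlock_init(false, true);
	printf("virt_spin_lock_key: %d\n", virt_spin_lock_key);	/* 0 */
	return 0;
}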