From f11e3413261cbd08f13b7a12d539d9e9de68aee4 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Thu, 31 Jul 2025 15:44:00 +0100 Subject: [PATCH] chore: update patch series to v17 Update the mmap support patch series to v17, and update the direct map removal series to apply on top of v17. Rebase all the other series. Signed-off-by: Patrick Roy --- resources/hiding_ci/kernel_commit_hash | 2 +- ..._KVM_PRIVATE_MEM-to-CONFIG_KVM_GUES.patch} | 87 ++-- ...G_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch | 103 ----- ...-vendor-neutral-sub-configs-depend-o.patch | 109 +++++ ...KVM-Introduce-kvm_arch_supports_gmem.patch | 102 ----- ...VM_GENERIC_PRIVATE_MEM-directly-from.patch | 42 ++ ...x86-Introduce-kvm-arch.supports_gmem.patch | 95 ----- ...DX-s-KVM_GENERIC_xxx-dependencies-if.patch | 43 ++ ...G_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch | 144 +++++++ ...ot_can_be_private-to-kvm_slot_has_g.patch} | 28 +- ...x-comments-that-refer-to-slots_lock.patch} | 14 +- ...that-refers-to-kvm-uapi-header-path.patch} | 10 +- ...-Allow-host-to-map-guest_memfd-pages.patch | 216 ---------- ...VM_GUEST_MEMFD-for-all-64-bit-builds.patch | 144 +++++++ ...Add-plumbing-to-host-to-map-guest_me.patch | 185 +++++++++ ...ralize-private_max_mapping_level-x86.patch | 241 ----------- ...rack-guest_memfd-mmap-support-in-me.patch} | 26 +- ...w-NULL-able-fault-in-kvm_max_private.patch | 76 ---- ...ult-guest_memfd-when-computing-max_m.patch | 212 ---------- ...me-.private_max_mapping_level-to-.gm.patch | 171 ++++++++ ...t-guest_memfd-max-level-order-helper.patch | 113 ++++++ ...guest_memfd-mmap-for-default-VM-type.patch | 94 ----- ...rce-guest_memfd-s-max-order-when-rec.patch | 196 +++++++++ ...nd-guest_memfd-s-max-mapping-level-t.patch | 163 ++++++++ ...e-guest-page-faults-for-guest_memfd.patch} | 30 +- ...7-KVM-arm64-Refactor-user_mem_abort.patch} | 8 +- ...-host-mapping-of-shared-guest_memfd-.patch | 89 ---- ...guest_memfd-backed-guest-page-fault.patch} | 8 +- ...the-KVM-capability-KVM_CAP_GMEM_MMAP.patch | 77 ---- ...le-VNCR_EL2-triggered-faults-backed.patch} | 14 +- ...-support-for-guest_memfd-backed-memo.patch | 61 +++ ...vertise-support-for-host-mmap-on-gue.patch | 112 +++++ ...not-use-hardcoded-page-sizes-in-gue.patch} | 17 +- ...st_memfd-mmap-test-when-mmap-is-sup.patch} | 147 +++---- ...d-guest_memfd-testcase-to-fault-in-o.patch | 115 ++++++ ...address_space-mapping-to-free_folio.patch} | 12 +- ...irect_map_valid_noflush-to-KVM-modu.patch} | 4 +- ... 
0027-mm-introduce-AS_NO_DIRECT_MAP.patch} | 6 +- ...-Add-flag-to-remove-from-direct-map.patch} | 87 ++-- ...n-describe-GUEST_MEMFD_FLAG_NO_DIREC.patch | 30 ++ ...selftests-load-elf-via-bounce-buffer.patch | 105 +++++ ...t-KVM_MEM_GUEST_MEMFD-in-vm_mem_add-.patch | 71 ++++ ...d-guest_memfd-based-vm_mem_backing_s.patch | 190 +++++++++ ...uff-vm_mem_backing_src_type-into-vm_.patch | 98 +++++ ...ver-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch | 49 +++ ...ver-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch | 27 ++ ...st-guest-execution-from-direct-map-r.patch | 88 ++++ .../0026-de-gpc-ify-kvm-clock.patch | 147 ------- ...-for-kvm-clock-if-kvm_gpc_refresh-fa.patch | 103 +++++ ...ix-vm_mem_region_set_flags-docstring.patch | 28 -- ...KVM-selftests-Fix-prefault_mem-logic.patch | 37 -- ...ests-Add-va_start-end-into-uffd_desc.patch | 44 -- ...form-set_memory_region_test-of-KVM_M.patch | 31 -- ...d-KVM-Userfault-mode-to-demand_pagin.patch | 381 ------------------ ...M_USERFAULT-memslot-flag-and-bitmap.patch} | 26 +- ...d-KVM_MEM_USERFAULT-guest_memfd-togg.patch | 65 --- ...-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch} | 6 +- ...tting-of-KVM_MEM_USERFAULT-on-guest.patch} | 6 +- ...u-Add-support-for-KVM_MEM_USERFAULT.patch} | 34 +- ..._CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch} | 18 +- ...4-Add-support-for-KVM_MEM_USERFAULT.patch} | 12 +- ...mfd-add-generic-population-via-write.patch | 133 ------ ...tests-update-guest_memfd-write-tests.patch | 126 ------ ...mfd-add-generic-population-via-write.patch | 118 ++++++ ...-generic-continue-for-non-hugetlbfs.patch} | 4 +- ...provide-can_userfault-vma-operation.patch} | 4 +- ...tfd-use-can_userfault-vma-operation.patch} | 4 +- ...d-add-support-for-userfaultfd-minor.patch} | 8 +- ...-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch} | 4 +- ... 0050-fixup-for-guest_memfd-uffd-v3.patch} | 8 +- 70 files changed, 2782 insertions(+), 2626 deletions(-) rename resources/hiding_ci/linux_patches/05-mmap-support/{0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GMEM.patch => 0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GUES.patch} (68%) delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-x86-Have-all-vendor-neutral-sub-configs-depend-o.patch delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-Introduce-kvm_arch_supports_gmem.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-x86-Select-KVM_GENERIC_PRIVATE_MEM-directly-from.patch delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Introduce-kvm-arch.supports_gmem.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Select-TDX-s-KVM_GENERIC_xxx-dependencies-if.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0005-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch => 0006-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch} (80%) rename resources/hiding_ci/linux_patches/05-mmap-support/{0006-KVM-Fix-comments-that-refer-to-slots_lock.patch => 0007-KVM-Fix-comments-that-refer-to-slots_lock.patch} (80%) rename resources/hiding_ci/linux_patches/05-mmap-support/{0007-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch => 0008-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch} (75%) delete mode 
100644 resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-guest_memfd-Allow-host-to-map-guest_memfd-pages.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-x86-Enable-KVM_GUEST_MEMFD-for-all-64-bit-builds.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-guest_memfd-Add-plumbing-to-host-to-map-guest_me.patch delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-x86-mmu-Generalize-private_max_mapping_level-x86.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0009-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch => 0011-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch} (72%) delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-x86-mmu-Allow-NULL-able-fault-in-kvm_max_private.patch delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Consult-guest_memfd-when-computing-max_m.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Rename-.private_max_mapping_level-to-.gm.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Hoist-guest_memfd-max-level-order-helper.patch delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-Enable-guest_memfd-mmap-for-default-VM-type.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-mmu-Enforce-guest_memfd-s-max-order-when-rec.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-x86-mmu-Extend-guest_memfd-s-max-mapping-level-t.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0013-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch => 0016-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch} (69%) rename resources/hiding_ci/linux_patches/05-mmap-support/{0015-KVM-arm64-Refactor-user_mem_abort.patch => 0017-KVM-arm64-Refactor-user_mem_abort.patch} (96%) delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Enable-host-mapping-of-shared-guest_memfd-.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0016-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch => 0018-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch} (94%) delete mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-Introduce-the-KVM-capability-KVM_CAP_GMEM_MMAP.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0017-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch => 0019-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch} (89%) create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-arm64-Enable-support-for-guest_memfd-backed-memo.patch create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-Allow-and-advertise-support-for-host-mmap-on-gue.patch rename resources/hiding_ci/linux_patches/05-mmap-support/{0021-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch => 0022-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch} (87%) rename resources/hiding_ci/linux_patches/05-mmap-support/{0020-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch => 0023-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch} (75%) create mode 100644 resources/hiding_ci/linux_patches/05-mmap-support/0024-KVM-selftests-Add-guest_memfd-testcase-to-fault-in-o.patch rename 
resources/hiding_ci/linux_patches/10-direct-map-removal/{0022-filemap-Pass-address_space-mapping-to-free_folio.patch => 0025-filemap-Pass-address_space-mapping-to-free_folio.patch} (96%) rename resources/hiding_ci/linux_patches/10-direct-map-removal/{0023-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch => 0026-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch} (96%) rename resources/hiding_ci/linux_patches/10-direct-map-removal/{0024-mm-introduce-AS_NO_DIRECT_MAP.patch => 0027-mm-introduce-AS_NO_DIRECT_MAP.patch} (97%) rename resources/hiding_ci/linux_patches/10-direct-map-removal/{0025-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch => 0028-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch} (74%) create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0029-KVM-Documentation-describe-GUEST_MEMFD_FLAG_NO_DIREC.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0030-KVM-selftests-load-elf-via-bounce-buffer.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0031-KVM-selftests-set-KVM_MEM_GUEST_MEMFD-in-vm_mem_add-.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0032-KVM-selftests-Add-guest_memfd-based-vm_mem_backing_s.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0033-KVM-selftests-stuff-vm_mem_backing_src_type-into-vm_.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0034-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0035-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch create mode 100644 resources/hiding_ci/linux_patches/10-direct-map-removal/0036-KVM-selftests-Test-guest-execution-from-direct-map-r.patch delete mode 100644 resources/hiding_ci/linux_patches/11-kvm-clock/0026-de-gpc-ify-kvm-clock.patch create mode 100644 resources/hiding_ci/linux_patches/11-kvm-clock/0037-KVM-x86-use-uhva-for-kvm-clock-if-kvm_gpc_refresh-fa.patch delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0033-KVM-selftests-Fix-vm_mem_region_set_flags-docstring.patch delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0034-KVM-selftests-Fix-prefault_mem-logic.patch delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0035-KVM-selftests-Add-va_start-end-into-uffd_desc.patch delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0036-KVM-selftests-Inform-set_memory_region_test-of-KVM_M.patch delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0037-KVM-selftests-Add-KVM-Userfault-mode-to-demand_pagin.patch rename resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0027-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch => 0038-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch} (86%) delete mode 100644 resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-selftests-Add-KVM_MEM_USERFAULT-guest_memfd-togg.patch rename resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0028-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch => 0039-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch} (81%) rename resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0029-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch => 0040-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch} (92%) rename 
resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0030-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch => 0041-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch} (88%) rename resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0031-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch => 0042-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch} (73%) rename resources/hiding_ci/linux_patches/15-kvm-mem-userfault/{0032-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch => 0043-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch} (92%) delete mode 100644 resources/hiding_ci/linux_patches/20-gmem-write/0039-KVM-guest_memfd-add-generic-population-via-write.patch delete mode 100644 resources/hiding_ci/linux_patches/20-gmem-write/0040-KVM-selftests-update-guest_memfd-write-tests.patch create mode 100644 resources/hiding_ci/linux_patches/20-gmem-write/0044-KVM-guest_memfd-add-generic-population-via-write.patch rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0041-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch => 0045-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch} (97%) rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0042-mm-provide-can_userfault-vma-operation.patch => 0046-mm-provide-can_userfault-vma-operation.patch} (95%) rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0043-mm-userfaultfd-use-can_userfault-vma-operation.patch => 0047-mm-userfaultfd-use-can_userfault-vma-operation.patch} (95%) rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0044-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch => 0048-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch} (80%) rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0045-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch => 0049-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch} (94%) rename resources/hiding_ci/linux_patches/25-gmem-uffd/{0046-fixup-for-guest_memfd-uffd-v3.patch => 0050-fixup-for-guest_memfd-uffd-v3.patch} (91%) diff --git a/resources/hiding_ci/kernel_commit_hash b/resources/hiding_ci/kernel_commit_hash index f9edf6840ea..0e03de1fe6f 100644 --- a/resources/hiding_ci/kernel_commit_hash +++ b/resources/hiding_ci/kernel_commit_hash @@ -1 +1 @@ -347e9f5043c89695b01e66b3ed111755afcf1911 +beafd7ecf2255e8b62a42dc04f54843033db3d24 \ No newline at end of file diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GMEM.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GUES.patch similarity index 68% rename from resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GMEM.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GUES.patch index 77dd444fd7a..086f055a3d8 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GMEM.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0001-KVM-Rename-CONFIG_KVM_PRIVATE_MEM-to-CONFIG_KVM_GUES.patch @@ -1,27 +1,30 @@ -From 000264f8823f76fb6cf91dc40ace84a29a0fa089 Mon Sep 17 00:00:00 2001 +From 83ed02c1c583b5b831e7827453845fe4fd7b4c80 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:30 +0100 -Subject: [PATCH 01/46] KVM: Rename CONFIG_KVM_PRIVATE_MEM to CONFIG_KVM_GMEM +Date: Tue, 29 Jul 2025 15:54:32 -0700 +Subject: [PATCH 01/49] KVM: Rename CONFIG_KVM_PRIVATE_MEM to + CONFIG_KVM_GUEST_MEMFD 
-Rename the Kconfig option CONFIG_KVM_PRIVATE_MEM to CONFIG_KVM_GMEM. The -original name implied that the feature only supported "private" memory. -However, CONFIG_KVM_PRIVATE_MEM enables guest_memfd in general, which is -not exclusively for private memory. Subsequent patches in this series -will add guest_memfd support for non-CoCo VMs, whose memory is not -private. +Rename the Kconfig option CONFIG_KVM_PRIVATE_MEM to +CONFIG_KVM_GUEST_MEMFD. The original name implied that the feature only +supported "private" memory. However, CONFIG_KVM_PRIVATE_MEM enables +guest_memfd in general, which is not exclusively for private memory. +Subsequent patches in this series will add guest_memfd support for +non-CoCo VMs, whose memory is not private. -Renaming the Kconfig option to CONFIG_KVM_GMEM more accurately reflects -its broader scope as the main Kconfig option for all guest_memfd-backed -memory. This provides clearer semantics for the option and avoids -confusion as new features are introduced. +Renaming the Kconfig option to CONFIG_KVM_GUEST_MEMFD more accurately +reflects its broader scope as the main Kconfig option for all +guest_memfd-backed memory. This provides clearer semantics for the +option and avoids confusion as new features are introduced. Reviewed-by: Ira Weiny Reviewed-by: Gavin Shan Reviewed-by: Shivank Garg Reviewed-by: Vlastimil Babka +Reviewed-by: Xiaoyao Li Co-developed-by: David Hildenbrand Signed-off-by: David Hildenbrand Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 +- include/linux/kvm_host.h | 14 +++++++------- @@ -32,32 +35,32 @@ Signed-off-by: Fuad Tabba 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index f7af967aa16f..acb25f935d84 100644 +index f19a76d3ca0e..7b0f2b3e492d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h -@@ -2275,7 +2275,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, +@@ -2276,7 +2276,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, int tdp_max_root_level, int tdp_huge_page_level); -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) #else #define kvm_arch_has_private_mem(kvm) false diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 3bde4fb5c6aa..755b09dcafce 100644 +index 15656b7fba6c..8cdc0b3cc1b1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h -@@ -601,7 +601,7 @@ struct kvm_memory_slot { +@@ -602,7 +602,7 @@ struct kvm_memory_slot { short id; u16 as_id; -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD struct { /* * Writes protected by kvm->slots_lock. Acquiring a -@@ -719,10 +719,10 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) +@@ -720,10 +720,10 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) #endif /* @@ -67,39 +70,39 @@ index 3bde4fb5c6aa..755b09dcafce 100644 + * enabled. 
*/ -#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) -+#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_GMEM) ++#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_GUEST_MEMFD) static inline bool kvm_arch_has_private_mem(struct kvm *kvm) { return false; -@@ -2527,7 +2527,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, +@@ -2505,7 +2505,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) { - return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) && -+ return IS_ENABLED(CONFIG_KVM_GMEM) && ++ return IS_ENABLED(CONFIG_KVM_GUEST_MEMFD) && kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; } #else -@@ -2537,7 +2537,7 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) +@@ -2515,7 +2515,7 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) } #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, struct page **page, int *max_order); -@@ -2550,7 +2550,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, +@@ -2528,7 +2528,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, KVM_BUG_ON(1, kvm); return -EIO; } -#endif /* CONFIG_KVM_PRIVATE_MEM */ -+#endif /* CONFIG_KVM_GMEM */ ++#endif /* CONFIG_KVM_GUEST_MEMFD */ #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig -index 727b542074e7..49df4e32bff7 100644 +index 727b542074e7..e4b400feff94 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -112,19 +112,19 @@ config KVM_GENERIC_MEMORY_ATTRIBUTES @@ -107,27 +110,27 @@ index 727b542074e7..49df4e32bff7 100644 bool -config KVM_PRIVATE_MEM -+config KVM_GMEM ++config KVM_GUEST_MEMFD select XARRAY_MULTI bool config KVM_GENERIC_PRIVATE_MEM select KVM_GENERIC_MEMORY_ATTRIBUTES - select KVM_PRIVATE_MEM -+ select KVM_GMEM ++ select KVM_GUEST_MEMFD bool config HAVE_KVM_ARCH_GMEM_PREPARE bool - depends on KVM_PRIVATE_MEM -+ depends on KVM_GMEM ++ depends on KVM_GUEST_MEMFD config HAVE_KVM_ARCH_GMEM_INVALIDATE bool - depends on KVM_PRIVATE_MEM -+ depends on KVM_GMEM ++ depends on KVM_GUEST_MEMFD diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm -index 724c89af78af..8d00918d4c8b 100644 +index 724c89af78af..d047d4cf58c9 100644 --- a/virt/kvm/Makefile.kvm +++ b/virt/kvm/Makefile.kvm @@ -12,4 +12,4 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o @@ -135,31 +138,31 @@ index 724c89af78af..8d00918d4c8b 100644 kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o -kvm-$(CONFIG_KVM_PRIVATE_MEM) += $(KVM)/guest_memfd.o -+kvm-$(CONFIG_KVM_GMEM) += $(KVM)/guest_memfd.o ++kvm-$(CONFIG_KVM_GUEST_MEMFD) += $(KVM)/guest_memfd.o diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index 222f0e894a0c..d5f0ec2d321f 100644 +index 6c07dd423458..25a94eed75fd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c -@@ -4913,7 +4913,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) +@@ -4915,7 +4915,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_MEMORY_ATTRIBUTES: return kvm_supported_mem_attributes(kvm); #endif -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD 
case KVM_CAP_GUEST_MEMFD: return !kvm || kvm_arch_has_private_mem(kvm); #endif -@@ -5347,7 +5347,7 @@ static long kvm_vm_ioctl(struct file *filp, +@@ -5352,7 +5352,7 @@ static long kvm_vm_ioctl(struct file *filp, case KVM_GET_STATS_FD: r = kvm_vm_ioctl_get_stats_fd(kvm); break; -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD case KVM_CREATE_GUEST_MEMFD: { struct kvm_create_guest_memfd guest_memfd; diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h -index acef3f5c582a..ec311c0d6718 100644 +index acef3f5c582a..31defb08ccba 100644 --- a/virt/kvm/kvm_mm.h +++ b/virt/kvm/kvm_mm.h @@ -67,7 +67,7 @@ static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, @@ -167,7 +170,7 @@ index acef3f5c582a..ec311c0d6718 100644 #endif /* HAVE_KVM_PFNCACHE */ -#ifdef CONFIG_KVM_PRIVATE_MEM -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD void kvm_gmem_init(struct module *module); int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args); int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, @@ -176,7 +179,7 @@ index acef3f5c582a..ec311c0d6718 100644 WARN_ON_ONCE(1); } -#endif /* CONFIG_KVM_PRIVATE_MEM */ -+#endif /* CONFIG_KVM_GMEM */ ++#endif /* CONFIG_KVM_GUEST_MEMFD */ #endif /* __KVM_MM_H__ */ -- diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch deleted file mode 100644 index ac66b9c7cba..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch +++ /dev/null @@ -1,103 +0,0 @@ -From 05cf45cc4528079db3c40c021947ae0cc28eec82 Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:31 +0100 -Subject: [PATCH 02/46] KVM: Rename CONFIG_KVM_GENERIC_PRIVATE_MEM to - CONFIG_KVM_GENERIC_GMEM_POPULATE - -The original name was vague regarding its functionality. This Kconfig -option specifically enables and gates the kvm_gmem_populate() function, -which is responsible for populating a GPA range with guest data. - -The new name, KVM_GENERIC_GMEM_POPULATE, describes the purpose of the -option: to enable generic guest_memfd population mechanisms. This -improves clarity for developers and ensures the name accurately reflects -the functionality it controls, especially as guest_memfd support expands -beyond purely "private" memory scenarios. 
- -Reviewed-by: Ira Weiny -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg -Reviewed-by: Vlastimil Babka -Co-developed-by: David Hildenbrand -Signed-off-by: David Hildenbrand -Signed-off-by: Fuad Tabba ---- - arch/x86/kvm/Kconfig | 6 +++--- - include/linux/kvm_host.h | 2 +- - virt/kvm/Kconfig | 2 +- - virt/kvm/guest_memfd.c | 2 +- - 4 files changed, 6 insertions(+), 6 deletions(-) - -diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig -index 2eeffcec5382..df1fdbb4024b 100644 ---- a/arch/x86/kvm/Kconfig -+++ b/arch/x86/kvm/Kconfig -@@ -46,7 +46,7 @@ config KVM_X86 - select HAVE_KVM_PM_NOTIFIER if PM - select KVM_GENERIC_HARDWARE_ENABLING - select KVM_GENERIC_PRE_FAULT_MEMORY -- select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM -+ select KVM_GENERIC_GMEM_POPULATE if KVM_SW_PROTECTED_VM - select KVM_WERROR if WERROR - - config KVM -@@ -95,7 +95,7 @@ config KVM_SW_PROTECTED_VM - config KVM_INTEL - tristate "KVM for Intel (and compatible) processors support" - depends on KVM && IA32_FEAT_CTL -- select KVM_GENERIC_PRIVATE_MEM if INTEL_TDX_HOST -+ select KVM_GENERIC_GMEM_POPULATE if INTEL_TDX_HOST - select KVM_GENERIC_MEMORY_ATTRIBUTES if INTEL_TDX_HOST - help - Provides support for KVM on processors equipped with Intel's VT -@@ -157,7 +157,7 @@ config KVM_AMD_SEV - depends on KVM_AMD && X86_64 - depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) - select ARCH_HAS_CC_PLATFORM -- select KVM_GENERIC_PRIVATE_MEM -+ select KVM_GENERIC_GMEM_POPULATE - select HAVE_KVM_ARCH_GMEM_PREPARE - select HAVE_KVM_ARCH_GMEM_INVALIDATE - help -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 755b09dcafce..359baaae5e9f 100644 ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -2556,7 +2556,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, - int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); - #endif - --#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM -+#ifdef CONFIG_KVM_GENERIC_GMEM_POPULATE - /** - * kvm_gmem_populate() - Populate/prepare a GPA range with guest data - * -diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig -index 49df4e32bff7..559c93ad90be 100644 ---- a/virt/kvm/Kconfig -+++ b/virt/kvm/Kconfig -@@ -116,7 +116,7 @@ config KVM_GMEM - select XARRAY_MULTI - bool - --config KVM_GENERIC_PRIVATE_MEM -+config KVM_GENERIC_GMEM_POPULATE - select KVM_GENERIC_MEMORY_ATTRIBUTES - select KVM_GMEM - bool -diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index b2aa6bf24d3a..befea51bbc75 100644 ---- a/virt/kvm/guest_memfd.c -+++ b/virt/kvm/guest_memfd.c -@@ -638,7 +638,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, - } - EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn); - --#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM -+#ifdef CONFIG_KVM_GENERIC_GMEM_POPULATE - long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages, - kvm_gmem_populate_cb post_populate, void *opaque) - { --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-x86-Have-all-vendor-neutral-sub-configs-depend-o.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-x86-Have-all-vendor-neutral-sub-configs-depend-o.patch new file mode 100644 index 00000000000..fe70a496b4c --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0002-KVM-x86-Have-all-vendor-neutral-sub-configs-depend-o.patch @@ -0,0 +1,109 @@ +From 8800d0a0bd2be12a870e65a739a7e97441579441 Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:33 -0700 +Subject: 
[PATCH 02/49] KVM: x86: Have all vendor neutral sub-configs depend on + KVM_X86, not just KVM + +Make all vendor neutral KVM x86 configs depend on KVM_X86, not just KVM, +i.e. gate them on at least one vendor module being enabled and thus on +kvm.ko actually being built. Depending on just KVM allows the user to +select the configs even though they won't actually take effect, and more +importantly, makes it all too easy to create unmet dependencies. E.g. +KVM_GENERIC_PRIVATE_MEM can't be selected by KVM_SW_PROTECTED_VM, because +the KVM_GENERIC_MMU_NOTIFIER dependency is select by KVM_X86. + +Hiding all sub-configs when neither KVM_AMD nor KVM_INTEL is selected also +helps communicate to the user that nothing "interesting" is going on, e.g. + + --- Virtualization + Kernel-based Virtual Machine (KVM) support + < > KVM for Intel (and compatible) processors support + < > KVM for AMD processors support + +Fixes: ea4290d77bda ("KVM: x86: leave kvm.ko out of the build if no vendor module is requested") +Reviewed-by: David Hildenbrand +Reviewed-by: Xiaoyao Li +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/Kconfig | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index 2c86673155c9..9895fc3cd901 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -74,7 +74,7 @@ config KVM_WERROR + # FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning. + # Building KVM with -Werror and KASAN is still doable via enabling + # the kernel-wide WERROR=y. +- depends on KVM && ((EXPERT && !KASAN) || WERROR) ++ depends on KVM_X86 && ((EXPERT && !KASAN) || WERROR) + help + Add -Werror to the build flags for KVM. + +@@ -83,7 +83,7 @@ config KVM_WERROR + config KVM_SW_PROTECTED_VM + bool "Enable support for KVM software-protected VMs" + depends on EXPERT +- depends on KVM && X86_64 ++ depends on KVM_X86 && X86_64 + help + Enable support for KVM software-protected VMs. Currently, software- + protected VMs are purely a development and testing vehicle for +@@ -169,7 +169,7 @@ config KVM_AMD_SEV + config KVM_IOAPIC + bool "I/O APIC, PIC, and PIT emulation" + default y +- depends on KVM ++ depends on KVM_X86 + help + Provides support for KVM to emulate an I/O APIC, PIC, and PIT, i.e. + for full in-kernel APIC emulation. +@@ -179,7 +179,7 @@ config KVM_IOAPIC + config KVM_SMM + bool "System Management Mode emulation" + default y +- depends on KVM ++ depends on KVM_X86 + help + Provides support for KVM to emulate System Management Mode (SMM) + in virtual machines. This can be used by the virtual machine +@@ -189,7 +189,7 @@ config KVM_SMM + + config KVM_HYPERV + bool "Support for Microsoft Hyper-V emulation" +- depends on KVM ++ depends on KVM_X86 + default y + help + Provides KVM support for emulating Microsoft Hyper-V. This allows KVM +@@ -203,7 +203,7 @@ config KVM_HYPERV + + config KVM_XEN + bool "Support for Xen hypercall interface" +- depends on KVM ++ depends on KVM_X86 + help + Provides KVM support for the hosting Xen HVM guests and + passing Xen hypercalls to userspace. 
+@@ -213,7 +213,7 @@ config KVM_XEN + config KVM_PROVE_MMU + bool "Prove KVM MMU correctness" + depends on DEBUG_KERNEL +- depends on KVM ++ depends on KVM_X86 + depends on EXPERT + help + Enables runtime assertions in KVM's MMU that are too costly to enable +@@ -228,7 +228,7 @@ config KVM_EXTERNAL_WRITE_TRACKING + + config KVM_MAX_NR_VCPUS + int "Maximum number of vCPUs per KVM guest" +- depends on KVM ++ depends on KVM_X86 + range 1024 4096 + default 4096 if MAXSMP + default 1024 +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-Introduce-kvm_arch_supports_gmem.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-Introduce-kvm_arch_supports_gmem.patch deleted file mode 100644 index e83a2ef0e4b..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-Introduce-kvm_arch_supports_gmem.patch +++ /dev/null @@ -1,102 +0,0 @@ -From fd6bbab2fcae663ac196e4c68c8bcd8393b99d6a Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:32 +0100 -Subject: [PATCH 03/46] KVM: Introduce kvm_arch_supports_gmem() - -Introduce kvm_arch_supports_gmem() to explicitly indicate whether an -architecture supports guest_memfd. - -Previously, kvm_arch_has_private_mem() was used to check for guest_memfd -support. However, this conflated guest_memfd with "private" memory, -implying that guest_memfd was exclusively for CoCo VMs or other private -memory use cases. - -With the expansion of guest_memfd to support non-private memory, such as -shared host mappings, it is necessary to decouple these concepts. The -new kvm_arch_supports_gmem() function provides a clear way to check for -guest_memfd support. - -Reviewed-by: Ira Weiny -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg -Reviewed-by: Vlastimil Babka -Co-developed-by: David Hildenbrand -Signed-off-by: David Hildenbrand -Signed-off-by: Fuad Tabba ---- - arch/x86/include/asm/kvm_host.h | 4 +++- - include/linux/kvm_host.h | 11 +++++++++++ - virt/kvm/kvm_main.c | 4 ++-- - 3 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index acb25f935d84..bde811b2d303 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -2277,8 +2277,10 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, - - #ifdef CONFIG_KVM_GMEM - #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) -+#define kvm_arch_supports_gmem(kvm) kvm_arch_has_private_mem(kvm) - #else - #define kvm_arch_has_private_mem(kvm) false -+#define kvm_arch_supports_gmem(kvm) false - #endif - - #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state) -@@ -2331,7 +2333,7 @@ enum { - #define HF_SMM_INSIDE_NMI_MASK (1 << 2) - - # define KVM_MAX_NR_ADDRESS_SPACES 2 --/* SMM is currently unsupported for guests with private memory. */ -+/* SMM is currently unsupported for guests with guest_memfd private memory. */ - # define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2) - # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 
1 : 0) - # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 359baaae5e9f..ab1bde048034 100644 ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -729,6 +729,17 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm) - } - #endif - -+/* -+ * Arch code must define kvm_arch_supports_gmem if support for guest_memfd is -+ * enabled. -+ */ -+#if !defined(kvm_arch_supports_gmem) && !IS_ENABLED(CONFIG_KVM_GMEM) -+static inline bool kvm_arch_supports_gmem(struct kvm *kvm) -+{ -+ return false; -+} -+#endif -+ - #ifndef kvm_arch_has_readonly_mem - static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) - { -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index d5f0ec2d321f..162e2a69cc49 100644 ---- a/virt/kvm/kvm_main.c -+++ b/virt/kvm/kvm_main.c -@@ -1588,7 +1588,7 @@ static int check_memory_region_flags(struct kvm *kvm, - { - u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; - -- if (kvm_arch_has_private_mem(kvm)) -+ if (kvm_arch_supports_gmem(kvm)) - valid_flags |= KVM_MEM_GUEST_MEMFD; - - /* Dirty logging private memory is not currently supported. */ -@@ -4915,7 +4915,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) - #endif - #ifdef CONFIG_KVM_GMEM - case KVM_CAP_GUEST_MEMFD: -- return !kvm || kvm_arch_has_private_mem(kvm); -+ return !kvm || kvm_arch_supports_gmem(kvm); - #endif - default: - break; --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-x86-Select-KVM_GENERIC_PRIVATE_MEM-directly-from.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-x86-Select-KVM_GENERIC_PRIVATE_MEM-directly-from.patch new file mode 100644 index 00000000000..b5e09c6a178 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0003-KVM-x86-Select-KVM_GENERIC_PRIVATE_MEM-directly-from.patch @@ -0,0 +1,42 @@ +From 77d38342c84fd5a10a01fe3180aecc3acdac45dd Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:34 -0700 +Subject: [PATCH 03/49] KVM: x86: Select KVM_GENERIC_PRIVATE_MEM directly from + KVM_SW_PROTECTED_VM + +Now that KVM_SW_PROTECTED_VM doesn't have a hidden dependency on KVM_X86, +select KVM_GENERIC_PRIVATE_MEM from within KVM_SW_PROTECTED_VM instead of +conditionally selecting it from KVM_X86. + +No functional change intended. + +Reviewed-by: Xiaoyao Li +Reviewed-by: David Hildenbrand +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index 9895fc3cd901..402ba00fdf45 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -46,7 +46,6 @@ config KVM_X86 + select HAVE_KVM_PM_NOTIFIER if PM + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_GENERIC_PRE_FAULT_MEMORY +- select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM + select KVM_WERROR if WERROR + + config KVM +@@ -84,6 +83,7 @@ config KVM_SW_PROTECTED_VM + bool "Enable support for KVM software-protected VMs" + depends on EXPERT + depends on KVM_X86 && X86_64 ++ select KVM_GENERIC_PRIVATE_MEM + help + Enable support for KVM software-protected VMs. 
Currently, software- + protected VMs are purely a development and testing vehicle for +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Introduce-kvm-arch.supports_gmem.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Introduce-kvm-arch.supports_gmem.patch deleted file mode 100644 index 6587ea27c95..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Introduce-kvm-arch.supports_gmem.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 76851fca367e2d7666c3e709eab8cc016406f91b Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:33 +0100 -Subject: [PATCH 04/46] KVM: x86: Introduce kvm->arch.supports_gmem - -Introduce a new boolean member, supports_gmem, to kvm->arch. - -Previously, the has_private_mem boolean within kvm->arch was implicitly -used to indicate whether guest_memfd was supported for a KVM instance. -However, with the broader support for guest_memfd, it's not exclusively -for private or confidential memory. Therefore, it's necessary to -distinguish between a VM's general guest_memfd capabilities and its -support for private memory. - -This new supports_gmem member will now explicitly indicate guest_memfd -support for a given VM, allowing has_private_mem to represent only -support for private memory. - -Reviewed-by: Ira Weiny -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg -Reviewed-by: Vlastimil Babka -Co-developed-by: David Hildenbrand -Signed-off-by: David Hildenbrand -Signed-off-by: Fuad Tabba ---- - arch/x86/include/asm/kvm_host.h | 3 ++- - arch/x86/kvm/svm/svm.c | 1 + - arch/x86/kvm/vmx/tdx.c | 1 + - arch/x86/kvm/x86.c | 4 ++-- - 4 files changed, 6 insertions(+), 3 deletions(-) - -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index bde811b2d303..938b5be03d33 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -1348,6 +1348,7 @@ struct kvm_arch { - u8 mmu_valid_gen; - u8 vm_type; - bool has_private_mem; -+ bool supports_gmem; - bool has_protected_state; - bool pre_fault_allowed; - struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; -@@ -2277,7 +2278,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, - - #ifdef CONFIG_KVM_GMEM - #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) --#define kvm_arch_supports_gmem(kvm) kvm_arch_has_private_mem(kvm) -+#define kvm_arch_supports_gmem(kvm) ((kvm)->arch.supports_gmem) - #else - #define kvm_arch_has_private_mem(kvm) false - #define kvm_arch_supports_gmem(kvm) false -diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index ab9b947dbf4f..d1c484eaa8ad 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ b/arch/x86/kvm/svm/svm.c -@@ -5181,6 +5181,7 @@ static int svm_vm_init(struct kvm *kvm) - to_kvm_sev_info(kvm)->need_init = true; - - kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM); -+ kvm->arch.supports_gmem = (type == KVM_X86_SNP_VM); - kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem; - } - -diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c -index f31ccdeb905b..a3db6df245ee 100644 ---- a/arch/x86/kvm/vmx/tdx.c -+++ b/arch/x86/kvm/vmx/tdx.c -@@ -632,6 +632,7 @@ int tdx_vm_init(struct kvm *kvm) - - kvm->arch.has_protected_state = true; - kvm->arch.has_private_mem = true; -+ kvm->arch.supports_gmem = true; - kvm->arch.disabled_quirks |= KVM_X86_QUIRK_IGNORE_GUEST_PAT; - - /* -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 357b9e3a6cef..adbdc2cc97d4 100644 ---- a/arch/x86/kvm/x86.c -+++ 
b/arch/x86/kvm/x86.c -@@ -12780,8 +12780,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) - return -EINVAL; - - kvm->arch.vm_type = type; -- kvm->arch.has_private_mem = -- (type == KVM_X86_SW_PROTECTED_VM); -+ kvm->arch.has_private_mem = (type == KVM_X86_SW_PROTECTED_VM); -+ kvm->arch.supports_gmem = (type == KVM_X86_SW_PROTECTED_VM); - /* Decided by the vendor code for other VM types. */ - kvm->arch.pre_fault_allowed = - type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM; --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Select-TDX-s-KVM_GENERIC_xxx-dependencies-if.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Select-TDX-s-KVM_GENERIC_xxx-dependencies-if.patch new file mode 100644 index 00000000000..1d33e531e57 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0004-KVM-x86-Select-TDX-s-KVM_GENERIC_xxx-dependencies-if.patch @@ -0,0 +1,43 @@ +From 746288ca13800a1aeec74f2a4527d6db2306db59 Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:35 -0700 +Subject: [PATCH 04/49] KVM: x86: Select TDX's KVM_GENERIC_xxx dependencies iff + CONFIG_KVM_INTEL_TDX=y + +Select KVM_GENERIC_PRIVATE_MEM and KVM_GENERIC_MEMORY_ATTRIBUTES directly +from KVM_INTEL_TDX, i.e. if and only if TDX support is fully enabled in +KVM. There is no need to enable KVM's private memory support just because +the core kernel's INTEL_TDX_HOST is enabled. + +Reviewed-by: Xiaoyao Li +Reviewed-by: David Hildenbrand +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/Kconfig | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index 402ba00fdf45..13ab7265b505 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -95,8 +95,6 @@ config KVM_SW_PROTECTED_VM + config KVM_INTEL + tristate "KVM for Intel (and compatible) processors support" + depends on KVM && IA32_FEAT_CTL +- select KVM_GENERIC_PRIVATE_MEM if INTEL_TDX_HOST +- select KVM_GENERIC_MEMORY_ATTRIBUTES if INTEL_TDX_HOST + help + Provides support for KVM on processors equipped with Intel's VT + extensions, a.k.a. Virtual Machine Extensions (VMX). +@@ -135,6 +133,8 @@ config KVM_INTEL_TDX + bool "Intel Trust Domain Extensions (TDX) support" + default y + depends on INTEL_TDX_HOST ++ select KVM_GENERIC_PRIVATE_MEM ++ select KVM_GENERIC_MEMORY_ATTRIBUTES + help + Provides support for launching Intel Trust Domain Extensions (TDX) + confidential VMs on Intel processors. +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch new file mode 100644 index 00000000000..6c73c02f499 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-CONFIG_KVM_GENERIC_PRIVATE_MEM-to-CONFIG_.patch @@ -0,0 +1,144 @@ +From 0f72f7fe353052120eb0853c9fee863c373c7eb9 Mon Sep 17 00:00:00 2001 +From: Fuad Tabba +Date: Tue, 29 Jul 2025 15:54:36 -0700 +Subject: [PATCH 05/49] KVM: Rename CONFIG_KVM_GENERIC_PRIVATE_MEM to + CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE + +The original name was vague regarding its functionality. This Kconfig +option specifically enables and gates the kvm_gmem_populate() function, +which is responsible for populating a GPA range with guest data. 
+ +The new name, HAVE_KVM_ARCH_GMEM_POPULATE, describes the purpose of the +option: to enable arch-specific guest_memfd population mechanisms. It +also follows the same pattern as the other HAVE_KVM_ARCH_* configuration +options. + +This improves clarity for developers and ensures the name accurately +reflects the functionality it controls, especially as guest_memfd +support expands beyond purely "private" memory scenarios. + +Temporarily keep KVM_GENERIC_PRIVATE_MEM as an x86-only config so as to +minimize churn, and to hopefully make it easier to see what features +require HAVE_KVM_ARCH_GMEM_POPULATE. On that note, omit GMEM_POPULATE +for KVM_X86_SW_PROTECTED_VM, as regular ol' memset() suffices for +software-protected VMs. + +As for KVM_GENERIC_PRIVATE_MEM, a future change will select KVM_GUEST_MEMFD +for all 64-bit KVM builds, at which point the intermediate config will +become obsolete and can/will be dropped. + +Reviewed-by: Ira Weiny +Reviewed-by: Gavin Shan +Reviewed-by: Shivank Garg +Reviewed-by: Vlastimil Babka +Co-developed-by: David Hildenbrand +Signed-off-by: David Hildenbrand +Signed-off-by: Fuad Tabba +Reviewed-by: Xiaoyao Li +Co-developed-by: Sean Christopherson +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/Kconfig | 14 ++++++++++---- + include/linux/kvm_host.h | 2 +- + virt/kvm/Kconfig | 9 ++++----- + virt/kvm/guest_memfd.c | 2 +- + 4 files changed, 16 insertions(+), 11 deletions(-) + +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index 13ab7265b505..c763446d9b9f 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -79,11 +79,16 @@ config KVM_WERROR + + If in doubt, say "N". + ++config KVM_X86_PRIVATE_MEM ++ select KVM_GENERIC_MEMORY_ATTRIBUTES ++ select KVM_GUEST_MEMFD ++ bool ++ + config KVM_SW_PROTECTED_VM + bool "Enable support for KVM software-protected VMs" + depends on EXPERT + depends on KVM_X86 && X86_64 +- select KVM_GENERIC_PRIVATE_MEM ++ select KVM_X86_PRIVATE_MEM + help + Enable support for KVM software-protected VMs. Currently, software- + protected VMs are purely a development and testing vehicle for +@@ -133,8 +138,8 @@ config KVM_INTEL_TDX + bool "Intel Trust Domain Extensions (TDX) support" + default y + depends on INTEL_TDX_HOST +- select KVM_GENERIC_PRIVATE_MEM +- select KVM_GENERIC_MEMORY_ATTRIBUTES ++ select KVM_X86_PRIVATE_MEM ++ select HAVE_KVM_ARCH_GMEM_POPULATE + help + Provides support for launching Intel Trust Domain Extensions (TDX) + confidential VMs on Intel processors. 
+@@ -157,9 +162,10 @@ config KVM_AMD_SEV + depends on KVM_AMD && X86_64 + depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) + select ARCH_HAS_CC_PLATFORM +- select KVM_GENERIC_PRIVATE_MEM ++ select KVM_X86_PRIVATE_MEM + select HAVE_KVM_ARCH_GMEM_PREPARE + select HAVE_KVM_ARCH_GMEM_INVALIDATE ++ select HAVE_KVM_ARCH_GMEM_POPULATE + help + Provides support for launching encrypted VMs which use Secure + Encrypted Virtualization (SEV), Secure Encrypted Virtualization with +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 8cdc0b3cc1b1..ddfb6cfe20a6 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -2534,7 +2534,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, + int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); + #endif + +-#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM ++#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE + /** + * kvm_gmem_populate() - Populate/prepare a GPA range with guest data + * +diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig +index e4b400feff94..1b7d5be0b6c4 100644 +--- a/virt/kvm/Kconfig ++++ b/virt/kvm/Kconfig +@@ -116,11 +116,6 @@ config KVM_GUEST_MEMFD + select XARRAY_MULTI + bool + +-config KVM_GENERIC_PRIVATE_MEM +- select KVM_GENERIC_MEMORY_ATTRIBUTES +- select KVM_GUEST_MEMFD +- bool +- + config HAVE_KVM_ARCH_GMEM_PREPARE + bool + depends on KVM_GUEST_MEMFD +@@ -128,3 +123,7 @@ config HAVE_KVM_ARCH_GMEM_PREPARE + config HAVE_KVM_ARCH_GMEM_INVALIDATE + bool + depends on KVM_GUEST_MEMFD ++ ++config HAVE_KVM_ARCH_GMEM_POPULATE ++ bool ++ depends on KVM_GUEST_MEMFD +diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c +index 7d85cc33c0bb..b2b50560e80e 100644 +--- a/virt/kvm/guest_memfd.c ++++ b/virt/kvm/guest_memfd.c +@@ -627,7 +627,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, + } + EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn); + +-#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM ++#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE + long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages, + kvm_gmem_populate_cb post_populate, void *opaque) + { +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch similarity index 80% rename from resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch index 13f366af656..55e9e4b53a3 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0005-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Rename-kvm_slot_can_be_private-to-kvm_slot_has_g.patch @@ -1,7 +1,7 @@ -From a56ba2f9a2ec7436126f23997e502543e0e4bbe0 Mon Sep 17 00:00:00 2001 +From 31e60b5c346e1bf2ccce5cb32d2379cb8f7dea30 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:34 +0100 -Subject: [PATCH 05/46] KVM: Rename kvm_slot_can_be_private() to +Date: Tue, 29 Jul 2025 15:54:37 -0700 +Subject: [PATCH 06/49] KVM: Rename kvm_slot_can_be_private() to kvm_slot_has_gmem() Rename kvm_slot_can_be_private() to kvm_slot_has_gmem() to improve @@ -21,9 +21,11 @@ Reviewed-by: Ira Weiny Reviewed-by: Gavin Shan Reviewed-by: Shivank Garg Reviewed-by: Vlastimil Babka +Reviewed-by: Xiaoyao 
Li Co-developed-by: David Hildenbrand Signed-off-by: David Hildenbrand Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 4 ++-- arch/x86/kvm/svm/sev.c | 4 ++-- @@ -32,10 +34,10 @@ Signed-off-by: Fuad Tabba 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 4e06e2e89a8f..213904daf1e5 100644 +index 6e838cb6c9e1..fdc2824755ee 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c -@@ -3285,7 +3285,7 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm, +@@ -3312,7 +3312,7 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm, int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) { @@ -44,7 +46,7 @@ index 4e06e2e89a8f..213904daf1e5 100644 kvm_mem_is_private(kvm, gfn); return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private); -@@ -4498,7 +4498,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, +@@ -4551,7 +4551,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, { int max_order, r; @@ -54,10 +56,10 @@ index 4e06e2e89a8f..213904daf1e5 100644 return -EFAULT; } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c -index b201f77fcd49..687392c5bf5d 100644 +index 2fbdebf79fbb..7744c210f947 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c -@@ -2323,7 +2323,7 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp) +@@ -2365,7 +2365,7 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp) mutex_lock(&kvm->slots_lock); memslot = gfn_to_memslot(kvm, params.gfn_start); @@ -66,7 +68,7 @@ index b201f77fcd49..687392c5bf5d 100644 ret = -EINVAL; goto out; } -@@ -4678,7 +4678,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) +@@ -4719,7 +4719,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) } slot = gfn_to_memslot(kvm, gfn); @@ -76,10 +78,10 @@ index b201f77fcd49..687392c5bf5d 100644 gpa); return; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index ab1bde048034..ed00c2b40e4b 100644 +index ddfb6cfe20a6..4c5e0a898652 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h -@@ -614,7 +614,7 @@ struct kvm_memory_slot { +@@ -615,7 +615,7 @@ struct kvm_memory_slot { #endif }; @@ -89,10 +91,10 @@ index ab1bde048034..ed00c2b40e4b 100644 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD); } diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index befea51bbc75..6db515833f61 100644 +index b2b50560e80e..a99e11b8b77f 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c -@@ -654,7 +654,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long +@@ -643,7 +643,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long return -EINVAL; slot = gfn_to_memslot(kvm, start_gfn); diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Fix-comments-that-refer-to-slots_lock.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comments-that-refer-to-slots_lock.patch similarity index 80% rename from resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Fix-comments-that-refer-to-slots_lock.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comments-that-refer-to-slots_lock.patch index 489a5e7cd6b..b1ac9d7c402 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0006-KVM-Fix-comments-that-refer-to-slots_lock.patch +++ 
b/resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comments-that-refer-to-slots_lock.patch @@ -1,7 +1,7 @@ -From ffbe742826fa64c4af474398ce274b58338f3e48 Mon Sep 17 00:00:00 2001 +From a26ec49cecb4ab11cba6e770904ee5f79b29d2b0 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:35 +0100 -Subject: [PATCH 06/46] KVM: Fix comments that refer to slots_lock +Date: Tue, 29 Jul 2025 15:54:38 -0700 +Subject: [PATCH 07/49] KVM: Fix comments that refer to slots_lock Fix comments so that they refer to slots_lock instead of slots_locks (remove trailing s). @@ -11,17 +11,19 @@ Reviewed-by: Ira Weiny Reviewed-by: Gavin Shan Reviewed-by: Shivank Garg Reviewed-by: Vlastimil Babka +Reviewed-by: Xiaoyao Li Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index ed00c2b40e4b..9c654dfb6dce 100644 +index 4c5e0a898652..5c25b03d3d50 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h -@@ -870,7 +870,7 @@ struct kvm { +@@ -860,7 +860,7 @@ struct kvm { struct notifier_block pm_notifier; #endif #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES @@ -31,7 +33,7 @@ index ed00c2b40e4b..9c654dfb6dce 100644 #endif char stats_id[KVM_STATS_NAME_SIZE]; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index 162e2a69cc49..46bddac1dacd 100644 +index 25a94eed75fd..aa86dfd757db 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -331,7 +331,7 @@ void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch similarity index 75% rename from resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch index 666827c658a..e0c94d5fd0a 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0007-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-Fix-comment-that-refers-to-kvm-uapi-header-path.patch @@ -1,7 +1,7 @@ -From 2b0fd6a86bfa830aee045aaab2cd21616ee2df7d Mon Sep 17 00:00:00 2001 +From a2fbf5ba7d74d4039918211c6fc95e40ae28f1d0 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:36 +0100 -Subject: [PATCH 07/46] KVM: Fix comment that refers to kvm uapi header path +Date: Tue, 29 Jul 2025 15:54:39 -0700 +Subject: [PATCH 08/49] KVM: Fix comment that refers to kvm uapi header path The comment that points to the path where the user-visible memslot flags are refers to an outdated path and has a typo. 
@@ -12,13 +12,15 @@ Reviewed-by: David Hildenbrand Reviewed-by: Gavin Shan Reviewed-by: Shivank Garg Reviewed-by: Vlastimil Babka +Reviewed-by: Xiaoyao Li Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 9c654dfb6dce..1ec71648824c 100644 +index 5c25b03d3d50..56ea8c862cfd 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -52,7 +52,7 @@ diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-guest_memfd-Allow-host-to-map-guest_memfd-pages.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-guest_memfd-Allow-host-to-map-guest_memfd-pages.patch deleted file mode 100644 index c1fd2be3a3e..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0008-KVM-guest_memfd-Allow-host-to-map-guest_memfd-pages.patch +++ /dev/null @@ -1,216 +0,0 @@ -From 86e455716787a2e9361fb48458d38f5731e8666c Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:37 +0100 -Subject: [PATCH 08/46] KVM: guest_memfd: Allow host to map guest_memfd pages - -Introduce the core infrastructure to enable host userspace to mmap() -guest_memfd-backed memory. This is needed for several evolving KVM use -cases: - -* Non-CoCo VM backing: Allows VMMs like Firecracker to run guests - entirely backed by guest_memfd, even for non-CoCo VMs [1]. This - provides a unified memory management model and simplifies guest memory - handling. - -* Direct map removal for enhanced security: This is an important step - for direct map removal of guest memory [2]. By allowing host userspace - to fault in guest_memfd pages directly, we can avoid maintaining host - kernel direct maps of guest memory. This provides additional hardening - against Spectre-like transient execution attacks by removing a - potential attack surface within the kernel. - -* Future guest_memfd features: This also lays the groundwork for future - enhancements to guest_memfd, such as supporting huge pages and - enabling in-place sharing of guest memory with the host for CoCo - platforms that permit it [3]. - -Therefore, enable the basic mmap and fault handling logic within -guest_memfd. However, this functionality is not yet exposed to userspace -and remains inactive until two conditions are met in subsequent patches: - -* Kconfig Gate (CONFIG_KVM_GMEM_SUPPORTS_MMAP): A new Kconfig option, - KVM_GMEM_SUPPORTS_MMAP, is introduced later in this series. This - option gates the compilation and availability of this mmap - functionality at a system level. While the code changes in this patch - might seem small, the Kconfig option is introduced to explicitly - signal the intent to enable this new capability and to provide a clear - compile-time switch for it. It also helps ensure that the necessary - architecture-specific glue (like kvm_arch_supports_gmem_mmap) is - properly defined. - -* Per-instance opt-in (GUEST_MEMFD_FLAG_MMAP): On a per-instance basis, - this functionality is enabled by the guest_memfd flag - GUEST_MEMFD_FLAG_MMAP, which will be set in the KVM_CREATE_GUEST_MEMFD - ioctl. This flag is crucial because when host userspace maps - guest_memfd pages, KVM must *not* manage the these memory regions in - the same way it does for traditional KVM memory slots. The presence of - GUEST_MEMFD_FLAG_MMAP on a guest_memfd instance allows mmap() and - faulting of guest_memfd memory to host userspace. 
Additionally, it - informs KVM to always consume guest faults to this memory from - guest_memfd, regardless of whether it is a shared or a private fault. - This opt-in mechanism ensures compatibility and prevents conflicts - with existing KVM memory management. This is a per-guest_memfd flag - rather than a per-memslot or per-VM capability because the ability to - mmap directly applies to the specific guest_memfd object, regardless - of how it might be used within various memory slots or VMs. - -[1] https://github.com/firecracker-microvm/firecracker/tree/feature/secret-hiding -[2] https://lore.kernel.org/linux-mm/cc1bb8e9bc3e1ab637700a4d3defeec95b55060a.camel@amazon.com -[3] https://lore.kernel.org/all/c1c9591d-218a-495c-957b-ba356c8f8e09@redhat.com/T/#u - -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg -Acked-by: David Hildenbrand -Co-developed-by: Ackerley Tng -Signed-off-by: Ackerley Tng -Signed-off-by: Fuad Tabba ---- - include/linux/kvm_host.h | 13 +++++++ - include/uapi/linux/kvm.h | 1 + - virt/kvm/Kconfig | 4 +++ - virt/kvm/guest_memfd.c | 73 ++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 91 insertions(+) - -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 1ec71648824c..9ac21985f3b5 100644 ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -740,6 +740,19 @@ static inline bool kvm_arch_supports_gmem(struct kvm *kvm) - } - #endif - -+/* -+ * Returns true if this VM supports mmap() in guest_memfd. -+ * -+ * Arch code must define kvm_arch_supports_gmem_mmap if support for guest_memfd -+ * is enabled. -+ */ -+#if !defined(kvm_arch_supports_gmem_mmap) -+static inline bool kvm_arch_supports_gmem_mmap(struct kvm *kvm) -+{ -+ return false; -+} -+#endif -+ - #ifndef kvm_arch_has_readonly_mem - static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) - { -diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index 7a4c35ff03fe..3beafbf306af 100644 ---- a/include/uapi/linux/kvm.h -+++ b/include/uapi/linux/kvm.h -@@ -1596,6 +1596,7 @@ struct kvm_memory_attributes { - #define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) - - #define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) -+#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0) - - struct kvm_create_guest_memfd { - __u64 size; -diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig -index 559c93ad90be..fa4acbedb953 100644 ---- a/virt/kvm/Kconfig -+++ b/virt/kvm/Kconfig -@@ -128,3 +128,7 @@ config HAVE_KVM_ARCH_GMEM_PREPARE - config HAVE_KVM_ARCH_GMEM_INVALIDATE - bool - depends on KVM_GMEM -+ -+config KVM_GMEM_SUPPORTS_MMAP -+ select KVM_GMEM -+ bool -diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index 6db515833f61..07a4b165471d 100644 ---- a/virt/kvm/guest_memfd.c -+++ b/virt/kvm/guest_memfd.c -@@ -312,7 +312,77 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) - return gfn - slot->base_gfn + slot->gmem.pgoff; - } - -+static bool kvm_gmem_supports_mmap(struct inode *inode) -+{ -+ const u64 flags = (u64)inode->i_private; -+ -+ if (!IS_ENABLED(CONFIG_KVM_GMEM_SUPPORTS_MMAP)) -+ return false; -+ -+ return flags & GUEST_MEMFD_FLAG_MMAP; -+} -+ -+static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) -+{ -+ struct inode *inode = file_inode(vmf->vma->vm_file); -+ struct folio *folio; -+ vm_fault_t ret = VM_FAULT_LOCKED; -+ -+ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode)) -+ return VM_FAULT_SIGBUS; -+ -+ folio = kvm_gmem_get_folio(inode, vmf->pgoff); -+ if (IS_ERR(folio)) { -+ int err = 
PTR_ERR(folio); -+ -+ if (err == -EAGAIN) -+ return VM_FAULT_RETRY; -+ -+ return vmf_error(err); -+ } -+ -+ if (WARN_ON_ONCE(folio_test_large(folio))) { -+ ret = VM_FAULT_SIGBUS; -+ goto out_folio; -+ } -+ -+ if (!folio_test_uptodate(folio)) { -+ clear_highpage(folio_page(folio, 0)); -+ kvm_gmem_mark_prepared(folio); -+ } -+ -+ vmf->page = folio_file_page(folio, vmf->pgoff); -+ -+out_folio: -+ if (ret != VM_FAULT_LOCKED) { -+ folio_unlock(folio); -+ folio_put(folio); -+ } -+ -+ return ret; -+} -+ -+static const struct vm_operations_struct kvm_gmem_vm_ops = { -+ .fault = kvm_gmem_fault_user_mapping, -+}; -+ -+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ if (!kvm_gmem_supports_mmap(file_inode(file))) -+ return -ENODEV; -+ -+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) != -+ (VM_SHARED | VM_MAYSHARE)) { -+ return -EINVAL; -+ } -+ -+ vma->vm_ops = &kvm_gmem_vm_ops; -+ -+ return 0; -+} -+ - static struct file_operations kvm_gmem_fops = { -+ .mmap = kvm_gmem_mmap, - .open = generic_file_open, - .release = kvm_gmem_release, - .fallocate = kvm_gmem_fallocate, -@@ -463,6 +533,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args) - u64 flags = args->flags; - u64 valid_flags = 0; - -+ if (kvm_arch_supports_gmem_mmap(kvm)) -+ valid_flags |= GUEST_MEMFD_FLAG_MMAP; -+ - if (flags & ~valid_flags) - return -EINVAL; - --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-x86-Enable-KVM_GUEST_MEMFD-for-all-64-bit-builds.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-x86-Enable-KVM_GUEST_MEMFD-for-all-64-bit-builds.patch new file mode 100644 index 00000000000..46490d4b69c --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-x86-Enable-KVM_GUEST_MEMFD-for-all-64-bit-builds.patch @@ -0,0 +1,144 @@ +From 7b55de369a61bad54d1a110b743c446e2d350c47 Mon Sep 17 00:00:00 2001 +From: Fuad Tabba +Date: Tue, 29 Jul 2025 15:54:40 -0700 +Subject: [PATCH 09/49] KVM: x86: Enable KVM_GUEST_MEMFD for all 64-bit builds + +Enable KVM_GUEST_MEMFD for all KVM x86 64-bit builds, i.e. for "default" +VM types when running on 64-bit KVM. This will allow using guest_memfd +to back non-private memory for all VM shapes, by supporting mmap() on +guest_memfd. + +Opportunistically clean up various conditionals that become tautologies +once x86 selects KVM_GUEST_MEMFD more broadly. Specifically, because +SW protected VMs, SEV, and TDX are all 64-bit only, private memory no +longer needs to take explicit dependencies on KVM_GUEST_MEMFD, because +it is effectively a prerequisite. 
+ +Suggested-by: Sean Christopherson +Signed-off-by: Fuad Tabba +Reviewed-by: Xiaoyao Li +Reviewed-by: David Hildenbrand +Signed-off-by: Sean Christopherson +--- + arch/x86/include/asm/kvm_host.h | 4 +--- + arch/x86/kvm/Kconfig | 12 ++++-------- + include/linux/kvm_host.h | 9 ++------- + virt/kvm/kvm_main.c | 4 ++-- + 4 files changed, 9 insertions(+), 20 deletions(-) + +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 7b0f2b3e492d..50366a1ca192 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -2276,10 +2276,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, + int tdp_max_root_level, int tdp_huge_page_level); + + +-#ifdef CONFIG_KVM_GUEST_MEMFD ++#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES + #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) +-#else +-#define kvm_arch_has_private_mem(kvm) false + #endif + + #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state) +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index c763446d9b9f..4e43923656d0 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -47,6 +47,7 @@ config KVM_X86 + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_GENERIC_PRE_FAULT_MEMORY + select KVM_WERROR if WERROR ++ select KVM_GUEST_MEMFD if X86_64 + + config KVM + tristate "Kernel-based Virtual Machine (KVM) support" +@@ -79,16 +80,11 @@ config KVM_WERROR + + If in doubt, say "N". + +-config KVM_X86_PRIVATE_MEM +- select KVM_GENERIC_MEMORY_ATTRIBUTES +- select KVM_GUEST_MEMFD +- bool +- + config KVM_SW_PROTECTED_VM + bool "Enable support for KVM software-protected VMs" + depends on EXPERT + depends on KVM_X86 && X86_64 +- select KVM_X86_PRIVATE_MEM ++ select KVM_GENERIC_MEMORY_ATTRIBUTES + help + Enable support for KVM software-protected VMs. Currently, software- + protected VMs are purely a development and testing vehicle for +@@ -138,7 +134,7 @@ config KVM_INTEL_TDX + bool "Intel Trust Domain Extensions (TDX) support" + default y + depends on INTEL_TDX_HOST +- select KVM_X86_PRIVATE_MEM ++ select KVM_GENERIC_MEMORY_ATTRIBUTES + select HAVE_KVM_ARCH_GMEM_POPULATE + help + Provides support for launching Intel Trust Domain Extensions (TDX) +@@ -162,7 +158,7 @@ config KVM_AMD_SEV + depends on KVM_AMD && X86_64 + depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) + select ARCH_HAS_CC_PLATFORM +- select KVM_X86_PRIVATE_MEM ++ select KVM_GENERIC_MEMORY_ATTRIBUTES + select HAVE_KVM_ARCH_GMEM_PREPARE + select HAVE_KVM_ARCH_GMEM_INVALIDATE + select HAVE_KVM_ARCH_GMEM_POPULATE +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 56ea8c862cfd..4d1c44622056 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -719,11 +719,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) + } + #endif + +-/* +- * Arch code must define kvm_arch_has_private_mem if support for guest_memfd is +- * enabled. 
+- */ +-#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_GUEST_MEMFD) ++#ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES + static inline bool kvm_arch_has_private_mem(struct kvm *kvm) + { + return false; +@@ -2505,8 +2501,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, + + static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) + { +- return IS_ENABLED(CONFIG_KVM_GUEST_MEMFD) && +- kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; ++ return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; + } + #else + static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index aa86dfd757db..4f57cb92e109 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -1588,7 +1588,7 @@ static int check_memory_region_flags(struct kvm *kvm, + { + u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; + +- if (kvm_arch_has_private_mem(kvm)) ++ if (IS_ENABLED(CONFIG_KVM_GUEST_MEMFD)) + valid_flags |= KVM_MEM_GUEST_MEMFD; + + /* Dirty logging private memory is not currently supported. */ +@@ -4917,7 +4917,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) + #endif + #ifdef CONFIG_KVM_GUEST_MEMFD + case KVM_CAP_GUEST_MEMFD: +- return !kvm || kvm_arch_has_private_mem(kvm); ++ return 1; + #endif + default: + break; +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-guest_memfd-Add-plumbing-to-host-to-map-guest_me.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-guest_memfd-Add-plumbing-to-host-to-map-guest_me.patch new file mode 100644 index 00000000000..141e1915f7d --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-guest_memfd-Add-plumbing-to-host-to-map-guest_me.patch @@ -0,0 +1,185 @@ +From b280399f5bc244bc6f443a0a67375c400f1a44b6 Mon Sep 17 00:00:00 2001 +From: Fuad Tabba +Date: Tue, 29 Jul 2025 15:54:41 -0700 +Subject: [PATCH 10/49] KVM: guest_memfd: Add plumbing to host to map + guest_memfd pages + +Introduce the core infrastructure to enable host userspace to mmap() +guest_memfd-backed memory. This is needed for several evolving KVM use +cases: + +* Non-CoCo VM backing: Allows VMMs like Firecracker to run guests + entirely backed by guest_memfd, even for non-CoCo VMs [1]. This + provides a unified memory management model and simplifies guest memory + handling. + +* Direct map removal for enhanced security: This is an important step + for direct map removal of guest memory [2]. By allowing host userspace + to fault in guest_memfd pages directly, we can avoid maintaining host + kernel direct maps of guest memory. This provides additional hardening + against Spectre-like transient execution attacks by removing a + potential attack surface within the kernel. + +* Future guest_memfd features: This also lays the groundwork for future + enhancements to guest_memfd, such as supporting huge pages and + enabling in-place sharing of guest memory with the host for CoCo + platforms that permit it [3]. + +Enable the basic mmap and fault handling logic within guest_memfd, but +hold off on allowing userspace to actually do mmap() until the architecture +support is also in place.
+ +[1] https://github.com/firecracker-microvm/firecracker/tree/feature/secret-hiding +[2] https://lore.kernel.org/linux-mm/cc1bb8e9bc3e1ab637700a4d3defeec95b55060a.camel@amazon.com +[3] https://lore.kernel.org/all/c1c9591d-218a-495c-957b-ba356c8f8e09@redhat.com/T/#u + +Reviewed-by: Gavin Shan +Reviewed-by: Shivank Garg +Acked-by: David Hildenbrand +Co-developed-by: Ackerley Tng +Signed-off-by: Ackerley Tng +Signed-off-by: Fuad Tabba +Reviewed-by: Xiaoyao Li +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/x86.c | 11 +++++++ + include/linux/kvm_host.h | 4 +++ + virt/kvm/guest_memfd.c | 70 ++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 85 insertions(+) + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index a1c49bc681c4..e5cd54ba1eaa 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -13518,6 +13518,16 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) + } + EXPORT_SYMBOL_GPL(kvm_arch_no_poll); + ++#ifdef CONFIG_KVM_GUEST_MEMFD ++/* ++ * KVM doesn't yet support mmap() on guest_memfd for VMs with private memory ++ * (the private vs. shared tracking needs to be moved into guest_memfd). ++ */ ++bool kvm_arch_supports_gmem_mmap(struct kvm *kvm) ++{ ++ return !kvm_arch_has_private_mem(kvm); ++} ++ + #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE + int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order) + { +@@ -13531,6 +13541,7 @@ void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) + kvm_x86_call(gmem_invalidate)(start, end); + } + #endif ++#endif + + int kvm_spec_ctrl_test_value(u64 value) + { +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 4d1c44622056..26bad600f9fa 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -726,6 +726,10 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm) + } + #endif + ++#ifdef CONFIG_KVM_GUEST_MEMFD ++bool kvm_arch_supports_gmem_mmap(struct kvm *kvm); ++#endif ++ + #ifndef kvm_arch_has_readonly_mem + static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) + { +diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c +index a99e11b8b77f..67e7cd7210ef 100644 +--- a/virt/kvm/guest_memfd.c ++++ b/virt/kvm/guest_memfd.c +@@ -312,7 +312,72 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) + return gfn - slot->base_gfn + slot->gmem.pgoff; + } + ++static bool kvm_gmem_supports_mmap(struct inode *inode) ++{ ++ return false; ++} ++ ++static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) ++{ ++ struct inode *inode = file_inode(vmf->vma->vm_file); ++ struct folio *folio; ++ vm_fault_t ret = VM_FAULT_LOCKED; ++ ++ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode)) ++ return VM_FAULT_SIGBUS; ++ ++ folio = kvm_gmem_get_folio(inode, vmf->pgoff); ++ if (IS_ERR(folio)) { ++ int err = PTR_ERR(folio); ++ ++ if (err == -EAGAIN) ++ return VM_FAULT_RETRY; ++ ++ return vmf_error(err); ++ } ++ ++ if (WARN_ON_ONCE(folio_test_large(folio))) { ++ ret = VM_FAULT_SIGBUS; ++ goto out_folio; ++ } ++ ++ if (!folio_test_uptodate(folio)) { ++ clear_highpage(folio_page(folio, 0)); ++ kvm_gmem_mark_prepared(folio); ++ } ++ ++ vmf->page = folio_file_page(folio, vmf->pgoff); ++ ++out_folio: ++ if (ret != VM_FAULT_LOCKED) { ++ folio_unlock(folio); ++ folio_put(folio); ++ } ++ ++ return ret; ++} ++ ++static const struct vm_operations_struct kvm_gmem_vm_ops = { ++ .fault = kvm_gmem_fault_user_mapping, ++}; ++ ++static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ if 
(!kvm_gmem_supports_mmap(file_inode(file))) ++ return -ENODEV; ++ ++ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) != ++ (VM_SHARED | VM_MAYSHARE)) { ++ return -EINVAL; ++ } ++ ++ vma->vm_ops = &kvm_gmem_vm_ops; ++ ++ return 0; ++} ++ + static struct file_operations kvm_gmem_fops = { ++ .mmap = kvm_gmem_mmap, + .open = generic_file_open, + .release = kvm_gmem_release, + .fallocate = kvm_gmem_fallocate, +@@ -391,6 +456,11 @@ static const struct inode_operations kvm_gmem_iops = { + .setattr = kvm_gmem_setattr, + }; + ++bool __weak kvm_arch_supports_gmem_mmap(struct kvm *kvm) ++{ ++ return true; ++} ++ + static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) + { + const char *anon_name = "[kvm-gmem]"; +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-x86-mmu-Generalize-private_max_mapping_level-x86.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-x86-mmu-Generalize-private_max_mapping_level-x86.patch deleted file mode 100644 index 0db7dc3db72..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0010-KVM-x86-mmu-Generalize-private_max_mapping_level-x86.patch +++ /dev/null @@ -1,241 +0,0 @@ -From 79a313d6ad47fed8265d9a2c39b84d057b48eddd Mon Sep 17 00:00:00 2001 -From: Ackerley Tng -Date: Tue, 15 Jul 2025 10:33:39 +0100 -Subject: [PATCH 10/46] KVM: x86/mmu: Generalize private_max_mapping_level x86 - op to max_mapping_level - -Generalize the private_max_mapping_level x86 operation to -max_mapping_level. - -The private_max_mapping_level operation allows platform-specific code to -limit mapping levels (e.g., forcing 4K pages for certain memory types). -While it was previously used exclusively for private memory, guest_memfd -can now back both private and non-private memory. Platforms may have -specific mapping level restrictions that apply to guest_memfd memory -regardless of its privacy attribute. Therefore, generalize this -operation. - -Rename the operation: Removes the "private" prefix to reflect its -broader applicability to any guest_memfd-backed memory. - -Pass kvm_page_fault information: The operation is updated to receive a -struct kvm_page_fault object instead of just the pfn. This provides -platform-specific implementations (e.g., for TDX or SEV) with additional -context about the fault, such as whether it is private or shared, -allowing them to apply different mapping level rules as needed. - -Enforce "private-only" behavior (for now): Since the current consumers -of this hook (TDX and SEV) still primarily use it to enforce private -memory constraints, platform-specific implementations are made to return -0 for non-private pages. A return value of 0 signals to callers that -platform-specific input should be ignored for that particular fault, -indicating no specific platform-imposed mapping level limits for -non-private pages. This allows the core MMU to continue determining the -mapping level based on generic rules for such cases. 
- -Acked-by: David Hildenbrand -Suggested-by: Sean Christoperson -Signed-off-by: Ackerley Tng -Signed-off-by: Fuad Tabba ---- - arch/x86/include/asm/kvm-x86-ops.h | 2 +- - arch/x86/include/asm/kvm_host.h | 2 +- - arch/x86/kvm/mmu/mmu.c | 11 ++++++----- - arch/x86/kvm/svm/sev.c | 8 ++++++-- - arch/x86/kvm/svm/svm.c | 2 +- - arch/x86/kvm/svm/svm.h | 4 ++-- - arch/x86/kvm/vmx/main.c | 6 +++--- - arch/x86/kvm/vmx/tdx.c | 5 ++++- - arch/x86/kvm/vmx/x86_ops.h | 2 +- - 9 files changed, 25 insertions(+), 17 deletions(-) - -diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h -index 8d50e3e0a19b..02301fbad449 100644 ---- a/arch/x86/include/asm/kvm-x86-ops.h -+++ b/arch/x86/include/asm/kvm-x86-ops.h -@@ -146,7 +146,7 @@ KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); - KVM_X86_OP_OPTIONAL(get_untagged_addr) - KVM_X86_OP_OPTIONAL(alloc_apic_backing_page) - KVM_X86_OP_OPTIONAL_RET0(gmem_prepare) --KVM_X86_OP_OPTIONAL_RET0(private_max_mapping_level) -+KVM_X86_OP_OPTIONAL_RET0(max_mapping_level) - KVM_X86_OP_OPTIONAL(gmem_invalidate) - - #undef KVM_X86_OP -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 938b5be03d33..543d09fd4bca 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -1907,7 +1907,7 @@ struct kvm_x86_ops { - void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu); - int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); - void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); -- int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn); -+ int (*max_mapping_level)(struct kvm *kvm, struct kvm_page_fault *fault); - }; - - struct kvm_x86_nested_ops { -diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 213904daf1e5..bb925994cbc5 100644 ---- a/arch/x86/kvm/mmu/mmu.c -+++ b/arch/x86/kvm/mmu/mmu.c -@@ -4467,9 +4467,11 @@ static inline u8 kvm_max_level_for_order(int order) - return PG_LEVEL_4K; - } - --static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, -- u8 max_level, int gmem_order) -+static u8 kvm_max_private_mapping_level(struct kvm *kvm, -+ struct kvm_page_fault *fault, -+ int gmem_order) - { -+ u8 max_level = fault->max_level; - u8 req_max_level; - - if (max_level == PG_LEVEL_4K) -@@ -4479,7 +4481,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, - if (max_level == PG_LEVEL_4K) - return PG_LEVEL_4K; - -- req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn); -+ req_max_level = kvm_x86_call(max_mapping_level)(kvm, fault); - if (req_max_level) - max_level = min(max_level, req_max_level); - -@@ -4511,8 +4513,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, - } - - fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY); -- fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn, -- fault->max_level, max_order); -+ fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault, max_order); - - return RET_PF_CONTINUE; - } -diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c -index 687392c5bf5d..dd470e26f6a0 100644 ---- a/arch/x86/kvm/svm/sev.c -+++ b/arch/x86/kvm/svm/sev.c -@@ -29,6 +29,7 @@ - #include - #include - -+#include "mmu/mmu_internal.h" - #include "mmu.h" - #include "x86.h" - #include "svm.h" -@@ -4906,7 +4907,7 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) - } - } - --int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) -+int sev_max_mapping_level(struct kvm *kvm, struct kvm_page_fault 
*fault) - { - int level, rc; - bool assigned; -@@ -4914,7 +4915,10 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) - if (!sev_snp_guest(kvm)) - return 0; - -- rc = snp_lookup_rmpentry(pfn, &assigned, &level); -+ if (!fault->is_private) -+ return 0; -+ -+ rc = snp_lookup_rmpentry(fault->pfn, &assigned, &level); - if (rc || !assigned) - return PG_LEVEL_4K; - -diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index d1c484eaa8ad..6ad047189210 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ b/arch/x86/kvm/svm/svm.c -@@ -5347,7 +5347,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { - - .gmem_prepare = sev_gmem_prepare, - .gmem_invalidate = sev_gmem_invalidate, -- .private_max_mapping_level = sev_private_max_mapping_level, -+ .max_mapping_level = sev_max_mapping_level, - }; - - /* -diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h -index e6f3c6a153a0..c2579f7df734 100644 ---- a/arch/x86/kvm/svm/svm.h -+++ b/arch/x86/kvm/svm/svm.h -@@ -787,7 +787,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code); - void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu); - int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); - void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); --int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); -+int sev_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault); - struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu); - void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa); - #else -@@ -816,7 +816,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in - return 0; - } - static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {} --static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) -+static inline int sev_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault) - { - return 0; - } -diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c -index d1e02e567b57..8e53554932ba 100644 ---- a/arch/x86/kvm/vmx/main.c -+++ b/arch/x86/kvm/vmx/main.c -@@ -871,10 +871,10 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp) - return tdx_vcpu_ioctl(vcpu, argp); - } - --static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) -+static int vt_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault) - { - if (is_td(kvm)) -- return tdx_gmem_private_max_mapping_level(kvm, pfn); -+ return tdx_gmem_max_mapping_level(kvm, fault); - - return 0; - } -@@ -1044,7 +1044,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = { - .mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl), - .vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl), - -- .private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level) -+ .max_mapping_level = vt_op_tdx_only(gmem_max_mapping_level) - }; - - struct kvm_x86_init_ops vt_init_ops __initdata = { -diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c -index a3db6df245ee..7f652241491a 100644 ---- a/arch/x86/kvm/vmx/tdx.c -+++ b/arch/x86/kvm/vmx/tdx.c -@@ -3322,8 +3322,11 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) - return ret; - } - --int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) -+int tdx_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault) - { -+ if (!fault->is_private) -+ return 0; -+ - return PG_LEVEL_4K; - } - -diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h -index 
b4596f651232..ca7bc9e0fce5 100644 ---- a/arch/x86/kvm/vmx/x86_ops.h -+++ b/arch/x86/kvm/vmx/x86_ops.h -@@ -163,7 +163,7 @@ int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn, - void tdx_flush_tlb_current(struct kvm_vcpu *vcpu); - void tdx_flush_tlb_all(struct kvm_vcpu *vcpu); - void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); --int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); -+int tdx_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault); - #endif - - #endif /* __KVM_X86_VMX_X86_OPS_H */ --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch similarity index 72% rename from resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch index c6e10423e9c..a2de409fa9e 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0009-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-guest_memfd-Track-guest_memfd-mmap-support-in-me.patch @@ -1,13 +1,15 @@ -From 09759854a3fbd70fc5c8c1f44da8c11c12cd3ac2 Mon Sep 17 00:00:00 2001 +From a5d0015d5701f7c76c975dcba6ed4bdc8863ced1 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:38 +0100 -Subject: [PATCH 09/46] KVM: guest_memfd: Track guest_memfd mmap support in +Date: Tue, 29 Jul 2025 15:54:42 -0700 +Subject: [PATCH 11/49] KVM: guest_memfd: Track guest_memfd mmap support in memslot Add a new internal flag, KVM_MEMSLOT_GMEM_ONLY, to the top half of -memslot->flags. This flag tracks when a guest_memfd-backed memory slot -supports host userspace mmap operations. It's strictly for KVM's -internal use. +memslot->flags (which makes it strictly for KVM's internal use). This +flag tracks when a guest_memfd-backed memory slot supports host +userspace mmap operations, which implies that all memory, not just +private memory for CoCo VMs, is consumed through guest_memfd: "gmem +only". This optimization avoids repeatedly checking the underlying guest_memfd file for mmap support, which would otherwise require taking and @@ -17,16 +19,18 @@ logic involved in handling guest_memfd-backed pages for host mappings. 
Reviewed-by: Gavin Shan Reviewed-by: Shivank Garg +Reviewed-by: Xiaoyao Li Acked-by: David Hildenbrand Suggested-by: David Hildenbrand Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 11 ++++++++++- virt/kvm/guest_memfd.c | 2 ++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 9ac21985f3b5..d2218ec57ceb 100644 +index 26bad600f9fa..8b47891adca1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -54,7 +54,8 @@ @@ -39,13 +43,13 @@ index 9ac21985f3b5..d2218ec57ceb 100644 /* * Bit 63 of the memslot generation number is an "update in-progress flag", -@@ -2536,6 +2537,14 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, +@@ -2490,6 +2491,14 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; } +static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot) +{ -+ if (!IS_ENABLED(CONFIG_KVM_GMEM_SUPPORTS_MMAP)) ++ if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD)) + return false; + + return slot->flags & KVM_MEMSLOT_GMEM_ONLY; @@ -55,10 +59,10 @@ index 9ac21985f3b5..d2218ec57ceb 100644 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) { diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index 07a4b165471d..2b00f8796a15 100644 +index 67e7cd7210ef..d5b445548af4 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c -@@ -592,6 +592,8 @@ int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, +@@ -578,6 +578,8 @@ int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot, */ WRITE_ONCE(slot->gmem.file, file); slot->gmem.pgoff = start; diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-x86-mmu-Allow-NULL-able-fault-in-kvm_max_private.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-x86-mmu-Allow-NULL-able-fault-in-kvm_max_private.patch deleted file mode 100644 index 2c9304524d9..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0011-KVM-x86-mmu-Allow-NULL-able-fault-in-kvm_max_private.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 2c056af79dfa13aae23431c1c200b7cad1e57449 Mon Sep 17 00:00:00 2001 -From: Ackerley Tng -Date: Tue, 15 Jul 2025 10:33:40 +0100 -Subject: [PATCH 11/46] KVM: x86/mmu: Allow NULL-able fault in - kvm_max_private_mapping_level - -Refactor kvm_max_private_mapping_level() to accept a NULL kvm_page_fault -pointer and rename it to kvm_gmem_max_mapping_level(). - -The max_mapping_level x86 operation (previously private_max_mapping_level) -is designed to potentially be called without an active page fault, for -instance, when kvm_mmu_max_mapping_level() is determining the maximum -mapping level for a gfn proactively. - -Allow NULL fault pointer: Modify kvm_max_private_mapping_level() to -safely handle a NULL fault argument. This aligns its interface with the -kvm_x86_ops.max_mapping_level operation it wraps, which can also be -called with NULL. - -Rename function to kvm_gmem_max_mapping_level(): This reinforces that -the function's scope is for guest_memfd-backed memory, which can be -either private or non-private, removing any remaining "private" -connotation from its name. - -Optimize max_level checks: Introduce a check in the caller to skip -querying for max_mapping_level if the current max_level is already -PG_LEVEL_4K, as no further reduction is possible. 
- -Acked-by: David Hildenbrand -Suggested-by: Sean Christoperson -Signed-off-by: Ackerley Tng -Signed-off-by: Fuad Tabba ---- - arch/x86/kvm/mmu/mmu.c | 16 +++++++--------- - 1 file changed, 7 insertions(+), 9 deletions(-) - -diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index bb925994cbc5..6bd28fda0fd3 100644 ---- a/arch/x86/kvm/mmu/mmu.c -+++ b/arch/x86/kvm/mmu/mmu.c -@@ -4467,17 +4467,13 @@ static inline u8 kvm_max_level_for_order(int order) - return PG_LEVEL_4K; - } - --static u8 kvm_max_private_mapping_level(struct kvm *kvm, -- struct kvm_page_fault *fault, -- int gmem_order) -+static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, int order, -+ struct kvm_page_fault *fault) - { -- u8 max_level = fault->max_level; - u8 req_max_level; -+ u8 max_level; - -- if (max_level == PG_LEVEL_4K) -- return PG_LEVEL_4K; -- -- max_level = min(kvm_max_level_for_order(gmem_order), max_level); -+ max_level = kvm_max_level_for_order(order); - if (max_level == PG_LEVEL_4K) - return PG_LEVEL_4K; - -@@ -4513,7 +4509,9 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, - } - - fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY); -- fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault, max_order); -+ if (fault->max_level >= PG_LEVEL_4K) -+ fault->max_level = kvm_gmem_max_mapping_level(vcpu->kvm, -+ max_order, fault); - - return RET_PF_CONTINUE; - } --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Consult-guest_memfd-when-computing-max_m.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Consult-guest_memfd-when-computing-max_m.patch deleted file mode 100644 index d9ee9465001..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Consult-guest_memfd-when-computing-max_m.patch +++ /dev/null @@ -1,212 +0,0 @@ -From 778110c1ae955ac98afac67c71c66ab5931238fd Mon Sep 17 00:00:00 2001 -From: Ackerley Tng -Date: Tue, 15 Jul 2025 10:33:41 +0100 -Subject: [PATCH 12/46] KVM: x86/mmu: Consult guest_memfd when computing - max_mapping_level - -Modify kvm_mmu_max_mapping_level() to consult guest_memfd for memory -regions backed by it when computing the maximum mapping level, -especially during huge page recovery. - -Previously, kvm_mmu_max_mapping_level() was designed primarily for -host-backed memory and private pages. With guest_memfd now supporting -non-private memory, it's necessary to factor in guest_memfd's influence -on mapping levels for such memory. - -Since guest_memfd can now be used for non-private memory, make -kvm_max_max_mapping_level, when recovering huge pages, take input from -guest_memfd. - -Input is taken from guest_memfd as long as a fault to that slot and gfn -would have been served from guest_memfd. For now, take a shortcut if the -slot and gfn points to memory that is private, since recovering huge -pages aren't supported for private memory yet. - -Since guest_memfd memory can also be faulted into host page tables, -__kvm_mmu_max_mapping_level() still applies since consulting lpage_info -and host page tables are required. - -Move functions kvm_max_level_for_order() and -kvm_gmem_max_mapping_level() so kvm_mmu_max_mapping_level() can use -those functions. 
- -Acked-by: David Hildenbrand -Signed-off-by: Ackerley Tng -Co-developed-by: Fuad Tabba -Signed-off-by: Fuad Tabba ---- - arch/x86/kvm/mmu/mmu.c | 90 ++++++++++++++++++++++++---------------- - include/linux/kvm_host.h | 7 ++++ - virt/kvm/guest_memfd.c | 17 ++++++++ - 3 files changed, 79 insertions(+), 35 deletions(-) - -diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 6bd28fda0fd3..94be15cde6da 100644 ---- a/arch/x86/kvm/mmu/mmu.c -+++ b/arch/x86/kvm/mmu/mmu.c -@@ -3282,13 +3282,67 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm, - return min(host_level, max_level); - } - -+static u8 kvm_max_level_for_order(int order) -+{ -+ BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G); -+ -+ KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) && -+ order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) && -+ order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K)); -+ -+ if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G)) -+ return PG_LEVEL_1G; -+ -+ if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)) -+ return PG_LEVEL_2M; -+ -+ return PG_LEVEL_4K; -+} -+ -+static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, int order, -+ struct kvm_page_fault *fault) -+{ -+ u8 req_max_level; -+ u8 max_level; -+ -+ max_level = kvm_max_level_for_order(order); -+ if (max_level == PG_LEVEL_4K) -+ return PG_LEVEL_4K; -+ -+ req_max_level = kvm_x86_call(max_mapping_level)(kvm, fault); -+ if (req_max_level) -+ max_level = min(max_level, req_max_level); -+ -+ return max_level; -+} -+ - int kvm_mmu_max_mapping_level(struct kvm *kvm, - const struct kvm_memory_slot *slot, gfn_t gfn) - { - bool is_private = kvm_slot_has_gmem(slot) && - kvm_mem_is_private(kvm, gfn); -+ int max_level = PG_LEVEL_NUM; -+ -+ /* -+ * For now, kvm_mmu_max_mapping_level() is only called from -+ * kvm_mmu_recover_huge_pages(), and that's not yet supported for -+ * private memory, hence we can take a shortcut and return early. -+ */ -+ if (is_private) -+ return PG_LEVEL_4K; - -- return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private); -+ /* -+ * For non-private pages that would have been faulted from guest_memfd, -+ * let guest_memfd influence max_mapping_level. 
-+ */ -+ if (kvm_memslot_is_gmem_only(slot)) { -+ int order = kvm_gmem_mapping_order(slot, gfn); -+ -+ max_level = min(max_level, -+ kvm_gmem_max_mapping_level(kvm, order, NULL)); -+ } -+ -+ return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private); - } - - void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) -@@ -4450,40 +4504,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) - vcpu->stat.pf_fixed++; - } - --static inline u8 kvm_max_level_for_order(int order) --{ -- BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G); -- -- KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) && -- order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) && -- order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K)); -- -- if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G)) -- return PG_LEVEL_1G; -- -- if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)) -- return PG_LEVEL_2M; -- -- return PG_LEVEL_4K; --} -- --static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, int order, -- struct kvm_page_fault *fault) --{ -- u8 req_max_level; -- u8 max_level; -- -- max_level = kvm_max_level_for_order(order); -- if (max_level == PG_LEVEL_4K) -- return PG_LEVEL_4K; -- -- req_max_level = kvm_x86_call(max_mapping_level)(kvm, fault); -- if (req_max_level) -- max_level = min(max_level, req_max_level); -- -- return max_level; --} -- - static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu, - struct kvm_page_fault *fault, int r) - { -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index d2218ec57ceb..662271314778 100644 ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -2574,6 +2574,7 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) - int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t *pfn, struct page **page, - int *max_order); -+int kvm_gmem_mapping_order(const struct kvm_memory_slot *slot, gfn_t gfn); - #else - static inline int kvm_gmem_get_pfn(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, -@@ -2583,6 +2584,12 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, - KVM_BUG_ON(1, kvm); - return -EIO; - } -+static inline int kvm_gmem_mapping_order(const struct kvm_memory_slot *slot, -+ gfn_t gfn) -+{ -+ WARN_ONCE(1, "Unexpected call since gmem is disabled."); -+ return 0; -+} - #endif /* CONFIG_KVM_GMEM */ - - #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE -diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index 2b00f8796a15..d01bd7a2c2bd 100644 ---- a/virt/kvm/guest_memfd.c -+++ b/virt/kvm/guest_memfd.c -@@ -713,6 +713,23 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, - } - EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn); - -+/** -+ * kvm_gmem_mapping_order() - Get the mapping order for this @gfn in @slot. -+ * -+ * @slot: the memslot that gfn belongs to. -+ * @gfn: the gfn to look up mapping order for. -+ * -+ * This is equal to max_order that would be returned if kvm_gmem_get_pfn() were -+ * called now. -+ * -+ * Return: the mapping order for this @gfn in @slot. 
-+ */ -+int kvm_gmem_mapping_order(const struct kvm_memory_slot *slot, gfn_t gfn) -+{ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(kvm_gmem_mapping_order); -+ - #ifdef CONFIG_KVM_GENERIC_GMEM_POPULATE - long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages, - kvm_gmem_populate_cb post_populate, void *opaque) --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Rename-.private_max_mapping_level-to-.gm.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Rename-.private_max_mapping_level-to-.gm.patch new file mode 100644 index 00000000000..3076af329c1 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0012-KVM-x86-mmu-Rename-.private_max_mapping_level-to-.gm.patch @@ -0,0 +1,171 @@ +From 6773a2fb6642b80d20737c3efd86540d9af4bc0a Mon Sep 17 00:00:00 2001 +From: Ackerley Tng +Date: Tue, 29 Jul 2025 15:54:43 -0700 +Subject: [PATCH 12/49] KVM: x86/mmu: Rename .private_max_mapping_level() to + .gmem_max_mapping_level() + +Rename kvm_x86_ops.private_max_mapping_level() to .gmem_max_mapping_level() +in anticipation of extending guest_memfd support to non-private memory. + +No functional change intended. + +Reviewed-by: Xiaoyao Li +Acked-by: David Hildenbrand +Signed-off-by: Ackerley Tng +Signed-off-by: Fuad Tabba +Co-developed-by: Sean Christopherson +Signed-off-by: Sean Christopherson +--- + arch/x86/include/asm/kvm-x86-ops.h | 2 +- + arch/x86/include/asm/kvm_host.h | 2 +- + arch/x86/kvm/mmu/mmu.c | 2 +- + arch/x86/kvm/svm/sev.c | 2 +- + arch/x86/kvm/svm/svm.c | 2 +- + arch/x86/kvm/svm/svm.h | 4 ++-- + arch/x86/kvm/vmx/main.c | 6 +++--- + arch/x86/kvm/vmx/tdx.c | 2 +- + arch/x86/kvm/vmx/x86_ops.h | 2 +- + 9 files changed, 12 insertions(+), 12 deletions(-) + +diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h +index 18a5c3119e1a..62c3e4de3303 100644 +--- a/arch/x86/include/asm/kvm-x86-ops.h ++++ b/arch/x86/include/asm/kvm-x86-ops.h +@@ -145,7 +145,7 @@ KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); + KVM_X86_OP_OPTIONAL(get_untagged_addr) + KVM_X86_OP_OPTIONAL(alloc_apic_backing_page) + KVM_X86_OP_OPTIONAL_RET0(gmem_prepare) +-KVM_X86_OP_OPTIONAL_RET0(private_max_mapping_level) ++KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level) + KVM_X86_OP_OPTIONAL(gmem_invalidate) + + #undef KVM_X86_OP +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 50366a1ca192..c0a739bf3829 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1922,7 +1922,7 @@ struct kvm_x86_ops { + void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu); + int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); + void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); +- int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn); ++ int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn); + }; + + struct kvm_x86_nested_ops { +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index fdc2824755ee..b735611e8fcd 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -4532,7 +4532,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, + if (max_level == PG_LEVEL_4K) + return PG_LEVEL_4K; + +- req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn); ++ req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); + if (req_max_level) + max_level = min(max_level, req_max_level); + +diff --git a/arch/x86/kvm/svm/sev.c 
b/arch/x86/kvm/svm/sev.c +index 7744c210f947..be1c80d79331 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -4947,7 +4947,7 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) + } + } + +-int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) + { + int level, rc; + bool assigned; +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index d9931c6c4bc6..8a66e2e985a4 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -5180,7 +5180,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { + + .gmem_prepare = sev_gmem_prepare, + .gmem_invalidate = sev_gmem_invalidate, +- .private_max_mapping_level = sev_private_max_mapping_level, ++ .gmem_max_mapping_level = sev_gmem_max_mapping_level, + }; + + /* +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h +index 58b9d168e0c8..d84a83ae18a1 100644 +--- a/arch/x86/kvm/svm/svm.h ++++ b/arch/x86/kvm/svm/svm.h +@@ -866,7 +866,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code); + void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu); + int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); + void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); +-int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); ++int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); + struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu); + void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa); + #else +@@ -895,7 +895,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in + return 0; + } + static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {} +-static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) + { + return 0; + } +diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c +index dbab1c15b0cd..dd7687ef7e2d 100644 +--- a/arch/x86/kvm/vmx/main.c ++++ b/arch/x86/kvm/vmx/main.c +@@ -831,10 +831,10 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp) + return tdx_vcpu_ioctl(vcpu, argp); + } + +-static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) + { + if (is_td(kvm)) +- return tdx_gmem_private_max_mapping_level(kvm, pfn); ++ return tdx_gmem_max_mapping_level(kvm, pfn); + + return 0; + } +@@ -1005,7 +1005,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = { + .mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl), + .vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl), + +- .private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level) ++ .gmem_max_mapping_level = vt_op_tdx_only(gmem_max_mapping_level) + }; + + struct kvm_x86_init_ops vt_init_ops __initdata = { +diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c +index 66744f5768c8..b444714e8e8a 100644 +--- a/arch/x86/kvm/vmx/tdx.c ++++ b/arch/x86/kvm/vmx/tdx.c +@@ -3318,7 +3318,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) + return ret; + } + +-int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) + { + return PG_LEVEL_4K; + } +diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h +index 2b3424f638db..6037d1708485 100644 +--- a/arch/x86/kvm/vmx/x86_ops.h ++++ 
b/arch/x86/kvm/vmx/x86_ops.h +@@ -153,7 +153,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp); + void tdx_flush_tlb_current(struct kvm_vcpu *vcpu); + void tdx_flush_tlb_all(struct kvm_vcpu *vcpu); + void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); +-int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); ++int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); + #endif + + #endif /* __KVM_X86_VMX_X86_OPS_H */ +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Hoist-guest_memfd-max-level-order-helper.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Hoist-guest_memfd-max-level-order-helper.patch new file mode 100644 index 00000000000..5a4a0dc950c --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Hoist-guest_memfd-max-level-order-helper.patch @@ -0,0 +1,113 @@ +From 01be6db3effd560947df13a0471ba58587477192 Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:44 -0700 +Subject: [PATCH 13/49] KVM: x86/mmu: Hoist guest_memfd max level/order helpers + "up" in mmu.c + +Move kvm_max_level_for_order() and kvm_max_private_mapping_level() up in +mmu.c so that they can be used by __kvm_mmu_max_mapping_level(). + +Opportunistically drop the "inline" from kvm_max_level_for_order(). + +No functional change intended. + +Reviewed-by: Xiaoyao Li +Reviewed-by: Ackerley Tng +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/mmu/mmu.c | 72 +++++++++++++++++++++--------------------- + 1 file changed, 36 insertions(+), 36 deletions(-) + +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index b735611e8fcd..20dd9f64156e 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -3285,6 +3285,42 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + return level; + } + ++static u8 kvm_max_level_for_order(int order) ++{ ++ BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G); ++ ++ KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) && ++ order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) && ++ order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K)); ++ ++ if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G)) ++ return PG_LEVEL_1G; ++ ++ if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)) ++ return PG_LEVEL_2M; ++ ++ return PG_LEVEL_4K; ++} ++ ++static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, ++ u8 max_level, int gmem_order) ++{ ++ u8 req_max_level; ++ ++ if (max_level == PG_LEVEL_4K) ++ return PG_LEVEL_4K; ++ ++ max_level = min(kvm_max_level_for_order(gmem_order), max_level); ++ if (max_level == PG_LEVEL_4K) ++ return PG_LEVEL_4K; ++ ++ req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); ++ if (req_max_level) ++ max_level = min(max_level, req_max_level); ++ ++ return max_level; ++} ++ + static int __kvm_mmu_max_mapping_level(struct kvm *kvm, + const struct kvm_memory_slot *slot, + gfn_t gfn, int max_level, bool is_private) +@@ -4503,42 +4539,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) + vcpu->stat.pf_fixed++; + } + +-static inline u8 kvm_max_level_for_order(int order) +-{ +- BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G); +- +- KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) && +- order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) && +- order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K)); +- +- if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G)) +- return PG_LEVEL_1G; +- +- if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)) +- return 
PG_LEVEL_2M; +- +- return PG_LEVEL_4K; +-} +- +-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, +- u8 max_level, int gmem_order) +-{ +- u8 req_max_level; +- +- if (max_level == PG_LEVEL_4K) +- return PG_LEVEL_4K; +- +- max_level = min(kvm_max_level_for_order(gmem_order), max_level); +- if (max_level == PG_LEVEL_4K) +- return PG_LEVEL_4K; +- +- req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); +- if (req_max_level) +- max_level = min(max_level, req_max_level); +- +- return max_level; +-} +- + static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault, int r) + { +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-Enable-guest_memfd-mmap-for-default-VM-type.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-Enable-guest_memfd-mmap-for-default-VM-type.patch deleted file mode 100644 index 7325f3b5640..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-Enable-guest_memfd-mmap-for-default-VM-type.patch +++ /dev/null @@ -1,94 +0,0 @@ -From aa1e5ba0e2a75236ad66c224f24a3f311cdee7b1 Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:43 +0100 -Subject: [PATCH 14/46] KVM: x86: Enable guest_memfd mmap for default VM type - -Enable host userspace mmap support for guest_memfd-backed memory when -running KVM with the KVM_X86_DEFAULT_VM type: - -* Define kvm_arch_supports_gmem_mmap() for KVM_X86_DEFAULT_VM: Introduce - the architecture-specific kvm_arch_supports_gmem_mmap() macro, - specifically enabling mmap support for KVM_X86_DEFAULT_VM instances. - This macro, gated by CONFIG_KVM_GMEM_SUPPORTS_MMAP, ensures that only - the default VM type can leverage guest_memfd mmap functionality on - x86. This explicit enablement prevents CoCo VMs, which use guest_memfd - primarily for private memory and rely on hardware-enforced privacy, - from accidentally exposing guest memory via host userspace mappings. - -* Select CONFIG_KVM_GMEM_SUPPORTS_MMAP in KVM_X86: Enable the - CONFIG_KVM_GMEM_SUPPORTS_MMAP Kconfig option when KVM_X86 is selected. - This ensures that the necessary code for guest_memfd mmap support - (introduced earlier) is compiled into the kernel for x86. This Kconfig - option acts as a system-wide gate for the guest_memfd mmap capability. - It implicitly enables CONFIG_KVM_GMEM, making guest_memfd available, - and then layers the mmap capability on top specifically for the - default VM. - -These changes make guest_memfd a more versatile memory backing for -standard KVM guests, allowing VMMs to use a unified guest_memfd model -for both private (CoCo) and non-private (default) VMs. This is a -prerequisite for use cases such as running Firecracker guests entirely -backed by guest_memfd and implementing direct map removal for non-CoCo -VMs. 
- -Co-developed-by: Ackerley Tng -Signed-off-by: Ackerley Tng -Signed-off-by: Fuad Tabba ---- - arch/x86/include/asm/kvm_host.h | 9 +++++++++ - arch/x86/kvm/Kconfig | 1 + - arch/x86/kvm/x86.c | 3 ++- - 3 files changed, 12 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 543d09fd4bca..e1426adfa93e 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -2279,9 +2279,18 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, - #ifdef CONFIG_KVM_GMEM - #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem) - #define kvm_arch_supports_gmem(kvm) ((kvm)->arch.supports_gmem) -+ -+/* -+ * CoCo VMs with hardware support that use guest_memfd only for backing private -+ * memory, e.g., TDX, cannot use guest_memfd with userspace mapping enabled. -+ */ -+#define kvm_arch_supports_gmem_mmap(kvm) \ -+ (IS_ENABLED(CONFIG_KVM_GMEM_SUPPORTS_MMAP) && \ -+ (kvm)->arch.vm_type == KVM_X86_DEFAULT_VM) - #else - #define kvm_arch_has_private_mem(kvm) false - #define kvm_arch_supports_gmem(kvm) false -+#define kvm_arch_supports_gmem_mmap(kvm) false - #endif - - #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state) -diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig -index df1fdbb4024b..1ba959b9eadc 100644 ---- a/arch/x86/kvm/Kconfig -+++ b/arch/x86/kvm/Kconfig -@@ -47,6 +47,7 @@ config KVM_X86 - select KVM_GENERIC_HARDWARE_ENABLING - select KVM_GENERIC_PRE_FAULT_MEMORY - select KVM_GENERIC_GMEM_POPULATE if KVM_SW_PROTECTED_VM -+ select KVM_GMEM_SUPPORTS_MMAP if X86_64 - select KVM_WERROR if WERROR - - config KVM -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index adbdc2cc97d4..ca99187a566e 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -12781,7 +12781,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) - - kvm->arch.vm_type = type; - kvm->arch.has_private_mem = (type == KVM_X86_SW_PROTECTED_VM); -- kvm->arch.supports_gmem = (type == KVM_X86_SW_PROTECTED_VM); -+ kvm->arch.supports_gmem = -+ type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM; - /* Decided by the vendor code for other VM types. */ - kvm->arch.pre_fault_allowed = - type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM; --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-mmu-Enforce-guest_memfd-s-max-order-when-rec.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-mmu-Enforce-guest_memfd-s-max-order-when-rec.patch new file mode 100644 index 00000000000..8b14fc2ecac --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0014-KVM-x86-mmu-Enforce-guest_memfd-s-max-order-when-rec.patch @@ -0,0 +1,196 @@ +From 58e824be4a291883a4b1f3955825605f0f3cfbe5 Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:45 -0700 +Subject: [PATCH 14/49] KVM: x86/mmu: Enforce guest_memfd's max order when + recovering hugepages + +Rework kvm_mmu_max_mapping_level() to provide the plumbing to consult +guest_memfd (and relevant vendor code) when recovering hugepages, e.g. +after disabling live migration. The flaw has existed since guest_memfd was +originally added, but has gone unnoticed due to lack of guest_memfd support +for hugepages or dirty logging. + +Don't actually call into guest_memfd at this time, as it's unclear as to +what the API should be. 
Ideally, KVM would simply use kvm_gmem_get_pfn(), +but invoking kvm_gmem_get_pfn() would lead to sleeping in atomic context +if guest_memfd needed to allocate memory (mmu_lock is held). Luckily, +the path isn't actually reachable, so just add a TODO and WARN to ensure +the functionality is added alongside guest_memfd hugepage support, and +punt the guest_memfd API design question to the future. + +Note, calling kvm_mem_is_private() in the non-fault path is safe, so long +as mmu_lock is held, as hugepage recovery operates on shadow-present SPTEs, +i.e. calling kvm_mmu_max_mapping_level() with @fault=NULL is mutually +exclusive with kvm_vm_set_mem_attributes() changing the PRIVATE attribute +of the gfn. + +Signed-off-by: Sean Christopherson +--- + arch/x86/kvm/mmu/mmu.c | 78 +++++++++++++++++++-------------- + arch/x86/kvm/mmu/mmu_internal.h | 2 +- + arch/x86/kvm/mmu/tdp_mmu.c | 2 +- + 3 files changed, 47 insertions(+), 35 deletions(-) + +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 20dd9f64156e..61eb9f723675 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -3302,31 +3302,54 @@ static u8 kvm_max_level_for_order(int order) + return PG_LEVEL_4K; + } + +-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, +- u8 max_level, int gmem_order) ++static u8 kvm_max_private_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, ++ const struct kvm_memory_slot *slot, gfn_t gfn) + { +- u8 req_max_level; ++ u8 max_level, coco_level; ++ kvm_pfn_t pfn; + +- if (max_level == PG_LEVEL_4K) +- return PG_LEVEL_4K; ++ /* For faults, use the gmem information that was resolved earlier. */ ++ if (fault) { ++ pfn = fault->pfn; ++ max_level = fault->max_level; ++ } else { ++ /* TODO: Call into guest_memfd once hugepages are supported. */ ++ WARN_ONCE(1, "Get pfn+order from guest_memfd"); ++ pfn = KVM_PFN_ERR_FAULT; ++ max_level = PG_LEVEL_4K; ++ } + +- max_level = min(kvm_max_level_for_order(gmem_order), max_level); + if (max_level == PG_LEVEL_4K) +- return PG_LEVEL_4K; ++ return max_level; + +- req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); +- if (req_max_level) +- max_level = min(max_level, req_max_level); ++ /* ++ * CoCo may influence the max mapping level, e.g. due to RMP or S-EPT ++ * restrictions. A return of '0' means "no additional restrictions", to ++ * allow for using an optional "ret0" static call. 
++ */ ++ coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); ++ if (coco_level) ++ max_level = min(max_level, coco_level); + + return max_level; + } + +-static int __kvm_mmu_max_mapping_level(struct kvm *kvm, +- const struct kvm_memory_slot *slot, +- gfn_t gfn, int max_level, bool is_private) ++int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, ++ const struct kvm_memory_slot *slot, gfn_t gfn) + { + struct kvm_lpage_info *linfo; +- int host_level; ++ int host_level, max_level; ++ bool is_private; ++ ++ lockdep_assert_held(&kvm->mmu_lock); ++ ++ if (fault) { ++ max_level = fault->max_level; ++ is_private = fault->is_private; ++ } else { ++ max_level = PG_LEVEL_NUM; ++ is_private = kvm_mem_is_private(kvm, gfn); ++ } + + max_level = min(max_level, max_huge_page_level); + for ( ; max_level > PG_LEVEL_4K; max_level--) { +@@ -3335,25 +3358,16 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm, + break; + } + +- if (is_private) +- return max_level; +- + if (max_level == PG_LEVEL_4K) + return PG_LEVEL_4K; + +- host_level = host_pfn_mapping_level(kvm, gfn, slot); ++ if (is_private) ++ host_level = kvm_max_private_mapping_level(kvm, fault, slot, gfn); ++ else ++ host_level = host_pfn_mapping_level(kvm, gfn, slot); + return min(host_level, max_level); + } + +-int kvm_mmu_max_mapping_level(struct kvm *kvm, +- const struct kvm_memory_slot *slot, gfn_t gfn) +-{ +- bool is_private = kvm_slot_has_gmem(slot) && +- kvm_mem_is_private(kvm, gfn); +- +- return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private); +-} +- + void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) + { + struct kvm_memory_slot *slot = fault->slot; +@@ -3374,9 +3388,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault + * Enforce the iTLB multihit workaround after capturing the requested + * level, which will be used to do precise, accurate accounting. + */ +- fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot, +- fault->gfn, fault->max_level, +- fault->is_private); ++ fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, fault, ++ fault->slot, fault->gfn); + if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed) + return; + +@@ -4564,8 +4577,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, + } + + fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY); +- fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn, +- fault->max_level, max_order); ++ fault->max_level = kvm_max_level_for_order(max_order); + + return RET_PF_CONTINUE; + } +@@ -7165,7 +7177,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, + * mapping if the indirect sp has level = 1. 
+ */ + if (sp->role.direct && +- sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) { ++ sp->role.level < kvm_mmu_max_mapping_level(kvm, NULL, slot, sp->gfn)) { + kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); + + if (kvm_available_flush_remote_tlbs_range()) +diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h +index 65f3c89d7c5d..b776be783a2f 100644 +--- a/arch/x86/kvm/mmu/mmu_internal.h ++++ b/arch/x86/kvm/mmu/mmu_internal.h +@@ -411,7 +411,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + return r; + } + +-int kvm_mmu_max_mapping_level(struct kvm *kvm, ++int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, + const struct kvm_memory_slot *slot, gfn_t gfn); + void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); + void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level); +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c +index 7f3d7229b2c1..740cb06accdb 100644 +--- a/arch/x86/kvm/mmu/tdp_mmu.c ++++ b/arch/x86/kvm/mmu/tdp_mmu.c +@@ -1813,7 +1813,7 @@ static void recover_huge_pages_range(struct kvm *kvm, + if (iter.gfn < start || iter.gfn >= end) + continue; + +- max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, iter.gfn); ++ max_mapping_level = kvm_mmu_max_mapping_level(kvm, NULL, slot, iter.gfn); + if (max_mapping_level < iter.level) + continue; + +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-x86-mmu-Extend-guest_memfd-s-max-mapping-level-t.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-x86-mmu-Extend-guest_memfd-s-max-mapping-level-t.patch new file mode 100644 index 00000000000..bb9133af62f --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-x86-mmu-Extend-guest_memfd-s-max-mapping-level-t.patch @@ -0,0 +1,163 @@ +From 66352c48c15b6e80e07f2e79c55d2d6d238573dc Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:46 -0700 +Subject: [PATCH 15/49] KVM: x86/mmu: Extend guest_memfd's max mapping level to + shared mappings + +Rework kvm_mmu_max_mapping_level() to consult guest_memfd for all mappings, +not just private mappings, so that hugepage support plays nice with the +upcoming support for backing non-private memory with guest_memfd. + +In addition to getting the max order from guest_memfd for gmem-only +memslots, update TDX's hook to effectively ignore shared mappings, as TDX's +restrictions on page size only apply to Secure EPT mappings. Do nothing +for SNP, as RMP restrictions apply to both private and shared memory. 
+ +Suggested-by: Ackerley Tng +Signed-off-by: Sean Christopherson +--- + arch/x86/include/asm/kvm_host.h | 2 +- + arch/x86/kvm/mmu/mmu.c | 12 +++++++----- + arch/x86/kvm/svm/sev.c | 2 +- + arch/x86/kvm/svm/svm.h | 4 ++-- + arch/x86/kvm/vmx/main.c | 5 +++-- + arch/x86/kvm/vmx/tdx.c | 5 ++++- + arch/x86/kvm/vmx/x86_ops.h | 2 +- + 7 files changed, 19 insertions(+), 13 deletions(-) + +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index c0a739bf3829..c56cc54d682a 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1922,7 +1922,7 @@ struct kvm_x86_ops { + void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu); + int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); + void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); +- int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn); ++ int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); + }; + + struct kvm_x86_nested_ops { +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 61eb9f723675..e83d666f32ad 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -3302,8 +3302,9 @@ static u8 kvm_max_level_for_order(int order) + return PG_LEVEL_4K; + } + +-static u8 kvm_max_private_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, +- const struct kvm_memory_slot *slot, gfn_t gfn) ++static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, ++ const struct kvm_memory_slot *slot, gfn_t gfn, ++ bool is_private) + { + u8 max_level, coco_level; + kvm_pfn_t pfn; +@@ -3327,7 +3328,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, struct kvm_page_fault * + * restrictions. A return of '0' means "no additional restrictions", to + * allow for using an optional "ret0" static call. 
+ */ +- coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn); ++ coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn, is_private); + if (coco_level) + max_level = min(max_level, coco_level); + +@@ -3361,8 +3362,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault, + if (max_level == PG_LEVEL_4K) + return PG_LEVEL_4K; + +- if (is_private) +- host_level = kvm_max_private_mapping_level(kvm, fault, slot, gfn); ++ if (is_private || kvm_memslot_is_gmem_only(slot)) ++ host_level = kvm_gmem_max_mapping_level(kvm, fault, slot, gfn, ++ is_private); + else + host_level = host_pfn_mapping_level(kvm, gfn, slot); + return min(host_level, max_level); +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index be1c80d79331..807d4b70327a 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -4947,7 +4947,7 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) + } + } + +-int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private) + { + int level, rc; + bool assigned; +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h +index d84a83ae18a1..70df7c6413cf 100644 +--- a/arch/x86/kvm/svm/svm.h ++++ b/arch/x86/kvm/svm/svm.h +@@ -866,7 +866,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code); + void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu); + int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); + void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); +-int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); ++int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); + struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu); + void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa); + #else +@@ -895,7 +895,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in + return 0; + } + static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {} +-static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private) + { + return 0; + } +diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c +index dd7687ef7e2d..bb5f182f6788 100644 +--- a/arch/x86/kvm/vmx/main.c ++++ b/arch/x86/kvm/vmx/main.c +@@ -831,10 +831,11 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp) + return tdx_vcpu_ioctl(vcpu, argp); + } + +-static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, ++ bool is_private) + { + if (is_td(kvm)) +- return tdx_gmem_max_mapping_level(kvm, pfn); ++ return tdx_gmem_max_mapping_level(kvm, pfn, is_private); + + return 0; + } +diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c +index b444714e8e8a..ca9c8ec7dd01 100644 +--- a/arch/x86/kvm/vmx/tdx.c ++++ b/arch/x86/kvm/vmx/tdx.c +@@ -3318,8 +3318,11 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) + return ret; + } + +-int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) ++int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private) + { ++ if (!is_private) ++ return 0; ++ + return PG_LEVEL_4K; + } + +diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h +index 6037d1708485..4c70f56c57c8 100644 +--- a/arch/x86/kvm/vmx/x86_ops.h ++++ 
b/arch/x86/kvm/vmx/x86_ops.h +@@ -153,7 +153,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp); + void tdx_flush_tlb_current(struct kvm_vcpu *vcpu); + void tdx_flush_tlb_all(struct kvm_vcpu *vcpu); + void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); +-int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); ++int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); + #endif + + #endif /* __KVM_X86_VMX_X86_OPS_H */ +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch similarity index 69% rename from resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch index 88e6a593c52..272234e5d0a 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0013-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-x86-mmu-Handle-guest-page-faults-for-guest_memfd.patch @@ -1,7 +1,7 @@ -From 42491fccb59ae63c35c2f38a363479fa186867e1 Mon Sep 17 00:00:00 2001 +From 0bd3fa88d45b2f38ff12ec419e3b7e6fb8cd64fc Mon Sep 17 00:00:00 2001 From: Ackerley Tng -Date: Tue, 15 Jul 2025 10:33:42 +0100 -Subject: [PATCH 13/46] KVM: x86/mmu: Handle guest page faults for guest_memfd +Date: Tue, 29 Jul 2025 15:54:47 -0700 +Subject: [PATCH 16/49] KVM: x86/mmu: Handle guest page faults for guest_memfd with shared memory Update the KVM MMU fault handler to service guest page faults @@ -23,15 +23,17 @@ Signed-off-by: David Hildenbrand Signed-off-by: Ackerley Tng Co-developed-by: Fuad Tabba Signed-off-by: Fuad Tabba +[sean: drop the helper] +Signed-off-by: Sean Christopherson --- - arch/x86/kvm/mmu/mmu.c | 13 +++++++++---- - 1 file changed, 9 insertions(+), 4 deletions(-) + arch/x86/kvm/mmu/mmu.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 94be15cde6da..ad5f337b496c 100644 +index e83d666f32ad..56c80588efa0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c -@@ -4511,8 +4511,8 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu, +@@ -4561,8 +4561,8 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu, r == RET_PF_RETRY, fault->map_writable); } @@ -42,23 +44,13 @@ index 94be15cde6da..ad5f337b496c 100644 { int max_order, r; -@@ -4536,13 +4536,18 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, - return RET_PF_CONTINUE; - } - -+static bool fault_from_gmem(struct kvm_page_fault *fault) -+{ -+ return fault->is_private || kvm_memslot_is_gmem_only(fault->slot); -+} -+ - static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, - struct kvm_page_fault *fault) +@@ -4589,8 +4589,8 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, { unsigned int foll = fault->write ? 
FOLL_WRITE : 0; - if (fault->is_private) - return kvm_mmu_faultin_pfn_private(vcpu, fault); -+ if (fault_from_gmem(fault)) ++ if (fault->is_private || kvm_memslot_is_gmem_only(fault->slot)) + return kvm_mmu_faultin_pfn_gmem(vcpu, fault); foll |= FOLL_NOWAIT; diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-arm64-Refactor-user_mem_abort.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-Refactor-user_mem_abort.patch similarity index 96% rename from resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-arm64-Refactor-user_mem_abort.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-Refactor-user_mem_abort.patch index 2d083ad4da2..fd17a3bb22f 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0015-KVM-arm64-Refactor-user_mem_abort.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-Refactor-user_mem_abort.patch @@ -1,7 +1,7 @@ -From f6ccdf1e1e958348fade9a66470e3f2783bd0875 Mon Sep 17 00:00:00 2001 +From 3f974a030013f8eac6486d1731b97be765cab5d2 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:44 +0100 -Subject: [PATCH 15/46] KVM: arm64: Refactor user_mem_abort() +Date: Tue, 29 Jul 2025 15:54:48 -0700 +Subject: [PATCH 17/49] KVM: arm64: Refactor user_mem_abort() Refactor user_mem_abort() to improve code clarity and simplify assumptions within the function. @@ -33,7 +33,9 @@ modifications. Reviewed-by: Gavin Shan Reviewed-by: Marc Zyngier +Reviewed-by: Tao Chan Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- arch/arm64/kvm/mmu.c | 110 +++++++++++++++++++++++-------------------- 1 file changed, 59 insertions(+), 51 deletions(-) diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Enable-host-mapping-of-shared-guest_memfd-.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Enable-host-mapping-of-shared-guest_memfd-.patch deleted file mode 100644 index 4bb38134472..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Enable-host-mapping-of-shared-guest_memfd-.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 21f32f583ea8384ea7ae90a49e7642ce7b1e912a Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:47 +0100 -Subject: [PATCH 18/46] KVM: arm64: Enable host mapping of shared guest_memfd - memory - -Enable host userspace mmap support for guest_memfd-backed memory on -arm64. This change provides arm64 with the capability to map guest -memory at the host directly from guest_memfd: - -* Define kvm_arch_supports_gmem_mmap() for arm64: The - kvm_arch_supports_gmem_mmap() macro is defined for arm64 to be true if - CONFIG_KVM_GMEM_SUPPORTS_MMAP is enabled. For existing arm64 KVM VM - types that support guest_memfd, this enables them to use guest_memfd - with host userspace mappings. This provides a consistent behavior as - there are currently no arm64 CoCo VMs that rely on guest_memfd solely - for private, non-mappable memory. Future arm64 VM types can override - or restrict this behavior via the kvm_arch_supports_gmem_mmap() hook - if needed. - -* Select CONFIG_KVM_GMEM_SUPPORTS_MMAP in arm64 Kconfig. - -* Enforce KVM_MEMSLOT_GMEM_ONLY for guest_memfd on arm64: Checks are - added to ensure that if guest_memfd is enabled on arm64, - KVM_GMEM_SUPPORTS_MMAP must also be enabled. This means - guest_memfd-backed memory slots on arm64 are currently only supported - if they are intended for shared memory use cases (i.e., - kvm_memslot_is_gmem_only() is true). 
This design reflects the current - arm64 KVM ecosystem where guest_memfd is primarily being introduced - for VMs that support shared memory. - -Reviewed-by: James Houghton -Reviewed-by: Gavin Shan -Acked-by: David Hildenbrand -Signed-off-by: Fuad Tabba ---- - arch/arm64/include/asm/kvm_host.h | 4 ++++ - arch/arm64/kvm/Kconfig | 2 ++ - arch/arm64/kvm/mmu.c | 7 +++++++ - 3 files changed, 13 insertions(+) - -diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h -index 3e41a880b062..63f7827cfa1b 100644 ---- a/arch/arm64/include/asm/kvm_host.h -+++ b/arch/arm64/include/asm/kvm_host.h -@@ -1674,5 +1674,9 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt); - void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1); - void check_feature_map(void); - -+#ifdef CONFIG_KVM_GMEM -+#define kvm_arch_supports_gmem(kvm) true -+#define kvm_arch_supports_gmem_mmap(kvm) IS_ENABLED(CONFIG_KVM_GMEM_SUPPORTS_MMAP) -+#endif - - #endif /* __ARM64_KVM_HOST_H__ */ -diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig -index 713248f240e0..323b46b7c82f 100644 ---- a/arch/arm64/kvm/Kconfig -+++ b/arch/arm64/kvm/Kconfig -@@ -37,6 +37,8 @@ menuconfig KVM - select HAVE_KVM_VCPU_RUN_PID_CHANGE - select SCHED_INFO - select GUEST_PERF_EVENTS if PERF_EVENTS -+ select KVM_GMEM -+ select KVM_GMEM_SUPPORTS_MMAP - help - Support hosting virtualized guest machines. - -diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c -index 8c82df80a835..85559b8a0845 100644 ---- a/arch/arm64/kvm/mmu.c -+++ b/arch/arm64/kvm/mmu.c -@@ -2276,6 +2276,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, - if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT)) - return -EFAULT; - -+ /* -+ * Only support guest_memfd backed memslots with mappable memory, since -+ * there aren't any CoCo VMs that support only private memory on arm64. -+ */ -+ if (kvm_slot_has_gmem(new) && !kvm_memslot_is_gmem_only(new)) -+ return -EINVAL; -+ - hva = new->userspace_addr; - reg_end = hva + (new->npages << PAGE_SHIFT); - --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch similarity index 94% rename from resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch index 65f525aba48..5ded77e7cee 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0016-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0018-KVM-arm64-Handle-guest_memfd-backed-guest-page-fault.patch @@ -1,7 +1,7 @@ -From 5dc88aa0c38b04dc510ef6bb8208813479602e28 Mon Sep 17 00:00:00 2001 +From 49e7ea04e12c7b460fd8f1bbb7af396ed015e359 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:45 +0100 -Subject: [PATCH 16/46] KVM: arm64: Handle guest_memfd-backed guest page faults +Date: Tue, 29 Jul 2025 15:54:49 -0700 +Subject: [PATCH 18/49] KVM: arm64: Handle guest_memfd-backed guest page faults Add arm64 architecture support for handling guest page faults on memory slots backed by guest_memfd. @@ -17,7 +17,9 @@ these memory regions is restricted to PAGE_SIZE. 
Reviewed-by: Gavin Shan Reviewed-by: James Houghton +Reviewed-by: Marc Zyngier Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- arch/arm64/kvm/mmu.c | 86 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 3 deletions(-) diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-Introduce-the-KVM-capability-KVM_CAP_GMEM_MMAP.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-Introduce-the-KVM-capability-KVM_CAP_GMEM_MMAP.patch deleted file mode 100644 index e75dded499f..00000000000 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-Introduce-the-KVM-capability-KVM_CAP_GMEM_MMAP.patch +++ /dev/null @@ -1,77 +0,0 @@ -From c9392e6f8240d3da85fb65cfd82f592fa415ba21 Mon Sep 17 00:00:00 2001 -From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:48 +0100 -Subject: [PATCH 19/46] KVM: Introduce the KVM capability KVM_CAP_GMEM_MMAP - -Introduce the new KVM capability KVM_CAP_GMEM_MMAP. This capability -signals to userspace that a KVM instance supports host userspace mapping -of guest_memfd-backed memory. - -The availability of this capability is determined per architecture, and -its enablement for a specific guest_memfd instance is controlled by the -GUEST_MEMFD_FLAG_MMAP flag at creation time. - -Update the KVM API documentation to detail the KVM_CAP_GMEM_MMAP -capability, the associated GUEST_MEMFD_FLAG_MMAP, and provide essential -information regarding support for mmap in guest_memfd. - -Reviewed-by: David Hildenbrand -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg -Signed-off-by: Fuad Tabba ---- - Documentation/virt/kvm/api.rst | 9 +++++++++ - include/uapi/linux/kvm.h | 1 + - virt/kvm/kvm_main.c | 4 ++++ - 3 files changed, 14 insertions(+) - -diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst -index 43ed57e048a8..5169066b53b2 100644 ---- a/Documentation/virt/kvm/api.rst -+++ b/Documentation/virt/kvm/api.rst -@@ -6407,6 +6407,15 @@ most one mapping per page, i.e. binding multiple memory regions to a single - guest_memfd range is not allowed (any number of memory regions can be bound to - a single guest_memfd file, but the bound ranges must not overlap). - -+When the capability KVM_CAP_GMEM_MMAP is supported, the 'flags' field supports -+GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation enables mmap() -+and faulting of guest_memfd memory to host userspace. -+ -+When the KVM MMU performs a PFN lookup to service a guest fault and the backing -+guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be -+consumed from guest_memfd, regardless of whether it is a shared or a private -+fault. -+ - See KVM_SET_USER_MEMORY_REGION2 for additional details. 
- - 4.143 KVM_PRE_FAULT_MEMORY -diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index 3beafbf306af..698dd407980f 100644 ---- a/include/uapi/linux/kvm.h -+++ b/include/uapi/linux/kvm.h -@@ -960,6 +960,7 @@ struct kvm_enable_cap { - #define KVM_CAP_ARM_EL2 240 - #define KVM_CAP_ARM_EL2_E2H0 241 - #define KVM_CAP_RISCV_MP_STATE_RESET 242 -+#define KVM_CAP_GMEM_MMAP 243 - - struct kvm_irq_routing_irqchip { - __u32 irqchip; -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index 46bddac1dacd..f1ac872e01e9 100644 ---- a/virt/kvm/kvm_main.c -+++ b/virt/kvm/kvm_main.c -@@ -4916,6 +4916,10 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) - #ifdef CONFIG_KVM_GMEM - case KVM_CAP_GUEST_MEMFD: - return !kvm || kvm_arch_supports_gmem(kvm); -+#endif -+#ifdef CONFIG_KVM_GMEM_SUPPORTS_MMAP -+ case KVM_CAP_GMEM_MMAP: -+ return !kvm || kvm_arch_supports_gmem_mmap(kvm); - #endif - default: - break; --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch similarity index 89% rename from resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch index 3be49eb0542..4c9f81f4410 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0017-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0019-KVM-arm64-nv-Handle-VNCR_EL2-triggered-faults-backed.patch @@ -1,11 +1,11 @@ -From 802e435b18d56efe200a6e02bb3d0939e032afbc Mon Sep 17 00:00:00 2001 +From e51d1a89f7620263328422b3b12a2d29f80e19d3 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:46 +0100 -Subject: [PATCH 17/46] KVM: arm64: nv: Handle VNCR_EL2-triggered faults backed +Date: Tue, 29 Jul 2025 15:54:50 -0700 +Subject: [PATCH 19/49] KVM: arm64: nv: Handle VNCR_EL2-triggered faults backed by guest_memfd Handle faults for memslots backed by guest_memfd in arm64 nested -virtualization triggerred by VNCR_EL2. +virtualization triggered by VNCR_EL2. * Introduce is_gmem output parameter to kvm_translate_vncr(), indicating whether the faulted memory slot is backed by guest_memfd. @@ -13,11 +13,13 @@ virtualization triggerred by VNCR_EL2. * Dispatch faults backed by guest_memfd to kvm_gmem_get_pfn(). * Update kvm_handle_vncr_abort() to handle potential guest_memfd errors. - Some of the guest_memfd errors need to be handled by userspace, - instead of attempting to (implicitly) retry by returning to the guest. + Some of the guest_memfd errors need to be handled by userspace instead + of attempting to (implicitly) retry by returning to the guest. 
Suggested-by: Marc Zyngier +Reviewed-by: Marc Zyngier Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- arch/arm64/kvm/nested.c | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-arm64-Enable-support-for-guest_memfd-backed-memo.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-arm64-Enable-support-for-guest_memfd-backed-memo.patch new file mode 100644 index 00000000000..9b15868d043 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-arm64-Enable-support-for-guest_memfd-backed-memo.patch @@ -0,0 +1,61 @@ +From 0a292815117d6ce72fe76168aa51686e052deb9c Mon Sep 17 00:00:00 2001 +From: Fuad Tabba +Date: Tue, 29 Jul 2025 15:54:51 -0700 +Subject: [PATCH 20/49] KVM: arm64: Enable support for guest_memfd backed + memory + +Now that the infrastructure is in place, enable guest_memfd for arm64. + +* Select CONFIG_KVM_GUEST_MEMFD in KVM/arm64 Kconfig. + +* Enforce KVM_MEMSLOT_GMEM_ONLY for guest_memfd on arm64: Ensure that + guest_memfd-backed memory slots on arm64 are only supported if they + are intended for shared memory use cases (i.e., + kvm_memslot_is_gmem_only() is true). This design reflects the current + arm64 KVM ecosystem where guest_memfd is primarily being introduced + for VMs that support shared memory. + +Reviewed-by: James Houghton +Reviewed-by: Gavin Shan +Reviewed-by: Marc Zyngier +Acked-by: David Hildenbrand +Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson +--- + arch/arm64/kvm/Kconfig | 1 + + arch/arm64/kvm/mmu.c | 7 +++++++ + 2 files changed, 8 insertions(+) + +diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig +index 713248f240e0..bff62e75d681 100644 +--- a/arch/arm64/kvm/Kconfig ++++ b/arch/arm64/kvm/Kconfig +@@ -37,6 +37,7 @@ menuconfig KVM + select HAVE_KVM_VCPU_RUN_PID_CHANGE + select SCHED_INFO + select GUEST_PERF_EVENTS if PERF_EVENTS ++ select KVM_GUEST_MEMFD + help + Support hosting virtualized guest machines. + +diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c +index 8c82df80a835..85559b8a0845 100644 +--- a/arch/arm64/kvm/mmu.c ++++ b/arch/arm64/kvm/mmu.c +@@ -2276,6 +2276,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT)) + return -EFAULT; + ++ /* ++ * Only support guest_memfd backed memslots with mappable memory, since ++ * there aren't any CoCo VMs that support only private memory on arm64. 
++ */ ++ if (kvm_slot_has_gmem(new) && !kvm_memslot_is_gmem_only(new)) ++ return -EINVAL; ++ + hva = new->userspace_addr; + reg_end = hva + (new->npages << PAGE_SHIFT); + +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-Allow-and-advertise-support-for-host-mmap-on-gue.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-Allow-and-advertise-support-for-host-mmap-on-gue.patch new file mode 100644 index 00000000000..0e112477933 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-Allow-and-advertise-support-for-host-mmap-on-gue.patch @@ -0,0 +1,112 @@ +From 61dcc8ae40093daad33c80b115228cf06b35ebc1 Mon Sep 17 00:00:00 2001 +From: Fuad Tabba +Date: Tue, 29 Jul 2025 15:54:52 -0700 +Subject: [PATCH 21/49] KVM: Allow and advertise support for host mmap() on + guest_memfd files + +Now that all the x86 and arm64 plumbing for mmap() on guest_memfd is in +place, allow userspace to set GUEST_MEMFD_FLAG_MMAP and advertise support +via a new capability, KVM_CAP_GUEST_MEMFD_MMAP. + +The availability of this capability is determined per architecture, and +its enablement for a specific guest_memfd instance is controlled by the +GUEST_MEMFD_FLAG_MMAP flag at creation time. + +Update the KVM API documentation to detail the KVM_CAP_GUEST_MEMFD_MMAP +capability, the associated GUEST_MEMFD_FLAG_MMAP, and provide essential +information regarding support for mmap in guest_memfd. + +Reviewed-by: David Hildenbrand +Reviewed-by: Gavin Shan +Reviewed-by: Shivank Garg +Reviewed-by: Xiaoyao Li +Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson +--- + Documentation/virt/kvm/api.rst | 9 +++++++++ + include/uapi/linux/kvm.h | 2 ++ + virt/kvm/guest_memfd.c | 7 ++++++- + virt/kvm/kvm_main.c | 2 ++ + 4 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst +index fcb783735dd1..1e0c4a68876d 100644 +--- a/Documentation/virt/kvm/api.rst ++++ b/Documentation/virt/kvm/api.rst +@@ -6414,6 +6414,15 @@ most one mapping per page, i.e. binding multiple memory regions to a single + guest_memfd range is not allowed (any number of memory regions can be bound to + a single guest_memfd file, but the bound ranges must not overlap). + ++When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field ++supports GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation ++enables mmap() and faulting of guest_memfd memory to host userspace. ++ ++When the KVM MMU performs a PFN lookup to service a guest fault and the backing ++guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be ++consumed from guest_memfd, regardless of whether it is a shared or a private ++fault. ++ + See KVM_SET_USER_MEMORY_REGION2 for additional details. 
+ + 4.143 KVM_PRE_FAULT_MEMORY +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h +index aeb2ca10b190..0d96d2ae6e5d 100644 +--- a/include/uapi/linux/kvm.h ++++ b/include/uapi/linux/kvm.h +@@ -961,6 +961,7 @@ struct kvm_enable_cap { + #define KVM_CAP_ARM_EL2 240 + #define KVM_CAP_ARM_EL2_E2H0 241 + #define KVM_CAP_RISCV_MP_STATE_RESET 242 ++#define KVM_CAP_GUEST_MEMFD_MMAP 243 + + struct kvm_irq_routing_irqchip { + __u32 irqchip; +@@ -1597,6 +1598,7 @@ struct kvm_memory_attributes { + #define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) + + #define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) ++#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0) + + struct kvm_create_guest_memfd { + __u64 size; +diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c +index d5b445548af4..08a6bc7d25b6 100644 +--- a/virt/kvm/guest_memfd.c ++++ b/virt/kvm/guest_memfd.c +@@ -314,7 +314,9 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) + + static bool kvm_gmem_supports_mmap(struct inode *inode) + { +- return false; ++ const u64 flags = (u64)inode->i_private; ++ ++ return flags & GUEST_MEMFD_FLAG_MMAP; + } + + static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) +@@ -522,6 +524,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args) + u64 flags = args->flags; + u64 valid_flags = 0; + ++ if (kvm_arch_supports_gmem_mmap(kvm)) ++ valid_flags |= GUEST_MEMFD_FLAG_MMAP; ++ + if (flags & ~valid_flags) + return -EINVAL; + +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 4f57cb92e109..18f29ef93543 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -4918,6 +4918,8 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) + #ifdef CONFIG_KVM_GUEST_MEMFD + case KVM_CAP_GUEST_MEMFD: + return 1; ++ case KVM_CAP_GUEST_MEMFD_MMAP: ++ return !kvm || kvm_arch_supports_gmem_mmap(kvm); + #endif + default: + break; +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0022-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch similarity index 87% rename from resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0022-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch index a0ddd437035..7a835dc0ce5 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0021-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0022-KVM-selftests-Do-not-use-hardcoded-page-sizes-in-gue.patch @@ -1,7 +1,7 @@ -From f02cb2c4f0d67cfb5a9efc69da258ab60077eb4b Mon Sep 17 00:00:00 2001 +From de2729aec6884d52d796ae7be26c648499694d47 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:49 +0100 -Subject: [PATCH 21/46] KVM: selftests: Do not use hardcoded page sizes in +Date: Tue, 29 Jul 2025 15:54:53 -0700 +Subject: [PATCH 22/49] KVM: selftests: Do not use hardcoded page sizes in guest_memfd test Update the guest_memfd_test selftest to use getpagesize() instead of @@ -16,19 +16,20 @@ Additionally, build the guest_memfd_test selftest for arm64. 
Reviewed-by: David Hildenbrand Reviewed-by: Shivank Garg -Suggested-by: Gavin Shan Reviewed-by: Gavin Shan +Suggested-by: Gavin Shan Signed-off-by: Fuad Tabba +Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile.kvm | 1 + tools/testing/selftests/kvm/guest_memfd_test.c | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm -index 38b95998e1e6..e11ed9e59ab5 100644 +index 40920445bfbe..963687892bcb 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm -@@ -172,6 +172,7 @@ TEST_GEN_PROGS_arm64 += arch_timer +@@ -174,6 +174,7 @@ TEST_GEN_PROGS_arm64 += arch_timer TEST_GEN_PROGS_arm64 += coalesced_io_test TEST_GEN_PROGS_arm64 += dirty_log_perf_test TEST_GEN_PROGS_arm64 += get-reg-list @@ -37,10 +38,10 @@ index 38b95998e1e6..e11ed9e59ab5 100644 TEST_GEN_PROGS_arm64 += memslot_perf_test TEST_GEN_PROGS_arm64 += mmu_stress_test diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c -index beb556293590..1252e74fbb8f 100644 +index ce687f8d248f..341ba616cf55 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c -@@ -212,24 +212,25 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) +@@ -146,24 +146,25 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) { int fd1, fd2, ret; struct stat st1, st2; diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0023-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch similarity index 75% rename from resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch rename to resources/hiding_ci/linux_patches/05-mmap-support/0023-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch index ae092389086..a9201e5cf4e 100644 --- a/resources/hiding_ci/linux_patches/05-mmap-support/0020-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0023-KVM-selftests-guest_memfd-mmap-test-when-mmap-is-sup.patch @@ -1,7 +1,7 @@ -From 15b9df9fc94222e6bbdab3ffcf5bfc936a4c3bba Mon Sep 17 00:00:00 2001 +From 90618af0f76687d57f422b4a9c292507e38d8591 Mon Sep 17 00:00:00 2001 From: Fuad Tabba -Date: Tue, 15 Jul 2025 10:33:50 +0100 -Subject: [PATCH 20/46] KVM: selftests: guest_memfd mmap() test when mmap is +Date: Tue, 29 Jul 2025 15:54:54 -0700 +Subject: [PATCH 23/49] KVM: selftests: guest_memfd mmap() test when mmap is supported Expand the guest_memfd selftests to comprehensively test host userspace @@ -36,18 +36,20 @@ sizes), file read/write, file size, and invalid punch hole operations are integrated into the new test_with_type() framework to allow testing across different VM types. 
-Reviewed-by: James Houghton -Reviewed-by: Gavin Shan -Reviewed-by: Shivank Garg +Cc: James Houghton +Cc: Gavin Shan +Cc: Shivank Garg Co-developed-by: Ackerley Tng Signed-off-by: Ackerley Tng Signed-off-by: Fuad Tabba +Co-developed-by: Sean Christopherson +Signed-off-by: Sean Christopherson --- - .../testing/selftests/kvm/guest_memfd_test.c | 197 ++++++++++++++++-- - 1 file changed, 176 insertions(+), 21 deletions(-) + .../testing/selftests/kvm/guest_memfd_test.c | 161 +++++++++++++++--- + 1 file changed, 139 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c -index ce687f8d248f..beb556293590 100644 +index 341ba616cf55..088053d5f0f5 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c @@ -13,6 +13,8 @@ @@ -176,57 +178,66 @@ index ce687f8d248f..beb556293590 100644 } static void test_create_guest_memfd_multiple(struct kvm_vm *vm) -@@ -170,30 +236,119 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) +@@ -171,30 +237,81 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) close(fd1); } -int main(int argc, char *argv[]) -+static bool check_vm_type(unsigned long vm_type) ++static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags) { - size_t page_size; -+ /* -+ * Not all architectures support KVM_CAP_VM_TYPES. However, those that -+ * support guest_memfd have that support for the default VM type. -+ */ -+ if (vm_type == VM_TYPE_DEFAULT) -+ return true; -+ -+ return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type); +- size_t total_size; ++ size_t page_size = getpagesize(); ++ uint64_t flag; + int fd; +- struct kvm_vm *vm; + +- TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); ++ for (flag = BIT(0); flag; flag <<= 1) { ++ fd = __vm_create_guest_memfd(vm, page_size, flag); ++ if (flag & valid_flags) { ++ TEST_ASSERT(fd >= 0, ++ "guest_memfd() with flag '0x%lx' should succeed", ++ flag); ++ close(fd); ++ } else { ++ TEST_ASSERT(fd < 0 && errno == EINVAL, ++ "guest_memfd() with flag '0x%lx' should fail with EINVAL", ++ flag); ++ } ++ } +} + -+static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags, -+ bool expect_mmap_allowed) ++static void test_guest_memfd(unsigned long vm_type) +{ ++ uint64_t flags = 0; + struct kvm_vm *vm; - size_t total_size; ++ size_t total_size; + size_t page_size; - int fd; -- struct kvm_vm *vm; - -- TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); -+ if (!check_vm_type(vm_type)) -+ return; ++ int fd; page_size = getpagesize(); total_size = page_size * 4; - vm = vm_create_barebones(); + vm = vm_create_barebones_type(vm_type); ++ ++ if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP)) ++ flags |= GUEST_MEMFD_FLAG_MMAP; - test_create_guest_memfd_invalid(vm); test_create_guest_memfd_multiple(vm); -+ test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size); ++ test_create_guest_memfd_invalid_sizes(vm, flags, page_size); - fd = vm_create_guest_memfd(vm, total_size, 0); -+ fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags); ++ fd = vm_create_guest_memfd(vm, total_size, flags); test_file_read_write(fd); - test_mmap(fd, page_size); + -+ if (expect_mmap_allowed) { ++ if (flags & GUEST_MEMFD_FLAG_MMAP) { + test_mmap_supported(fd, page_size, total_size); + test_fault_overflow(fd, page_size, total_size); -+ + } else { + test_mmap_not_supported(fd, page_size, total_size); + } @@ -235,74 +246,28 @@ index ce687f8d248f..beb556293590 100644 test_fallocate(fd, 
page_size, total_size); test_invalid_punch_hole(fd, page_size, total_size); - close(fd); -+ kvm_vm_free(vm); -+} -+ -+static void test_vm_type_gmem_flag_validity(unsigned long vm_type, -+ uint64_t expected_valid_flags) -+{ -+ size_t page_size = getpagesize(); -+ struct kvm_vm *vm; -+ uint64_t flag = 0; -+ int fd; -+ -+ if (!check_vm_type(vm_type)) -+ return; -+ -+ vm = vm_create_barebones_type(vm_type); -+ -+ for (flag = BIT(0); flag; flag <<= 1) { -+ fd = __vm_create_guest_memfd(vm, page_size, flag); -+ -+ if (flag & expected_valid_flags) { -+ TEST_ASSERT(fd >= 0, -+ "guest_memfd() with flag '0x%lx' should be valid", -+ flag); -+ close(fd); -+ } else { -+ TEST_ASSERT(fd < 0 && errno == EINVAL, -+ "guest_memfd() with flag '0x%lx' should fail with EINVAL", -+ flag); -+ } -+ } ++ test_guest_memfd_flags(vm, flags); + + close(fd); + kvm_vm_free(vm); +} + -+static void test_gmem_flag_validity(void) -+{ -+ uint64_t non_coco_vm_valid_flags = 0; -+ -+ if (kvm_has_cap(KVM_CAP_GMEM_MMAP)) -+ non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_MMAP; -+ -+ test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags); -+ -+#ifdef __x86_64__ -+ test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, 0); -+ test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0); -+ test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0); -+ test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0); -+ test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0); -+#endif -+} -+ +int main(int argc, char *argv[]) +{ -+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); ++ unsigned long vm_types, vm_type; + -+ test_gmem_flag_validity(); ++ TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); + -+ test_with_type(VM_TYPE_DEFAULT, 0, false); -+ if (kvm_has_cap(KVM_CAP_GMEM_MMAP)) { -+ test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_MMAP, -+ true); -+ } ++ /* ++ * Not all architectures support KVM_CAP_VM_TYPES. However, those that ++ * support guest_memfd have that support for the default VM type. ++ */ ++ vm_types = kvm_check_cap(KVM_CAP_VM_TYPES); ++ if (!vm_types) ++ vm_types = VM_TYPE_DEFAULT; + -+#ifdef __x86_64__ -+ test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false); -+#endif ++ for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types)) ++ test_guest_memfd(vm_type); } -- 2.50.1 diff --git a/resources/hiding_ci/linux_patches/05-mmap-support/0024-KVM-selftests-Add-guest_memfd-testcase-to-fault-in-o.patch b/resources/hiding_ci/linux_patches/05-mmap-support/0024-KVM-selftests-Add-guest_memfd-testcase-to-fault-in-o.patch new file mode 100644 index 00000000000..7c457a22f16 --- /dev/null +++ b/resources/hiding_ci/linux_patches/05-mmap-support/0024-KVM-selftests-Add-guest_memfd-testcase-to-fault-in-o.patch @@ -0,0 +1,115 @@ +From 11629592f6f88f2b7bd33efb2c15dbf241628faa Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Tue, 29 Jul 2025 15:54:55 -0700 +Subject: [PATCH 24/49] KVM: selftests: Add guest_memfd testcase to fault-in on + !mmap()'d memory + +Add a guest_memfd testcase to verify that a vCPU can fault-in guest_memfd +memory that supports mmap(), but that is not currently mapped into host +userspace and/or has a userspace address (in the memslot) that points at +something other than the target guest_memfd range. Mapping guest_memfd +memory into the guest is supposed to operate completely independently from +any userspace mappings. 
+ +Signed-off-by: Sean Christopherson +--- + .../testing/selftests/kvm/guest_memfd_test.c | 64 +++++++++++++++++++ + 1 file changed, 64 insertions(+) + +diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c +index 088053d5f0f5..b86bf89a71e0 100644 +--- a/tools/testing/selftests/kvm/guest_memfd_test.c ++++ b/tools/testing/selftests/kvm/guest_memfd_test.c +@@ -13,6 +13,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -21,6 +22,7 @@ + + #include "kvm_util.h" + #include "test_util.h" ++#include "ucall_common.h" + + static void test_file_read_write(int fd) + { +@@ -298,6 +300,66 @@ static void test_guest_memfd(unsigned long vm_type) + kvm_vm_free(vm); + } + ++static void guest_code(uint8_t *mem, uint64_t size) ++{ ++ size_t i; ++ ++ for (i = 0; i < size; i++) ++ __GUEST_ASSERT(mem[i] == 0xaa, ++ "Guest expected 0xaa at offset %lu, got 0x%x", i, mem[i]); ++ ++ memset(mem, 0xff, size); ++ GUEST_DONE(); ++} ++ ++static void test_guest_memfd_guest(void) ++{ ++ /* ++ * Skip the first 4gb and slot0. slot0 maps <1gb and is used to back ++ * the guest's code, stack, and page tables, and low memory contains ++ * the PCI hole and other MMIO regions that need to be avoided. ++ */ ++ const uint64_t gpa = SZ_4G; ++ const int slot = 1; ++ ++ struct kvm_vcpu *vcpu; ++ struct kvm_vm *vm; ++ uint8_t *mem; ++ size_t size; ++ int fd, i; ++ ++ if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP)) ++ return; ++ ++ vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code); ++ ++ TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP), ++ "Default VM type should always support guest_memfd mmap()"); ++ ++ size = vm->page_size; ++ fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP); ++ vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0); ++ ++ mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); ++ TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed"); ++ memset(mem, 0xaa, size); ++ munmap(mem, size); ++ ++ virt_pg_map(vm, gpa, gpa); ++ vcpu_args_set(vcpu, 2, gpa, size); ++ vcpu_run(vcpu); ++ ++ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); ++ ++ mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); ++ TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed"); ++ for (i = 0; i < size; i++) ++ TEST_ASSERT_EQ(mem[i], 0xff); ++ ++ close(fd); ++ kvm_vm_free(vm); ++} ++ + int main(int argc, char *argv[]) + { + unsigned long vm_types, vm_type; +@@ -314,4 +376,6 @@ int main(int argc, char *argv[]) + + for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types)) + test_guest_memfd(vm_type); ++ ++ test_guest_memfd_guest(); + } +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0022-filemap-Pass-address_space-mapping-to-free_folio.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0025-filemap-Pass-address_space-mapping-to-free_folio.patch similarity index 96% rename from resources/hiding_ci/linux_patches/10-direct-map-removal/0022-filemap-Pass-address_space-mapping-to-free_folio.patch rename to resources/hiding_ci/linux_patches/10-direct-map-removal/0025-filemap-Pass-address_space-mapping-to-free_folio.patch index caad9f28e4f..771499abac9 100644 --- a/resources/hiding_ci/linux_patches/10-direct-map-removal/0022-filemap-Pass-address_space-mapping-to-free_folio.patch +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0025-filemap-Pass-address_space-mapping-to-free_folio.patch @@ -1,7 +1,7 @@ -From 
3ce2b54021d5ef77509e40929eff63b7686ec566 Mon Sep 17 00:00:00 2001 +From c448db399473016d02b6c6374d749133b1c63f8b Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 22 Nov 2024 09:29:38 -0800 -Subject: [PATCH 22/46] filemap: Pass address_space mapping to ->free_folio() +Subject: [PATCH 25/49] filemap: Pass address_space mapping to ->free_folio() When guest_memfd removes memory from the host kernel's direct map, direct map entries must be restored before the memory is freed again. To @@ -160,7 +160,7 @@ index bada249b9fb7..6af53c5096fc 100644 } EXPORT_SYMBOL_GPL(replace_page_cache_folio); diff --git a/mm/secretmem.c b/mm/secretmem.c -index 9a11a38a6770..4d2d6c0e342f 100644 +index e042a4a0bc0c..96bcb79a1aa7 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -152,7 +152,8 @@ static int secretmem_migrate_folio(struct address_space *mapping, @@ -174,7 +174,7 @@ index 9a11a38a6770..4d2d6c0e342f 100644 set_direct_map_default_noflush(&folio->page); folio_zero_segment(folio, 0, folio_size(folio)); diff --git a/mm/vmscan.c b/mm/vmscan.c -index f8dfd2864bbf..e23e1a44b92c 100644 +index 424412680cfc..edeb8b903a49 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -797,7 +797,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, @@ -196,10 +196,10 @@ index f8dfd2864bbf..e23e1a44b92c 100644 return 1; diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index d01bd7a2c2bd..0ac1c3a5a433 100644 +index 08a6bc7d25b6..9ec4c45e3cf2 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c -@@ -433,7 +433,8 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol +@@ -430,7 +430,8 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol } #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0023-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0026-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch similarity index 96% rename from resources/hiding_ci/linux_patches/10-direct-map-removal/0023-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch rename to resources/hiding_ci/linux_patches/10-direct-map-removal/0026-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch index 32cade57dd1..2d50e8cc2b4 100644 --- a/resources/hiding_ci/linux_patches/10-direct-map-removal/0023-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0026-arch-export-set_direct_map_valid_noflush-to-KVM-modu.patch @@ -1,7 +1,7 @@ -From 7a443341bf7db1d1b8f9c5c7cc2e74993bd71990 Mon Sep 17 00:00:00 2001 +From 2d29a6cc2acd7f6c15cad81fcde5bd3d6cbe78a9 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Mon, 2 Jun 2025 12:06:10 +0100 -Subject: [PATCH 23/46] arch: export set_direct_map_valid_noflush to KVM module +Subject: [PATCH 26/49] arch: export set_direct_map_valid_noflush to KVM module Use the new per-module export functionality to allow KVM (and only KVM) access to set_direct_map_valid_noflush(). 
This allows guest_memfd to diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0024-mm-introduce-AS_NO_DIRECT_MAP.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0027-mm-introduce-AS_NO_DIRECT_MAP.patch similarity index 97% rename from resources/hiding_ci/linux_patches/10-direct-map-removal/0024-mm-introduce-AS_NO_DIRECT_MAP.patch rename to resources/hiding_ci/linux_patches/10-direct-map-removal/0027-mm-introduce-AS_NO_DIRECT_MAP.patch index a23ebb1d689..04c392fb0bc 100644 --- a/resources/hiding_ci/linux_patches/10-direct-map-removal/0024-mm-introduce-AS_NO_DIRECT_MAP.patch +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0027-mm-introduce-AS_NO_DIRECT_MAP.patch @@ -1,7 +1,7 @@ -From 3c77f823f4dd019b81cb4f3895a85db50dabd2d8 Mon Sep 17 00:00:00 2001 +From 9d0f7fe52db2352cddeca91f8da03b50665a4047 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 7 Feb 2025 11:16:06 +0000 -Subject: [PATCH 24/46] mm: introduce AS_NO_DIRECT_MAP +Subject: [PATCH 27/49] mm: introduce AS_NO_DIRECT_MAP Add AS_NO_DIRECT_MAP for mappings where direct map entries of folios are set to not present . Currently, mappings that match this description are @@ -180,7 +180,7 @@ index 3cb72b579ffd..6cde2a5073f0 100644 goto out; diff --git a/mm/secretmem.c b/mm/secretmem.c -index 4d2d6c0e342f..d85c0225b7f4 100644 +index 96bcb79a1aa7..40798ac5e178 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -136,11 +136,6 @@ static int secretmem_mmap_prepare(struct vm_area_desc *desc) diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0025-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0028-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch similarity index 74% rename from resources/hiding_ci/linux_patches/10-direct-map-removal/0025-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch rename to resources/hiding_ci/linux_patches/10-direct-map-removal/0028-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch index 3d8cdd08ea4..26585771c4c 100644 --- a/resources/hiding_ci/linux_patches/10-direct-map-removal/0025-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0028-KVM-guest_memfd-Add-flag-to-remove-from-direct-map.patch @@ -1,19 +1,19 @@ -From a00f94321c740ed71e81e452626150545da2c288 Mon Sep 17 00:00:00 2001 +From 75cd1653b7aa0fbef44835b183110f25d0bf584e Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 7 Feb 2025 14:33:01 +0000 -Subject: [PATCH 25/46] KVM: guest_memfd: Add flag to remove from direct map +Subject: [PATCH 28/49] KVM: guest_memfd: Add flag to remove from direct map -Add KVM_GMEM_NO_DIRECT_MAP flag for KVM_CREATE_GUEST_MEMFD() ioctl. When -set, guest_memfd folios will be removed from the direct map after -preparation, with direct map entries only restored when the folios are -freed. +Add GUEST_MEMFD_FLAG_NO_DIRECT_MAP flag for KVM_CREATE_GUEST_MEMFD() +ioctl. When set, guest_memfd folios will be removed from the direct map +after preparation, with direct map entries only restored when the folios +are freed. To ensure these folios do not end up in places where the kernel cannot deal with them, set AS_NO_DIRECT_MAP on the guest_memfd's struct -address_space if KVM_GMEM_NO_DIRECT_MAP is requested. +address_space if GUEST_MEMFD_FLAG_NO_DIRECT_MAP is requested. -Add KVM_CAP_GMEM_NO_DIRECT_MAP to let userspace discover whether -guest_memfd supports KVM_GMEM_NO_DIRECT_MAP. 
Support depends on
 guest_memfd itself being supported, but also on whether KVM can
 manipulate the direct map at page granularity at all (possible most of
 the time, just arm64 is a notable outlier where its impossible if the
@@ -24,10 +24,10 @@ Note that this flag causes removal of direct map entries for all
 guest_memfd folios independent of whether they are "shared" or
 "private" (although current guest_memfd only supports either all folios
 in the "shared" state, or all folios in the "private" state if
-!IS_ENABLED(CONFIG_KVM_GMEM_SHARED_MEM)). The usecase for removing
-direct map entries of also the shared parts of guest_memfd are a special
-type of non-CoCo VM where, host userspace is trusted to have access to
-all of guest memory, but where Spectre-style transient execution attacks
+GUEST_MEMFD_FLAG_MMAP is not set). The use case for removing direct map
+entries of even the shared parts of guest_memfd is a special type of
+non-CoCo VM where host userspace is trusted to have access to all of
+guest memory, but where Spectre-style transient execution attacks
 through the host kernel's direct map should still be mitigated. In this
 setup, KVM retains access to guest memory via userspace mappings of
 guest_memfd, which are reflected back into KVM's memslots via

spectre-gadget.

Signed-off-by: Patrick Roy
---
- arch/arm64/include/asm/kvm_host.h | 12 +++++++++++-
+ arch/arm64/include/asm/kvm_host.h | 11 +++++++++++
  include/linux/kvm_host.h | 7 +++++++
  include/uapi/linux/kvm.h | 2 ++
  virt/kvm/guest_memfd.c | 29 +++++++++++++++++++++++++----
  virt/kvm/kvm_main.c | 5 +++++
- 5 files changed, 50 insertions(+), 5 deletions(-)
+ 5 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
-index 63f7827cfa1b..1f564f6a332f 100644
+index 3e41a880b062..f3e000daa876 100644
 --- a/arch/arm64/include/asm/kvm_host.h
 +++ b/arch/arm64/include/asm/kvm_host.h
@@ -19,6 +19,7 @@
 #include
+#include
 #include
 #include
-@@ -1677,6 +1678,15 @@ void check_feature_map(void);
- #ifdef CONFIG_KVM_GMEM
- #define kvm_arch_supports_gmem(kvm) true
- #define kvm_arch_supports_gmem_mmap(kvm) IS_ENABLED(CONFIG_KVM_GMEM_SUPPORTS_MMAP)
--#endif
-+
+@@ -1674,5 +1675,15 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
+ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
+ void check_feature_map(void);
+
++#ifdef CONFIG_KVM_GUEST_MEMFD
 +static inline bool kvm_arch_gmem_supports_no_direct_map(void) {
 +	/*
 +	 * Without FWB, direct map access is needed in kvm_pgtable_stage2_map(),
 +	 ...
 +	 */
 +	return can_set_direct_map() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
 +}
 +#define kvm_arch_gmem_supports_no_direct_map kvm_arch_gmem_supports_no_direct_map
-+#endif /* CONFIG_KVM_GMEM */
++#endif /* CONFIG_KVM_GUEST_MEMFD */
  #endif /* __ARM64_KVM_HOST_H__ */

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index 662271314778..7dc9190d2fef 100644
+index 8b47891adca1..37553848e078 100644
 --- a/include/linux/kvm_host.h
 +++ b/include/linux/kvm_host.h
@@ -36,6 +36,7 @@
 #include
+#include
 #include
 #include

-@@ -754,6 +755,12 @@ static inline bool kvm_arch_supports_gmem_mmap(struct kvm *kvm)
- }
+@@ -731,6 +732,12 @@
static inline bool kvm_arch_has_private_mem(struct kvm *kvm) + bool kvm_arch_supports_gmem_mmap(struct kvm *kvm); #endif -+#ifdef CONFIG_KVM_GMEM ++#ifdef CONFIG_KVM_GUEST_MEMFD +#ifndef kvm_arch_gmem_supports_no_direct_map +#define kvm_arch_gmem_supports_no_direct_map can_set_direct_map +#endif -+#endif /* CONFIG_KVM_GMEM */ ++#endif /* CONFIG_KVM_GUEST_MEMFD */ + #ifndef kvm_arch_has_readonly_mem static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) { diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index 698dd407980f..33b368564b1f 100644 +index 0d96d2ae6e5d..7688ea92b25c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h -@@ -961,6 +961,7 @@ struct kvm_enable_cap { +@@ -962,6 +962,7 @@ struct kvm_enable_cap { #define KVM_CAP_ARM_EL2_E2H0 241 #define KVM_CAP_RISCV_MP_STATE_RESET 242 - #define KVM_CAP_GMEM_MMAP 243 -+#define KVM_CAP_GMEM_NO_DIRECT_MAP 244 + #define KVM_CAP_GUEST_MEMFD_MMAP 243 ++#define KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP 244 struct kvm_irq_routing_irqchip { __u32 irqchip; -@@ -1598,6 +1599,7 @@ struct kvm_memory_attributes { +@@ -1599,6 +1600,7 @@ struct kvm_memory_attributes { #define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) #define GUEST_MEMFD_FLAG_MMAP (1ULL << 0) @@ -130,7 +129,7 @@ index 698dd407980f..33b368564b1f 100644 struct kvm_create_guest_memfd { __u64 size; diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index 0ac1c3a5a433..d70ee66bb96d 100644 +index 9ec4c45e3cf2..e3696880405c 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c @@ -4,6 +4,7 @@ @@ -160,7 +159,7 @@ index 0ac1c3a5a433..d70ee66bb96d 100644 folio_mark_uptodate(folio); } -@@ -432,25 +443,29 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol +@@ -429,25 +440,29 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol return MF_DELAYED; } @@ -193,8 +192,8 @@ index 0ac1c3a5a433..d70ee66bb96d 100644 -#endif }; - static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path, -@@ -513,6 +528,9 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) + static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry, +@@ -504,6 +519,9 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) /* Unmovable mappings are supposed to be marked unevictable as well. */ WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping)); @@ -204,7 +203,7 @@ index 0ac1c3a5a433..d70ee66bb96d 100644 kvm_get_kvm(kvm); gmem->kvm = kvm; xa_init(&gmem->bindings); -@@ -537,6 +555,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args) +@@ -528,6 +546,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args) if (kvm_arch_supports_gmem_mmap(kvm)) valid_flags |= GUEST_MEMFD_FLAG_MMAP; @@ -215,7 +214,7 @@ index 0ac1c3a5a433..d70ee66bb96d 100644 return -EINVAL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index f1ac872e01e9..d92dd84cca8e 100644 +index 18f29ef93543..0dbfd17e1191 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -65,6 +65,7 @@ @@ -226,17 +225,17 @@ index f1ac872e01e9..d92dd84cca8e 100644 /* Worst case buffer size needed for holding an integer. 
 */
-@@ -4914,6 +4915,10 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+@@ -4916,6 +4917,10 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
  		return kvm_supported_mem_attributes(kvm);
  #endif
- #ifdef CONFIG_KVM_GMEM
-+	case KVM_CAP_GMEM_NO_DIRECT_MAP:
+ #ifdef CONFIG_KVM_GUEST_MEMFD
++	case KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP:
 +		if (!can_set_direct_map())
 +			return false;
 +		fallthrough;
  	case KVM_CAP_GUEST_MEMFD:
-		return !kvm || kvm_arch_supports_gmem(kvm);
- #endif
+		return 1;
+	case KVM_CAP_GUEST_MEMFD_MMAP:
-- 
2.50.1

diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0029-KVM-Documentation-describe-GUEST_MEMFD_FLAG_NO_DIREC.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0029-KVM-Documentation-describe-GUEST_MEMFD_FLAG_NO_DIREC.patch
new file mode 100644
index 00000000000..2ae8f2bb09f
--- /dev/null
+++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0029-KVM-Documentation-describe-GUEST_MEMFD_FLAG_NO_DIREC.patch
@@ -0,0 +1,30 @@
+From 690b035df72fd4058f96af080d3d769035090544 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Wed, 16 Jul 2025 15:21:10 +0100
+Subject: [PATCH 29/49] KVM: Documentation: describe
+ GUEST_MEMFD_FLAG_NO_DIRECT_MAP
+
+Signed-off-by: Patrick Roy
+---
+ Documentation/virt/kvm/api.rst | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 1e0c4a68876d..4a94bac95dca 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -6418,6 +6418,11 @@ When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field
+ supports GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation
+ enables mmap() and faulting of guest_memfd memory to host userspace.
+ 
++When the capability KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP is supported, the 'flags'
++field supports GUEST_MEMFD_FLAG_NO_DIRECT_MAP. Setting this flag makes the guest_memfd
++instance behave similarly to memfd_secret, and unmaps the memory backing it from
++the kernel's address space after allocation.
++
+ When the KVM MMU performs a PFN lookup to service a guest fault and the backing
+ guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be
+ consumed from guest_memfd, regardless of whether it is a shared or a private
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0030-KVM-selftests-load-elf-via-bounce-buffer.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0030-KVM-selftests-load-elf-via-bounce-buffer.patch
new file mode 100644
index 00000000000..f99a8330716
--- /dev/null
+++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0030-KVM-selftests-load-elf-via-bounce-buffer.patch
@@ -0,0 +1,105 @@
+From b2a5123fafdbdd7637f3398f7168da24dc84b137 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Fri, 21 Feb 2025 09:00:45 +0000
+Subject: [PATCH 30/49] KVM: selftests: load elf via bounce buffer
+
+If guest memory is backed using a VMA that does not allow GUP (e.g. a
+userspace mapping of guest_memfd when the fd was allocated using
+GUEST_MEMFD_FLAG_NO_DIRECT_MAP), then directly loading the test ELF binary into
+it via read(2) potentially does not work. To nevertheless support
+loading binaries in these cases, do the read(2) syscall using a bounce
+buffer, and then memcpy from the bounce buffer into guest memory.
+ +Signed-off-by: Patrick Roy +--- + .../testing/selftests/kvm/include/test_util.h | 1 + + tools/testing/selftests/kvm/lib/elf.c | 8 +++---- + tools/testing/selftests/kvm/lib/io.c | 23 +++++++++++++++++++ + 3 files changed, 28 insertions(+), 4 deletions(-) + +diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h +index c6ef895fbd9a..0409b7b96c94 100644 +--- a/tools/testing/selftests/kvm/include/test_util.h ++++ b/tools/testing/selftests/kvm/include/test_util.h +@@ -46,6 +46,7 @@ do { \ + + ssize_t test_write(int fd, const void *buf, size_t count); + ssize_t test_read(int fd, void *buf, size_t count); ++ssize_t test_read_bounce(int fd, void *buf, size_t count); + int test_seq_read(const char *path, char **bufp, size_t *sizep); + + void __printf(5, 6) test_assert(bool exp, const char *exp_str, +diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c +index f34d926d9735..e829fbe0a11e 100644 +--- a/tools/testing/selftests/kvm/lib/elf.c ++++ b/tools/testing/selftests/kvm/lib/elf.c +@@ -31,7 +31,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp) + * the real size of the ELF header. + */ + unsigned char ident[EI_NIDENT]; +- test_read(fd, ident, sizeof(ident)); ++ test_read_bounce(fd, ident, sizeof(ident)); + TEST_ASSERT((ident[EI_MAG0] == ELFMAG0) && (ident[EI_MAG1] == ELFMAG1) + && (ident[EI_MAG2] == ELFMAG2) && (ident[EI_MAG3] == ELFMAG3), + "ELF MAGIC Mismatch,\n" +@@ -79,7 +79,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp) + offset_rv = lseek(fd, 0, SEEK_SET); + TEST_ASSERT(offset_rv == 0, "Seek to ELF header failed,\n" + " rv: %zi expected: %i", offset_rv, 0); +- test_read(fd, hdrp, sizeof(*hdrp)); ++ test_read_bounce(fd, hdrp, sizeof(*hdrp)); + TEST_ASSERT(hdrp->e_phentsize == sizeof(Elf64_Phdr), + "Unexpected physical header size,\n" + " hdrp->e_phentsize: %x\n" +@@ -146,7 +146,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) + + /* Read in the program header. */ + Elf64_Phdr phdr; +- test_read(fd, &phdr, sizeof(phdr)); ++ test_read_bounce(fd, &phdr, sizeof(phdr)); + + /* Skip if this header doesn't describe a loadable segment. */ + if (phdr.p_type != PT_LOAD) +@@ -187,7 +187,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) + " expected: 0x%jx", + n1, errno, (intmax_t) offset_rv, + (intmax_t) phdr.p_offset); +- test_read(fd, addr_gva2hva(vm, phdr.p_vaddr), ++ test_read_bounce(fd, addr_gva2hva(vm, phdr.p_vaddr), + phdr.p_filesz); + } + } +diff --git a/tools/testing/selftests/kvm/lib/io.c b/tools/testing/selftests/kvm/lib/io.c +index fedb2a741f0b..74419becc8bc 100644 +--- a/tools/testing/selftests/kvm/lib/io.c ++++ b/tools/testing/selftests/kvm/lib/io.c +@@ -155,3 +155,26 @@ ssize_t test_read(int fd, void *buf, size_t count) + + return num_read; + } ++ ++/* Test read via intermediary buffer ++ * ++ * Same as test_read, except read(2)s happen into a bounce buffer that is memcpy'd ++ * to buf. For use with buffers that cannot be GUP'd (e.g. guest_memfd VMAs if ++ * guest_memfd was created with GUEST_MEMFD_FLAG_NO_DIRECT_MAP). 
++ */
++ssize_t test_read_bounce(int fd, void *buf, size_t count)
++{
++	void *bounce_buffer;
++	ssize_t num_read;
++
++	TEST_ASSERT(count >= 0, "Unexpected count, count: %li", count);
++
++	bounce_buffer = malloc(count);
++	TEST_ASSERT(bounce_buffer != NULL, "Failed to allocate bounce buffer");
++
++	num_read = test_read(fd, bounce_buffer, count);
++	memcpy(buf, bounce_buffer, num_read);
++	free(bounce_buffer);
++
++	return num_read;
++}
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0031-KVM-selftests-set-KVM_MEM_GUEST_MEMFD-in-vm_mem_add-.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0031-KVM-selftests-set-KVM_MEM_GUEST_MEMFD-in-vm_mem_add-.patch
new file mode 100644
index 00000000000..0a0cc6057c3
--- /dev/null
+++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0031-KVM-selftests-set-KVM_MEM_GUEST_MEMFD-in-vm_mem_add-.patch
@@ -0,0 +1,71 @@
+From 606298b9b943481badabfce93a65e054a069b628 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Thu, 20 Feb 2025 14:56:20 +0000
+Subject: [PATCH 31/49] KVM: selftests: set KVM_MEM_GUEST_MEMFD in vm_mem_add()
+ if guest_memfd != -1
+
+Have vm_mem_add() always set KVM_MEM_GUEST_MEMFD in the memslot flags if
+a guest_memfd is passed in as an argument. This eliminates the
+possibility where a guest_memfd instance is passed to vm_mem_add(), but
+it ends up being ignored because the flags argument does not specify
+KVM_MEM_GUEST_MEMFD at the same time.
+
+This makes it easy to support more scenarios in which vm_mem_add() is
+not passed a guest_memfd instance, but is expected to allocate one.
+Currently, this only happens if guest_memfd == -1 but flags &
+KVM_MEM_GUEST_MEMFD != 0, but later vm_mem_add() will gain support for
+loading the test code itself into guest_memfd (via
+GUEST_MEMFD_FLAG_MMAP) if requested via a special
+vm_mem_backing_src_type, at which point having to make sure the src_type
+and flags are in-sync becomes cumbersome.
+
+Signed-off-by: Patrick Roy
+---
+ tools/testing/selftests/kvm/lib/kvm_util.c | 26 +++++++++++++---------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
+index c3f5142b0a54..cc67dfecbf65 100644
+--- a/tools/testing/selftests/kvm/lib/kvm_util.c
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
+@@ -1107,22 +1107,26 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
+ 
+ 	region->backing_src_type = src_type;
+ 
+-	if (flags & KVM_MEM_GUEST_MEMFD) {
+-		if (guest_memfd < 0) {
++	if (guest_memfd < 0) {
++		if (flags & KVM_MEM_GUEST_MEMFD) {
+ 			uint32_t guest_memfd_flags = 0;
+ 			TEST_ASSERT(!guest_memfd_offset,
+ 				    "Offset must be zero when creating new guest_memfd");
+ 			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
+-		} else {
+-			/*
+-			 * Install a unique fd for each memslot so that the fd
+-			 * can be closed when the region is deleted without
+-			 * needing to track if the fd is owned by the framework
+-			 * or by the caller.
+-			 */
+-			guest_memfd = dup(guest_memfd);
+-			TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
+ 		}
++	} else {
++		/*
++		 * Install a unique fd for each memslot so that the fd
++		 * can be closed when the region is deleted without
++		 * needing to track if the fd is owned by the framework
++		 * or by the caller.
++		 */
++		guest_memfd = dup(guest_memfd);
++		TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
++	}
++
++	if (guest_memfd > 0) {
++		flags |= KVM_MEM_GUEST_MEMFD;
+ 
+ 		region->region.guest_memfd = guest_memfd;
+ 		region->region.guest_memfd_offset = guest_memfd_offset;
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0032-KVM-selftests-Add-guest_memfd-based-vm_mem_backing_s.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0032-KVM-selftests-Add-guest_memfd-based-vm_mem_backing_s.patch
new file mode 100644
index 00000000000..56006bd4cc6
--- /dev/null
+++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0032-KVM-selftests-Add-guest_memfd-based-vm_mem_backing_s.patch
@@ -0,0 +1,190 @@
+From 9658e71c08d2e2cfe9f49938706f812e5ac0ebc1 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Thu, 20 Feb 2025 11:08:22 +0000
+Subject: [PATCH 32/49] KVM: selftests: Add guest_memfd based
+ vm_mem_backing_src_types
+
+Allow selftests to configure their memslots such that userspace_addr is
+set to a MAP_SHARED mapping of the guest_memfd that's associated with
+the memslot. This setup is the configuration for non-CoCo VMs, where all
+guest memory is backed by a guest_memfd whose folios are all marked
+shared, but KVM is still able to access guest memory to provide
+functionality such as MMIO emulation on x86.
+
+Add backing types for normal guest_memfd, as well as direct map removed
+guest_memfd.
+
+Signed-off-by: Patrick Roy
+---
+ .../testing/selftests/kvm/include/kvm_util.h | 18 ++++++
+ .../testing/selftests/kvm/include/test_util.h | 7 +++
+ tools/testing/selftests/kvm/lib/kvm_util.c | 63 ++++++++++---------
+ tools/testing/selftests/kvm/lib/test_util.c | 8 +++
+ 4 files changed, 66 insertions(+), 30 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
+index 23a506d7eca3..5204a0a18a7f 100644
+--- a/tools/testing/selftests/kvm/include/kvm_util.h
++++ b/tools/testing/selftests/kvm/include/kvm_util.h
+@@ -635,6 +635,24 @@ static inline bool is_smt_on(void)
+ 
+ void vm_create_irqchip(struct kvm_vm *vm);
+ 
++static inline uint32_t backing_src_guest_memfd_flags(enum vm_mem_backing_src_type t)
++{
++	uint32_t flags = 0;
++
++	switch (t) {
++	case VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP:
++		flags |= GUEST_MEMFD_FLAG_NO_DIRECT_MAP;
++		fallthrough;
++	case VM_MEM_SRC_GUEST_MEMFD:
++		flags |= GUEST_MEMFD_FLAG_MMAP;
++		break;
++	default:
++		break;
++	}
++
++	return flags;
++}
++
+ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
+ 					  uint64_t flags)
+ {
+diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
+index 0409b7b96c94..a56e53fc7b39 100644
+--- a/tools/testing/selftests/kvm/include/test_util.h
++++ b/tools/testing/selftests/kvm/include/test_util.h
+@@ -133,6 +133,8 @@ enum vm_mem_backing_src_type {
+ 	VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+ 	VM_MEM_SRC_SHMEM,
+ 	VM_MEM_SRC_SHARED_HUGETLB,
++	VM_MEM_SRC_GUEST_MEMFD,
++	VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP,
+ 	NUM_SRC_TYPES,
+ };
+ 
+@@ -165,6 +167,11 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
+ 	return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
+ }
+ 
++static inline bool backing_src_is_guest_memfd(enum vm_mem_backing_src_type t)
++{
++	return t == VM_MEM_SRC_GUEST_MEMFD || t == VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP;
++}
++
+ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
+ {
return t != VM_MEM_SRC_ANONYMOUS && t != VM_MEM_SRC_SHMEM; +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c +index cc67dfecbf65..a81089f7c83f 100644 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c ++++ b/tools/testing/selftests/kvm/lib/kvm_util.c +@@ -1060,6 +1060,34 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, + alignment = 1; + #endif + ++ if (guest_memfd < 0) { ++ if ((flags & KVM_MEM_GUEST_MEMFD) || backing_src_is_guest_memfd(src_type)) { ++ uint32_t guest_memfd_flags = backing_src_guest_memfd_flags(src_type); ++ ++ TEST_ASSERT(!guest_memfd_offset, ++ "Offset must be zero when creating new guest_memfd"); ++ guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); ++ } ++ } else { ++ /* ++ * Install a unique fd for each memslot so that the fd ++ * can be closed when the region is deleted without ++ * needing to track if the fd is owned by the framework ++ * or by the caller. ++ */ ++ guest_memfd = dup(guest_memfd); ++ TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd)); ++ } ++ ++ if (guest_memfd > 0) { ++ flags |= KVM_MEM_GUEST_MEMFD; ++ ++ region->region.guest_memfd = guest_memfd; ++ region->region.guest_memfd_offset = guest_memfd_offset; ++ } else { ++ region->region.guest_memfd = -1; ++ } ++ + /* + * When using THP mmap is not guaranteed to returned a hugepage aligned + * address so we have to pad the mmap. Padding is not needed for HugeTLB +@@ -1075,10 +1103,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, + if (alignment > 1) + region->mmap_size += alignment; + +- region->fd = -1; +- if (backing_src_is_shared(src_type)) ++ if (backing_src_is_guest_memfd(src_type)) ++ region->fd = guest_memfd; ++ else if (backing_src_is_shared(src_type)) + region->fd = kvm_memfd_alloc(region->mmap_size, + src_type == VM_MEM_SRC_SHARED_HUGETLB); ++ else ++ region->fd = -1; + + region->mmap_start = mmap(NULL, region->mmap_size, + PROT_READ | PROT_WRITE, +@@ -1106,34 +1137,6 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, + } + + region->backing_src_type = src_type; +- +- if (guest_memfd < 0) { +- if (flags & KVM_MEM_GUEST_MEMFD) { +- uint32_t guest_memfd_flags = 0; +- TEST_ASSERT(!guest_memfd_offset, +- "Offset must be zero when creating new guest_memfd"); +- guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); +- } +- } else { +- /* +- * Install a unique fd for each memslot so that the fd +- * can be closed when the region is deleted without +- * needing to track if the fd is owned by the framework +- * or by the caller. 
+- */ +- guest_memfd = dup(guest_memfd); +- TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd)); +- } +- +- if (guest_memfd > 0) { +- flags |= KVM_MEM_GUEST_MEMFD; +- +- region->region.guest_memfd = guest_memfd; +- region->region.guest_memfd_offset = guest_memfd_offset; +- } else { +- region->region.guest_memfd = -1; +- } +- + region->unused_phy_pages = sparsebit_alloc(); + if (vm_arch_has_protected_memory(vm)) + region->protected_phy_pages = sparsebit_alloc(); +diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c +index 03eb99af9b8d..b2baee680083 100644 +--- a/tools/testing/selftests/kvm/lib/test_util.c ++++ b/tools/testing/selftests/kvm/lib/test_util.c +@@ -299,6 +299,14 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) + */ + .flag = MAP_SHARED, + }, ++ [VM_MEM_SRC_GUEST_MEMFD] = { ++ .name = "guest_memfd", ++ .flag = MAP_SHARED, ++ }, ++ [VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP] = { ++ .name = "guest_memfd_no_direct_map", ++ .flag = MAP_SHARED, ++ } + }; + _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES, + "Missing new backing src types?"); +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0033-KVM-selftests-stuff-vm_mem_backing_src_type-into-vm_.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0033-KVM-selftests-stuff-vm_mem_backing_src_type-into-vm_.patch new file mode 100644 index 00000000000..416ded372d3 --- /dev/null +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0033-KVM-selftests-stuff-vm_mem_backing_src_type-into-vm_.patch @@ -0,0 +1,98 @@ +From 2356665bc3949fa79c497246e2aa261c3f5184cd Mon Sep 17 00:00:00 2001 +From: Patrick Roy +Date: Thu, 20 Feb 2025 13:46:01 +0000 +Subject: [PATCH 33/49] KVM: selftests: stuff vm_mem_backing_src_type into + vm_shape + +Use one of the padding fields in struct vm_shape to carry an enum +vm_mem_backing_src_type value, to give the option to overwrite the +default of VM_MEM_SRC_ANONYMOUS in __vm_create(). + +Overwriting this default will allow tests to create VMs where the test +code is backed by mmap'd guest_memfd instead of anonymous memory. 
+ +Signed-off-by: Patrick Roy +--- + .../testing/selftests/kvm/include/kvm_util.h | 19 ++++++++++--------- + tools/testing/selftests/kvm/lib/kvm_util.c | 2 +- + tools/testing/selftests/kvm/lib/x86/sev.c | 1 + + .../selftests/kvm/pre_fault_memory_test.c | 1 + + 4 files changed, 13 insertions(+), 10 deletions(-) + +diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h +index 5204a0a18a7f..8baa0bbacd09 100644 +--- a/tools/testing/selftests/kvm/include/kvm_util.h ++++ b/tools/testing/selftests/kvm/include/kvm_util.h +@@ -188,7 +188,7 @@ enum vm_guest_mode { + struct vm_shape { + uint32_t type; + uint8_t mode; +- uint8_t pad0; ++ uint8_t src_type; + uint16_t pad1; + }; + +@@ -196,14 +196,15 @@ kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); + + #define VM_TYPE_DEFAULT 0 + +-#define VM_SHAPE(__mode) \ +-({ \ +- struct vm_shape shape = { \ +- .mode = (__mode), \ +- .type = VM_TYPE_DEFAULT \ +- }; \ +- \ +- shape; \ ++#define VM_SHAPE(__mode) \ ++({ \ ++ struct vm_shape shape = { \ ++ .mode = (__mode), \ ++ .type = VM_TYPE_DEFAULT, \ ++ .src_type = VM_MEM_SRC_ANONYMOUS \ ++ }; \ ++ \ ++ shape; \ + }) + + #if defined(__aarch64__) +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c +index a81089f7c83f..3a22794bd959 100644 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c ++++ b/tools/testing/selftests/kvm/lib/kvm_util.c +@@ -495,7 +495,7 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, + if (is_guest_memfd_required(shape)) + flags |= KVM_MEM_GUEST_MEMFD; + +- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags); ++ vm_userspace_mem_region_add(vm, shape.src_type, 0, 0, nr_pages, flags); + for (i = 0; i < NR_MEM_REGIONS; i++) + vm->memslots[i] = 0; + +diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c +index c3a9838f4806..d920880e4fc0 100644 +--- a/tools/testing/selftests/kvm/lib/x86/sev.c ++++ b/tools/testing/selftests/kvm/lib/x86/sev.c +@@ -164,6 +164,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, + struct vm_shape shape = { + .mode = VM_MODE_DEFAULT, + .type = type, ++ .src_type = VM_MEM_SRC_ANONYMOUS, + }; + struct kvm_vm *vm; + struct kvm_vcpu *cpus[1]; +diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c +index 0350a8896a2f..d403f8d2f26f 100644 +--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c ++++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c +@@ -68,6 +68,7 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private) + const struct vm_shape shape = { + .mode = VM_MODE_DEFAULT, + .type = vm_type, ++ .src_type = VM_MEM_SRC_ANONYMOUS, + }; + struct kvm_vcpu *vcpu; + struct kvm_run *run; +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0034-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0034-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch new file mode 100644 index 00000000000..74a5489fac4 --- /dev/null +++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0034-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch @@ -0,0 +1,49 @@ +From 18f619c94a1cb0737639d6f8fc1178e0c41d9d36 Mon Sep 17 00:00:00 2001 +From: Patrick Roy +Date: Thu, 24 Oct 2024 07:18:57 +0100 +Subject: [PATCH 34/49] KVM: selftests: cover 
GUEST_MEMFD_FLAG_NO_DIRECT_MAP in
+ mem conversion tests
+
+Cover the scenario that the guest can fault in and write gmem-backed
+guest memory even if its direct map is removed.
+
+Signed-off-by: Patrick Roy
+---
+ .../selftests/kvm/x86/private_mem_conversions_test.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
+index 82a8d88b5338..8427d9fbdb23 100644
+--- a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
++++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
+@@ -367,7 +367,7 @@ static void *__test_mem_conversions(void *__vcpu)
+ }
+ 
+ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus,
+-				 uint32_t nr_memslots)
++				 uint32_t nr_memslots, uint64_t gmem_flags)
+ {
+ 	/*
+ 	 * Allocate enough memory so that each vCPU's chunk of memory can be
+@@ -394,7 +394,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
+ 
+ 	vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE));
+ 
+-	memfd = vm_create_guest_memfd(vm, memfd_size, 0);
++	memfd = vm_create_guest_memfd(vm, memfd_size, gmem_flags);
+ 
+ 	for (i = 0; i < nr_memslots; i++)
+ 		vm_mem_add(vm, src_type, BASE_DATA_GPA + slot_size * i,
+@@ -477,7 +477,8 @@ int main(int argc, char *argv[])
+ 	}
+ 	}
+ 
+-	test_mem_conversions(src_type, nr_vcpus, nr_memslots);
++	test_mem_conversions(src_type, nr_vcpus, nr_memslots, 0);
++	test_mem_conversions(src_type, nr_vcpus, nr_memslots, GUEST_MEMFD_FLAG_NO_DIRECT_MAP);
+ 
+ 	return 0;
+ }
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0035-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0035-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch
new file mode 100644
index 00000000000..31f1394e17b
--- /dev/null
+++ b/resources/hiding_ci/linux_patches/10-direct-map-removal/0035-KVM-selftests-cover-GUEST_MEMFD_FLAG_NO_DIRECT_MAP-i.patch
@@ -0,0 +1,27 @@
+From 1c1fdb1be73ab38b5d7377dcf68cc6781521ea56 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Wed, 16 Jul 2025 15:30:39 +0100
+Subject: [PATCH 35/49] KVM: selftests: cover GUEST_MEMFD_FLAG_NO_DIRECT_MAP in
+ guest_memfd_test.c
+
+Signed-off-by: Patrick Roy
+---
+ tools/testing/selftests/kvm/guest_memfd_test.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
+index b86bf89a71e0..2ca82bd58322 100644
+--- a/tools/testing/selftests/kvm/guest_memfd_test.c
++++ b/tools/testing/selftests/kvm/guest_memfd_test.c
+@@ -275,6 +275,8 @@ static void test_guest_memfd(unsigned long vm_type)
+ 
+ 	if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP))
+ 		flags |= GUEST_MEMFD_FLAG_MMAP;
++	if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP))
++		flags |= GUEST_MEMFD_FLAG_NO_DIRECT_MAP;
+ 
+ 	test_create_guest_memfd_multiple(vm);
+ 	test_create_guest_memfd_invalid_sizes(vm, flags, page_size);
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/10-direct-map-removal/0036-KVM-selftests-Test-guest-execution-from-direct-map-r.patch b/resources/hiding_ci/linux_patches/10-direct-map-removal/0036-KVM-selftests-Test-guest-execution-from-direct-map-r.patch
new file mode 100644
index 00000000000..e2f7313824b
--- /dev/null
+++
b/resources/hiding_ci/linux_patches/10-direct-map-removal/0036-KVM-selftests-Test-guest-execution-from-direct-map-r.patch
@@ -0,0 +1,88 @@
+From 6b47a2e73562b32e250c1395aae6e54ebc3a5aa8 Mon Sep 17 00:00:00 2001
+From: Patrick Roy
+Date: Fri, 21 Feb 2025 08:18:24 +0000
+Subject: [PATCH 36/49] KVM: selftests: Test guest execution from direct map
+ removed gmem
+
+Add a selftest that loads itself into guest_memfd (via
+GUEST_MEMFD_FLAG_MMAP) and triggers an MMIO exit when executed. This
+exercises x86 MMIO emulation code inside KVM for guest_memfd-backed
+memslots where the guest_memfd folios are direct map removed.
+In particular, it validates that x86 MMIO emulation code (guest page
+table walks + instruction fetch) correctly accesses gmem through the VMA
+that's been reflected into the memslot's userspace_addr field (instead
+of trying to do direct map accesses).
+
+Signed-off-by: Patrick Roy
+---
+ .../selftests/kvm/set_memory_region_test.c | 45 ++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
+index ce3ac0fd6dfb..ab18c0083780 100644
+--- a/tools/testing/selftests/kvm/set_memory_region_test.c
++++ b/tools/testing/selftests/kvm/set_memory_region_test.c
+@@ -603,6 +603,41 @@ static void test_mmio_during_vectoring(void)
+ 
+ 	kvm_vm_free(vm);
+ }
++
++static void guest_code_trigger_mmio(void)
++{
++	/*
++	 * Read some GPA that is not backed by a memslot. KVM considers this
++	 * as MMIO and tells userspace to emulate the read.
++	 */
++	READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
++
++	GUEST_DONE();
++}
++
++static void test_guest_memfd_mmio(void)
++{
++	struct kvm_vm *vm;
++	struct kvm_vcpu *vcpu;
++	struct vm_shape shape = {
++		.mode = VM_MODE_DEFAULT,
++		.src_type = VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP,
++	};
++	pthread_t vcpu_thread;
++
++	pr_info("Testing MMIO emulation for instructions in gmem\n");
++
++	vm = __vm_create_shape_with_one_vcpu(shape, &vcpu, 0, guest_code_trigger_mmio);
++
++	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 1);
++
++	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
++
++	/* If the MMIO read was successfully emulated, the vcpu thread will exit */
++	pthread_join(vcpu_thread, NULL);
++
++	kvm_vm_free(vm);
++}
+ #endif
+ 
+ int main(int argc, char *argv[])
+@@ -626,10 +661,16 @@ int main(int argc, char *argv[])
+ 	test_add_max_memory_regions();
+ 
+ #ifdef __x86_64__
+-	if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) &&
+-	    (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) {
++	if (kvm_has_cap(KVM_CAP_GUEST_MEMFD)) {
++		if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM)) {
+ 		test_add_private_memory_region();
+ 		test_add_overlapping_private_memory_regions();
++		}
++
++		if (kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP) && kvm_has_cap(KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP))
++			test_guest_memfd_mmio();
++		else
++			pr_info("Skipping tests requiring KVM_CAP_GUEST_MEMFD_MMAP | KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP\n");
+ 	} else {
+ 		pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n");
+ 	}
+-- 
+2.50.1
+
diff --git a/resources/hiding_ci/linux_patches/11-kvm-clock/0026-de-gpc-ify-kvm-clock.patch b/resources/hiding_ci/linux_patches/11-kvm-clock/0026-de-gpc-ify-kvm-clock.patch
deleted file mode 100644
index 7c2647d96f1..00000000000
--- a/resources/hiding_ci/linux_patches/11-kvm-clock/0026-de-gpc-ify-kvm-clock.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-From 4ef5b49cbbe248dd9a7a783c1339c577f1b9635c Mon Sep 17 00:00:00 2001
-From:
Patrick Roy -Date: Tue, 3 Jun 2025 13:57:15 +0100 -Subject: [PATCH 26/46] de-gpc-ify kvm-clock - -Signed-off-by: Patrick Roy ---- - arch/x86/include/asm/kvm_host.h | 2 +- - arch/x86/kvm/x86.c | 47 ++++++++++----------------------- - 2 files changed, 15 insertions(+), 34 deletions(-) - -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index e1426adfa93e..d93b291a7d31 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -930,7 +930,7 @@ struct kvm_vcpu_arch { - s8 pvclock_tsc_shift; - u32 pvclock_tsc_mul; - unsigned int hw_tsc_khz; -- struct gfn_to_pfn_cache pv_time; -+ gpa_t system_time; - /* set guest stopped flag in pvclock flags field */ - bool pvclock_set_guest_stopped_request; - -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index ca99187a566e..8b7ae1db9fd0 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -2349,12 +2349,9 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, - - /* we verify if the enable bit is set... */ - if (system_time & 1) -- kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, -- sizeof(struct pvclock_vcpu_time_info)); -+ vcpu->arch.system_time = system_time & ~1ULL; - else -- kvm_gpc_deactivate(&vcpu->arch.pv_time); -- -- return; -+ vcpu->arch.system_time = INVALID_GPA; - } - - static uint32_t div_frac(uint32_t dividend, uint32_t divisor) -@@ -3148,26 +3145,14 @@ u64 get_kvmclock_ns(struct kvm *kvm) - - static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock, - struct kvm_vcpu *vcpu, -- struct gfn_to_pfn_cache *gpc, -- unsigned int offset) -+ gpa_t gpa) - { -- struct pvclock_vcpu_time_info *guest_hv_clock; -+ struct pvclock_vcpu_time_info guest_hv_clock; - struct pvclock_vcpu_time_info hv_clock; -- unsigned long flags; - - memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock)); - -- read_lock_irqsave(&gpc->lock, flags); -- while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) { -- read_unlock_irqrestore(&gpc->lock, flags); -- -- if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock))) -- return; -- -- read_lock_irqsave(&gpc->lock, flags); -- } -- -- guest_hv_clock = (void *)(gpc->khva + offset); -+ kvm_read_guest(vcpu->kvm, gpa, &guest_hv_clock, sizeof(struct pvclock_vcpu_time_info)); - - /* - * This VCPU is paused, but it's legal for a guest to read another -@@ -3176,20 +3161,18 @@ static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock, - * it is consistent. 
- */ - -- guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1; -+ guest_hv_clock.version = hv_clock.version = (guest_hv_clock.version + 1) | 1; - smp_wmb(); - - /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ -- hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); -+ hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); - -- memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock)); -+ kvm_write_guest(vcpu->kvm, gpa, &hv_clock, sizeof(struct pvclock_vcpu_time_info)); - - smp_wmb(); - -- guest_hv_clock->version = ++hv_clock.version; -- -- kvm_gpc_mark_dirty_in_slot(gpc); -- read_unlock_irqrestore(&gpc->lock, flags); -+ ++hv_clock.version; -+ kvm_write_guest(vcpu->kvm, gpa + offsetof(struct pvclock_vcpu_time_info, version), &hv_clock.version, sizeof(hv_clock.version)); - - trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock); - } -@@ -3282,7 +3265,7 @@ int kvm_guest_time_update(struct kvm_vcpu *v) - if (use_master_clock) - hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT; - -- if (vcpu->pv_time.active) { -+ if (vcpu->system_time != INVALID_GPA) { - /* - * GUEST_STOPPED is only supported by kvmclock, and KVM's - * historic behavior is to only process the request if kvmclock -@@ -3292,7 +3275,7 @@ int kvm_guest_time_update(struct kvm_vcpu *v) - hv_clock.flags |= PVCLOCK_GUEST_STOPPED; - vcpu->pvclock_set_guest_stopped_request = false; - } -- kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0); -+ kvm_setup_guest_pvclock(&hv_clock, v, vcpu->system_time); - - hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED; - } -@@ -3608,7 +3591,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) - - static void kvmclock_reset(struct kvm_vcpu *vcpu) - { -- kvm_gpc_deactivate(&vcpu->arch.pv_time); -+ vcpu->arch.system_time = INVALID_GPA; - vcpu->arch.time = 0; - } - -@@ -5729,7 +5712,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, - */ - static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) - { -- if (!vcpu->arch.pv_time.active) -+ if (vcpu->arch.system_time == INVALID_GPA) - return -EINVAL; - vcpu->arch.pvclock_set_guest_stopped_request = true; - kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); -@@ -12336,8 +12319,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) - vcpu->arch.regs_avail = ~0; - vcpu->arch.regs_dirty = ~0; - -- kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm); -- - if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) - kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); - else --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/11-kvm-clock/0037-KVM-x86-use-uhva-for-kvm-clock-if-kvm_gpc_refresh-fa.patch b/resources/hiding_ci/linux_patches/11-kvm-clock/0037-KVM-x86-use-uhva-for-kvm-clock-if-kvm_gpc_refresh-fa.patch new file mode 100644 index 00000000000..59c0bc72622 --- /dev/null +++ b/resources/hiding_ci/linux_patches/11-kvm-clock/0037-KVM-x86-use-uhva-for-kvm-clock-if-kvm_gpc_refresh-fa.patch @@ -0,0 +1,103 @@ +From 71bcbd4705fda07a87b0274f86eee7f1742ab863 Mon Sep 17 00:00:00 2001 +From: Patrick Roy +Date: Fri, 18 Jul 2025 15:59:39 +0100 +Subject: [PATCH 37/49] KVM: x86: use uhva for kvm-clock if kvm_gpc_refresh() + fails + +kvm-clock uses a gfn_to_pfn_cache to avoid repeated gpa->pfn +computations, relying on mmu notifiers to determine when the translation +needs to be redone. 
+
+If the guest places the kvm-clock for some vcpu into memory that is
+backed by a KVM_MEMSLOT_GMEM_ONLY memslot, and the guest_memfd instance
+has GUEST_MEMFD_FLAG_NO_DIRECT_MAP set, this does not work:
+gfn_to_pfn_cache internally uses GUP to resolve uhva->pfn, which
+returns -EFAULT for direct map removed memory. But even if this pfn
+computation were to work, the subsequent attempts to access guest memory
+through the direct map would obviously fail.
+
+For this scenario, all other parts of kvm fall back to instead accessing
+guest memory through userspace mapping of guest_memfd, which is stored
+in the memslot's userspace_addr. Have kvm-clock do the same by handling
+failures in kvm_gpc_refresh() with a fallback to a pvclock update
+routine that operates on userspace mappings. This loses the
+optimization of gfn_to_pfn_cache for these VMs, but on modern hardware
+kvm-clock update requests should be rare enough for this to not matter
+(and guest_memfd is not supported for Xen VMs, where speed of pvclock
+accesses is more relevant).
+
+Alternatively, it would be possible to teach gfn_to_pfn_cache about
+(direct map removed) guest_memfd, however the combination of on-demand
+direct map reinsertion (and its induced ref-counting) and hooking
+gfn_to_pfn_caches up to gmem invalidations has proven significantly more
+complex [1], and hence simply falling back to userspace mappings was
+suggested by Sean at one of the guest_memfd upstream calls.
+
+[1]: https://lore.kernel.org/kvm/20240910163038.1298452-9-roypat@amazon.co.uk/
+     https://lore.kernel.org/kvm/20240910163038.1298452-10-roypat@amazon.co.uk/
+
+Signed-off-by: Patrick Roy
+---
+ arch/x86/kvm/x86.c | 38 +++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 37 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e5cd54ba1eaa..197428567239 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3149,6 +3149,40 @@ u64 get_kvmclock_ns(struct kvm *kvm)
+ 	return data.clock;
+ }
+ 
++static void kvm_setup_guest_pvclock_slow(struct pvclock_vcpu_time_info *ref_hv_clock,
++					 struct kvm_vcpu *vcpu,
++					 gpa_t gpa)
++{
++	struct pvclock_vcpu_time_info guest_hv_clock;
++	struct pvclock_vcpu_time_info hv_clock;
++
++	memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
++
++	kvm_read_guest(vcpu->kvm, gpa, &guest_hv_clock, sizeof(struct pvclock_vcpu_time_info));
++
++	/*
++	 * This VCPU is paused, but it's legal for a guest to read another
++	 * VCPU's kvmclock, so we really have to follow the specification where
++	 * it says that version is odd if data is being modified, and even after
++	 * it is consistent.
++ */ ++ ++ guest_hv_clock.version = hv_clock.version = (guest_hv_clock.version + 1) | 1; ++ smp_wmb(); ++ ++ /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ ++ hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); ++ ++ kvm_write_guest(vcpu->kvm, gpa, &hv_clock, sizeof(struct pvclock_vcpu_time_info)); ++ ++ smp_wmb(); ++ ++ ++hv_clock.version; ++ kvm_write_guest(vcpu->kvm, gpa + offsetof(struct pvclock_vcpu_time_info, version), &hv_clock.version, sizeof(hv_clock.version)); ++ ++ trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock); ++} ++ + static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock, + struct kvm_vcpu *vcpu, + struct gfn_to_pfn_cache *gpc, +@@ -3164,8 +3198,10 @@ static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock, + while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) { + read_unlock_irqrestore(&gpc->lock, flags); + +- if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock))) ++ if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock))) { ++ kvm_setup_guest_pvclock_slow(ref_hv_clock, vcpu, gpc->gpa + offset); + return; ++ } + + read_lock_irqsave(&gpc->lock, flags); + } +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0033-KVM-selftests-Fix-vm_mem_region_set_flags-docstring.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0033-KVM-selftests-Fix-vm_mem_region_set_flags-docstring.patch deleted file mode 100644 index 3520213381f..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0033-KVM-selftests-Fix-vm_mem_region_set_flags-docstring.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 955d6fe23e81d0574b96ee7f0c3540f2e98dbb7d Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:23 +0000 -Subject: [PATCH 33/46] KVM: selftests: Fix vm_mem_region_set_flags docstring - -`flags` is what region->region.flags gets set to. - -Signed-off-by: James Houghton ---- - tools/testing/selftests/kvm/lib/kvm_util.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c -index a055343a7bf7..ca1aa1699f8a 100644 ---- a/tools/testing/selftests/kvm/lib/kvm_util.c -+++ b/tools/testing/selftests/kvm/lib/kvm_util.c -@@ -1200,7 +1200,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot) - * - * Input Args: - * vm - Virtual Machine -- * flags - Starting guest physical address -+ * flags - Flags for the memslot - * - * Output Args: None - * --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0034-KVM-selftests-Fix-prefault_mem-logic.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0034-KVM-selftests-Fix-prefault_mem-logic.patch deleted file mode 100644 index f515dcdb591..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0034-KVM-selftests-Fix-prefault_mem-logic.patch +++ /dev/null @@ -1,37 +0,0 @@ -From e1557fa30edfe01a34ef5becc0124d8a67f4e284 Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:24 +0000 -Subject: [PATCH 34/46] KVM: selftests: Fix prefault_mem logic - -The previous logic didn't handle the case where memory was partitioned -AND we were using a single userfaultfd. It would only prefault the first -vCPU's memory and not the rest. 
- -Signed-off-by: James Houghton ---- - tools/testing/selftests/kvm/demand_paging_test.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c -index 0202b78f8680..315f5c9037b4 100644 ---- a/tools/testing/selftests/kvm/demand_paging_test.c -+++ b/tools/testing/selftests/kvm/demand_paging_test.c -@@ -172,11 +172,13 @@ static void run_test(enum vm_guest_mode mode, void *arg) - memset(guest_data_prototype, 0xAB, demand_paging_size); - - if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) { -- num_uffds = p->single_uffd ? 1 : nr_vcpus; -- for (i = 0; i < num_uffds; i++) { -+ for (i = 0; i < nr_vcpus; i++) { - vcpu_args = &memstress_args.vcpu_args[i]; - prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa), - vcpu_args->pages * memstress_args.guest_page_size); -+ if (!p->partition_vcpu_memory_access) -+ /* We prefaulted everything */ -+ break; - } - } - --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0035-KVM-selftests-Add-va_start-end-into-uffd_desc.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0035-KVM-selftests-Add-va_start-end-into-uffd_desc.patch deleted file mode 100644 index 4af06032478..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0035-KVM-selftests-Add-va_start-end-into-uffd_desc.patch +++ /dev/null @@ -1,44 +0,0 @@ -From f15d0613d406ff7940656054615672948b6a21a8 Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:25 +0000 -Subject: [PATCH 35/46] KVM: selftests: Add va_start/end into uffd_desc - -This will be used for the self-test to look up which userfaultfd we -should be using when handling a KVM Userfault (in the event KVM -Userfault and userfaultfd are being used together). 
- -Signed-off-by: James Houghton ---- - tools/testing/selftests/kvm/include/userfaultfd_util.h | 2 ++ - tools/testing/selftests/kvm/lib/userfaultfd_util.c | 2 ++ - 2 files changed, 4 insertions(+) - -diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h -index 60f7f9d435dc..b62fecdfe745 100644 ---- a/tools/testing/selftests/kvm/include/userfaultfd_util.h -+++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h -@@ -30,6 +30,8 @@ struct uffd_desc { - int *pipefds; - pthread_t *readers; - struct uffd_reader_args *reader_args; -+ void *va_start; -+ void *va_end; - }; - - struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, -diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c -index 5bde176cedd5..31d38b3a9d12 100644 ---- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c -+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c -@@ -152,6 +152,8 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, - expected_ioctls, "missing userfaultfd ioctls"); - - uffd_desc->uffd = uffd; -+ uffd_desc->va_start = hva; -+ uffd_desc->va_end = (char *)hva + len; - for (i = 0; i < uffd_desc->num_readers; ++i) { - int pipes[2]; - --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0036-KVM-selftests-Inform-set_memory_region_test-of-KVM_M.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0036-KVM-selftests-Inform-set_memory_region_test-of-KVM_M.patch deleted file mode 100644 index 6139aebaa29..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0036-KVM-selftests-Inform-set_memory_region_test-of-KVM_M.patch +++ /dev/null @@ -1,31 +0,0 @@ -From da111a044428b5d1f413c63b8ae4d25cf2ad98ee Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:27 +0000 -Subject: [PATCH 36/46] KVM: selftests: Inform set_memory_region_test of - KVM_MEM_USERFAULT - -The KVM_MEM_USERFAULT flag is supported iff KVM_CAP_USERFAULT is -available. 
- -Signed-off-by: James Houghton ---- - tools/testing/selftests/kvm/set_memory_region_test.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c -index ce3ac0fd6dfb..ba3fe8a53b33 100644 ---- a/tools/testing/selftests/kvm/set_memory_region_test.c -+++ b/tools/testing/selftests/kvm/set_memory_region_test.c -@@ -364,6 +364,9 @@ static void test_invalid_memory_region_flags(void) - if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE) - supported_flags |= KVM_MEM_GUEST_MEMFD; - -+ if (kvm_check_cap(KVM_CAP_USERFAULT)) -+ supported_flags |= KVM_MEM_USERFAULT; -+ - for (i = 0; i < 32; i++) { - if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i))) - continue; --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0037-KVM-selftests-Add-KVM-Userfault-mode-to-demand_pagin.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0037-KVM-selftests-Add-KVM-Userfault-mode-to-demand_pagin.patch deleted file mode 100644 index e0ab7ba4aff..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0037-KVM-selftests-Add-KVM-Userfault-mode-to-demand_pagin.patch +++ /dev/null @@ -1,381 +0,0 @@ -From ba7239056fb3dc13c1dc0c394a1a468ead5a18f7 Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:26 +0000 -Subject: [PATCH 37/46] KVM: selftests: Add KVM Userfault mode to - demand_paging_test - -Add a way for the KVM_RUN loop to handle -EFAULT exits when they are for -KVM_MEMORY_EXIT_FLAG_USERFAULT. In this case, preemptively handle the -UFFDIO_COPY or UFFDIO_CONTINUE if userfaultfd is also in use. This saves -the trip through the userfaultfd poll/read/WAKE loop. - -When preemptively handling UFFDIO_COPY/CONTINUE, do so with -MODE_DONTWAKE, as there will not be a thread to wake. If a thread *does* -take the userfaultfd slow path, we will get a regular userfault, and we -will call handle_uffd_page_request() which will do a full wake-up. In -the EEXIST case, a wake-up will not occur. Make sure to call UFFDIO_WAKE -explicitly in this case. - -When handling KVM userfaults, make sure to set the bitmap with -memory_order_release. Although it wouldn't affect the functionality of -the test (because memstress doesn't actually require any particular -guest memory contents), it is what userspace normally needs to do. - -Add `-k` to set the test to use KVM Userfault. - -Add the vm_mem_region_set_flags_userfault() helper for setting -`userfault_bitmap` and KVM_MEM_USERFAULT at the same time. 
- -Signed-off-by: James Houghton ---- - .../selftests/kvm/demand_paging_test.c | 139 +++++++++++++++++- - .../testing/selftests/kvm/include/kvm_util.h | 5 + - tools/testing/selftests/kvm/lib/kvm_util.c | 40 ++++- - 3 files changed, 176 insertions(+), 8 deletions(-) - -diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c -index 315f5c9037b4..183c70731093 100644 ---- a/tools/testing/selftests/kvm/demand_paging_test.c -+++ b/tools/testing/selftests/kvm/demand_paging_test.c -@@ -12,7 +12,9 @@ - #include - #include - #include -+#include - #include -+#include - - #include "kvm_util.h" - #include "test_util.h" -@@ -24,11 +26,21 @@ - #ifdef __NR_userfaultfd - - static int nr_vcpus = 1; -+static int num_uffds; - static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; - - static size_t demand_paging_size; -+static size_t host_page_size; - static char *guest_data_prototype; - -+static struct { -+ bool enabled; -+ int uffd_mode; /* set if userfaultfd is also in use */ -+ struct uffd_desc **uffd_descs; -+} kvm_userfault_data; -+ -+static void resolve_kvm_userfault(u64 gpa, u64 size); -+ - static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) - { - struct kvm_vcpu *vcpu = vcpu_args->vcpu; -@@ -41,8 +53,22 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) - clock_gettime(CLOCK_MONOTONIC, &start); - - /* Let the guest access its memory */ -+restart: - ret = _vcpu_run(vcpu); -- TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret); -+ if (ret < 0 && errno == EFAULT && kvm_userfault_data.enabled) { -+ /* Check for userfault. */ -+ TEST_ASSERT(run->exit_reason == KVM_EXIT_MEMORY_FAULT, -+ "Got invalid exit reason: %x", run->exit_reason); -+ TEST_ASSERT(run->memory_fault.flags == -+ KVM_MEMORY_EXIT_FLAG_USERFAULT, -+ "Got invalid memory fault exit: %llx", -+ run->memory_fault.flags); -+ resolve_kvm_userfault(run->memory_fault.gpa, -+ run->memory_fault.size); -+ goto restart; -+ } else -+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret); -+ - if (get_ucall(vcpu, NULL) != UCALL_SYNC) { - TEST_ASSERT(false, - "Invalid guest sync status: exit_reason=%s", -@@ -54,11 +80,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) - ts_diff.tv_sec, ts_diff.tv_nsec); - } - --static int handle_uffd_page_request(int uffd_mode, int uffd, -- struct uffd_msg *msg) -+static int resolve_uffd_page_request(int uffd_mode, int uffd, uint64_t addr, -+ bool wake) - { - pid_t tid = syscall(__NR_gettid); -- uint64_t addr = msg->arg.pagefault.address; - struct timespec start; - struct timespec ts_diff; - int r; -@@ -71,7 +96,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, - copy.src = (uint64_t)guest_data_prototype; - copy.dst = addr; - copy.len = demand_paging_size; -- copy.mode = 0; -+ copy.mode = wake ? 0 : UFFDIO_COPY_MODE_DONTWAKE; - - r = ioctl(uffd, UFFDIO_COPY, ©); - /* -@@ -96,6 +121,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, - - cont.range.start = addr; - cont.range.len = demand_paging_size; -+ cont.mode = wake ? 0 : UFFDIO_CONTINUE_MODE_DONTWAKE; - - r = ioctl(uffd, UFFDIO_CONTINUE, &cont); - /* -@@ -119,6 +145,20 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, - TEST_FAIL("Invalid uffd mode %d", uffd_mode); - } - -+ if (r < 0 && wake) { -+ /* -+ * No wake-up occurs when UFFDIO_COPY/CONTINUE fails, but we -+ * have a thread waiting. Wake it up. 
-+ */ -+ struct uffdio_range range = {0}; -+ -+ range.start = addr; -+ range.len = demand_paging_size; -+ -+ TEST_ASSERT(ioctl(uffd, UFFDIO_WAKE, &range) == 0, -+ "UFFDIO_WAKE failed: 0x%lx", addr); -+ } -+ - ts_diff = timespec_elapsed(start); - - PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid, -@@ -129,6 +169,58 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, - return 0; - } - -+static int handle_uffd_page_request(int uffd_mode, int uffd, -+ struct uffd_msg *msg) -+{ -+ uint64_t addr = msg->arg.pagefault.address; -+ -+ return resolve_uffd_page_request(uffd_mode, uffd, addr, true); -+} -+ -+static void resolve_kvm_userfault(u64 gpa, u64 size) -+{ -+ struct kvm_vm *vm = memstress_args.vm; -+ struct userspace_mem_region *region; -+ unsigned long *bitmap_chunk; -+ u64 page, gpa_offset; -+ -+ region = (struct userspace_mem_region *) userspace_mem_region_find( -+ vm, gpa, (gpa + size - 1)); -+ -+ if (kvm_userfault_data.uffd_mode) { -+ /* -+ * Resolve userfaults early, without needing to read them -+ * off the userfaultfd. -+ */ -+ uint64_t hva = (uint64_t)addr_gpa2hva(vm, gpa); -+ struct uffd_desc **descs = kvm_userfault_data.uffd_descs; -+ int i, fd; -+ -+ for (i = 0; i < num_uffds; ++i) -+ if (hva >= (uint64_t)descs[i]->va_start && -+ hva < (uint64_t)descs[i]->va_end) -+ break; -+ -+ TEST_ASSERT(i < num_uffds, -+ "Did not find userfaultfd for hva: %lx", hva); -+ -+ fd = kvm_userfault_data.uffd_descs[i]->uffd; -+ resolve_uffd_page_request(kvm_userfault_data.uffd_mode, fd, -+ hva, false); -+ } else { -+ uint64_t hva = (uint64_t)addr_gpa2hva(vm, gpa); -+ -+ memcpy((char *)hva, guest_data_prototype, demand_paging_size); -+ } -+ -+ gpa_offset = gpa - region->region.guest_phys_addr; -+ page = gpa_offset / host_page_size; -+ bitmap_chunk = (unsigned long *)region->region.userfault_bitmap + -+ page / BITS_PER_LONG; -+ atomic_fetch_and_explicit((_Atomic unsigned long *)bitmap_chunk, -+ ~(1ul << (page % BITS_PER_LONG)), memory_order_release); -+} -+ - struct test_params { - int uffd_mode; - bool single_uffd; -@@ -136,6 +228,7 @@ struct test_params { - int readers_per_uffd; - enum vm_mem_backing_src_type src_type; - bool partition_vcpu_memory_access; -+ bool kvm_userfault; - }; - - static void prefault_mem(void *alias, uint64_t len) -@@ -149,6 +242,25 @@ static void prefault_mem(void *alias, uint64_t len) - } - } - -+static void enable_userfault(struct kvm_vm *vm, int slots) -+{ -+ for (int i = 0; i < slots; ++i) { -+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i; -+ struct userspace_mem_region *region; -+ unsigned long *userfault_bitmap; -+ int flags = KVM_MEM_USERFAULT; -+ -+ region = memslot2region(vm, slot); -+ userfault_bitmap = bitmap_zalloc(region->mmap_size / -+ host_page_size); -+ /* everything is userfault initially */ -+ memset(userfault_bitmap, -1, region->mmap_size / host_page_size / CHAR_BIT); -+ printf("Setting bitmap: %p\n", userfault_bitmap); -+ vm_mem_region_set_flags_userfault(vm, slot, flags, -+ userfault_bitmap); -+ } -+} -+ - static void run_test(enum vm_guest_mode mode, void *arg) - { - struct memstress_vcpu_args *vcpu_args; -@@ -159,12 +271,13 @@ static void run_test(enum vm_guest_mode mode, void *arg) - struct timespec ts_diff; - double vcpu_paging_rate; - struct kvm_vm *vm; -- int i, num_uffds = 0; -+ int i; - - vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, - p->src_type, p->partition_vcpu_memory_access); - - demand_paging_size = get_backing_src_pagesz(p->src_type); -+ host_page_size = getpagesize(); - - guest_data_prototype = 
malloc(demand_paging_size); - TEST_ASSERT(guest_data_prototype, -@@ -208,6 +321,14 @@ static void run_test(enum vm_guest_mode mode, void *arg) - } - } - -+ if (p->kvm_userfault) { -+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_USERFAULT)); -+ kvm_userfault_data.enabled = true; -+ kvm_userfault_data.uffd_mode = p->uffd_mode; -+ kvm_userfault_data.uffd_descs = uffd_descs; -+ enable_userfault(vm, 1); -+ } -+ - pr_info("Finished creating vCPUs and starting uffd threads\n"); - - clock_gettime(CLOCK_MONOTONIC, &start); -@@ -265,6 +386,7 @@ static void help(char *name) - printf(" -v: specify the number of vCPUs to run.\n"); - printf(" -o: Overlap guest memory accesses instead of partitioning\n" - " them into a separate region of memory for each vCPU.\n"); -+ printf(" -k: Use KVM Userfault\n"); - puts(""); - exit(0); - } -@@ -283,7 +405,7 @@ int main(int argc, char *argv[]) - - guest_modes_append_default(); - -- while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:c:r:")) != -1) { -+ while ((opt = getopt(argc, argv, "ahokm:u:d:b:s:v:c:r:")) != -1) { - switch (opt) { - case 'm': - guest_modes_cmdline(optarg); -@@ -326,6 +448,9 @@ int main(int argc, char *argv[]) - "Invalid number of readers per uffd %d: must be >=1", - p.readers_per_uffd); - break; -+ case 'k': -+ p.kvm_userfault = true; -+ break; - case 'h': - default: - help(argv[0]); -diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h -index bee65ca08721..5642d075900f 100644 ---- a/tools/testing/selftests/kvm/include/kvm_util.h -+++ b/tools/testing/selftests/kvm/include/kvm_util.h -@@ -630,6 +630,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, - void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t guest_paddr, uint32_t slot, uint64_t npages, - uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset); -+struct userspace_mem_region * -+userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end); - - #ifndef vm_arch_has_protected_memory - static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) -@@ -639,6 +641,9 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) - #endif - - void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); -+void vm_mem_region_set_flags_userfault(struct kvm_vm *vm, uint32_t slot, -+ uint32_t flags, -+ unsigned long *userfault_bitmap); - void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); - void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); - struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); -diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c -index ca1aa1699f8a..3c215df1d2d8 100644 ---- a/tools/testing/selftests/kvm/lib/kvm_util.c -+++ b/tools/testing/selftests/kvm/lib/kvm_util.c -@@ -694,7 +694,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], - * of the regions is returned. Null is returned only when no overlapping - * region exists. 
- */ --static struct userspace_mem_region * -+struct userspace_mem_region * - userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) - { - struct rb_node *node; -@@ -1225,6 +1225,44 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) - ret, errno, slot, flags); - } - -+/* -+ * VM Memory Region Flags Set with a userfault bitmap -+ * -+ * Input Args: -+ * vm - Virtual Machine -+ * flags - Flags for the memslot -+ * userfault_bitmap - The bitmap to use for KVM_MEM_USERFAULT -+ * -+ * Output Args: None -+ * -+ * Return: None -+ * -+ * Sets the flags of the memory region specified by the value of slot, -+ * to the values given by flags. This helper adds a way to provide a -+ * userfault_bitmap. -+ */ -+void vm_mem_region_set_flags_userfault(struct kvm_vm *vm, uint32_t slot, -+ uint32_t flags, -+ unsigned long *userfault_bitmap) -+{ -+ int ret; -+ struct userspace_mem_region *region; -+ -+ region = memslot2region(vm, slot); -+ -+ TEST_ASSERT(!userfault_bitmap ^ (flags & KVM_MEM_USERFAULT), -+ "KVM_MEM_USERFAULT must be specified with a bitmap"); -+ -+ region->region.flags = flags; -+ region->region.userfault_bitmap = (__u64)userfault_bitmap; -+ -+ ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); -+ -+ TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" -+ " rc: %i errno: %i slot: %u flags: 0x%x", -+ ret, errno, slot, flags); -+} -+ - /* - * VM Memory Region Move - * --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0027-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch similarity index 86% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0027-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch index 8e36f0b3d28..c1b0e940739 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0027-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-Add-KVM_MEM_USERFAULT-memslot-flag-and-bitmap.patch @@ -1,7 +1,7 @@ -From 3cb19e4da39630a3e1ab34d9daed24fcb1c441d3 Mon Sep 17 00:00:00 2001 +From 1e250b57d6044939dae8f9e5068a0a8325d33652 Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:17 +0000 -Subject: [PATCH 27/46] KVM: Add KVM_MEM_USERFAULT memslot flag and bitmap +Subject: [PATCH 38/49] KVM: Add KVM_MEM_USERFAULT memslot flag and bitmap Use one of the 14 reserved u64s in struct kvm_userspace_memory_region2 for the user to provide `userfault_bitmap`. 
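As a rough VMM-side sketch of how the new field would be used (assuming a kernel with this series applied; slot, gpa, size, hva and bitmap are illustrative):

        #include <err.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static void set_userfault_slot(int vm_fd, __u32 slot, __u64 gpa, __u64 size,
                                       void *hva, unsigned long *bitmap)
        {
                struct kvm_userspace_memory_region2 region = {
                        .slot = slot,
                        .flags = KVM_MEM_USERFAULT,
                        .guest_phys_addr = gpa,
                        .memory_size = size,
                        .userspace_addr = (__u64)(unsigned long)hva,
                        /* one bit per host page, initially all set */
                        .userfault_bitmap = (__u64)(unsigned long)bitmap,
                };

                if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region) < 0)
                        err(1, "KVM_SET_USER_MEMORY_REGION2");
        }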
@@ -20,10 +20,10 @@ Signed-off-by: James Houghton 4 files changed, 55 insertions(+), 1 deletion(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index 7dc9190d2fef..ad37db1bed39 100644 +index 37553848e078..716f958e852c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h -@@ -599,6 +599,7 @@ struct kvm_memory_slot { +@@ -600,6 +600,7 @@ struct kvm_memory_slot { unsigned long *dirty_bitmap; struct kvm_arch_memory_slot arch; unsigned long userspace_addr; @@ -31,7 +31,7 @@ index 7dc9190d2fef..ad37db1bed39 100644 u32 flags; short id; u16 as_id; -@@ -768,6 +769,11 @@ static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) +@@ -745,6 +746,11 @@ static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm) } #endif @@ -43,7 +43,7 @@ index 7dc9190d2fef..ad37db1bed39 100644 struct kvm_memslots { u64 generation; atomic_long_t last_used_slot; -@@ -2649,4 +2655,12 @@ static inline int kvm_enable_virtualization(void) { return 0; } +@@ -2595,4 +2601,12 @@ static inline int kvm_enable_virtualization(void) { return 0; } static inline void kvm_disable_virtualization(void) { } #endif @@ -57,7 +57,7 @@ index 7dc9190d2fef..ad37db1bed39 100644 + #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index 33b368564b1f..c871e2a76e90 100644 +index 7688ea92b25c..d834eb428318 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -40,7 +40,8 @@ struct kvm_userspace_memory_region2 { @@ -79,18 +79,18 @@ index 33b368564b1f..c871e2a76e90 100644 /* for KVM_IRQ_LINE */ struct kvm_irq_level { diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig -index fa4acbedb953..fd46bc7d194a 100644 +index 1b7d5be0b6c4..1ba90f2af313 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig -@@ -132,3 +132,6 @@ config HAVE_KVM_ARCH_GMEM_INVALIDATE - config KVM_GMEM_SUPPORTS_MMAP - select KVM_GMEM +@@ -127,3 +127,6 @@ config HAVE_KVM_ARCH_GMEM_INVALIDATE + config HAVE_KVM_ARCH_GMEM_POPULATE bool + depends on KVM_GUEST_MEMFD + +config HAVE_KVM_USERFAULT + bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index d92dd84cca8e..9208ec56a26b 100644 +index 0dbfd17e1191..41c8ac9fe514 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1605,6 +1605,9 @@ static int check_memory_region_flags(struct kvm *kvm, @@ -126,7 +126,7 @@ index d92dd84cca8e..9208ec56a26b 100644 r = kvm_set_memslot(kvm, old, new, change); if (r) -@@ -6548,3 +6560,26 @@ void kvm_exit(void) +@@ -6551,3 +6563,26 @@ void kvm_exit(void) kvm_irqfd_exit(); } EXPORT_SYMBOL_GPL(kvm_exit); diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-selftests-Add-KVM_MEM_USERFAULT-guest_memfd-togg.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-selftests-Add-KVM_MEM_USERFAULT-guest_memfd-togg.patch deleted file mode 100644 index b37e6c28be7..00000000000 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0038-KVM-selftests-Add-KVM_MEM_USERFAULT-guest_memfd-togg.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 2feda9131e1f2b87f22bbc4234f55def8ec9b4ac Mon Sep 17 00:00:00 2001 -From: James Houghton -Date: Thu, 9 Jan 2025 20:49:28 +0000 -Subject: [PATCH 38/46] KVM: selftests: Add KVM_MEM_USERFAULT + guest_memfd - toggle tests - -Make sure KVM_MEM_USERFAULT can be toggled on and off for -KVM_MEM_GUEST_MEMFD memslots. 
- -Signed-off-by: James Houghton ---- - .../selftests/kvm/set_memory_region_test.c | 30 +++++++++++++++++++ - 1 file changed, 30 insertions(+) - -diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c -index ba3fe8a53b33..20a03cb57acf 100644 ---- a/tools/testing/selftests/kvm/set_memory_region_test.c -+++ b/tools/testing/selftests/kvm/set_memory_region_test.c -@@ -606,6 +606,35 @@ static void test_mmio_during_vectoring(void) - - kvm_vm_free(vm); - } -+ -+static void test_private_memory_region_userfault(void) -+{ -+ struct kvm_vm *vm; -+ int memfd; -+ -+ pr_info("Testing toggling KVM_MEM_USERFAULT on KVM_MEM_GUEST_MEMFD memory regions\n"); -+ -+ vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM); -+ -+ test_invalid_guest_memfd(vm, vm->kvm_fd, 0, "KVM fd should fail"); -+ test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail"); -+ -+ memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0); -+ -+ vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, -+ MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); -+ -+ vm_set_user_memory_region2(vm, MEM_REGION_SLOT, -+ KVM_MEM_GUEST_MEMFD | KVM_MEM_USERFAULT, -+ MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); -+ -+ vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, -+ MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); -+ -+ close(memfd); -+ -+ kvm_vm_free(vm); -+} - #endif - - int main(int argc, char *argv[]) -@@ -633,6 +662,7 @@ int main(int argc, char *argv[]) - (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) { - test_add_private_memory_region(); - test_add_overlapping_private_memory_regions(); -+ test_private_memory_region_userfault(); - } else { - pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n"); - } --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0028-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0039-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch similarity index 81% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0028-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0039-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch index c8513ef4d5d..bdf185775d5 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0028-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0039-KVM-Add-KVM_MEMORY_EXIT_FLAG_USERFAULT.patch @@ -1,7 +1,7 @@ -From 264248675db749124ca67b3c7b4e724480b4ee24 Mon Sep 17 00:00:00 2001 +From 56d26e4a6d9e3dd57edc166fdd5ea49e6d982e5e Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:18 +0000 -Subject: [PATCH 28/46] KVM: Add KVM_MEMORY_EXIT_FLAG_USERFAULT +Subject: [PATCH 39/49] KVM: Add KVM_MEMORY_EXIT_FLAG_USERFAULT This flag is used for vCPU memory faults caused by KVM Userfault; i.e., the bit in `userfault_bitmap` corresponding to the faulting gfn was set. 
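On the userspace side the flag surfaces through the existing memory-fault exit. A minimal vCPU-loop sketch, where handle_userfault() is a hypothetical VMM resolver (populate the range, clear the bitmap bit, re-enter):

        /* Sketch only; vcpu_fd and run (the mapped kvm_run) are assumed. */
        for (;;) {
                int ret = ioctl(vcpu_fd, KVM_RUN, 0);

                if (ret < 0 && errno == EFAULT &&
                    run->exit_reason == KVM_EXIT_MEMORY_FAULT &&
                    (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_USERFAULT)) {
                        handle_userfault(run->memory_fault.gpa,
                                         run->memory_fault.size);
                        continue;       /* re-enter the guest */
                }
                if (ret < 0)
                        err(1, "KVM_RUN");
                break;
        }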
@@ -12,7 +12,7 @@ Signed-off-by: James Houghton 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index c871e2a76e90..24070a4d13a1 100644 +index d834eb428318..9d08e36ea93b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -446,6 +446,7 @@ struct kvm_run { diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0029-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0040-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch similarity index 92% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0029-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0040-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch index 22feecb4cb2..bc562d8f8c7 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0029-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0040-KVM-Allow-late-setting-of-KVM_MEM_USERFAULT-on-guest.patch @@ -1,7 +1,7 @@ -From f1d9e28902fdf448ed89e6a3d7c94853e13e7188 Mon Sep 17 00:00:00 2001 +From 5f5c0d38adade0abfb63f9473a26638dd9fc0a84 Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:19 +0000 -Subject: [PATCH 29/46] KVM: Allow late setting of KVM_MEM_USERFAULT on +Subject: [PATCH 40/49] KVM: Allow late setting of KVM_MEM_USERFAULT on guest_memfd memslot Currently guest_memfd memslots can only be deleted. Slightly change the @@ -14,7 +14,7 @@ Signed-off-by: James Houghton 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index 9208ec56a26b..fee5a233ee03 100644 +index 41c8ac9fe514..ff2d40636a7a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2081,9 +2081,6 @@ static int kvm_set_memory_region(struct kvm *kvm, diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0030-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0041-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch similarity index 88% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0030-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0041-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch index 60830bd5ee3..56a128197f1 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0030-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0041-KVM-x86-mmu-Add-support-for-KVM_MEM_USERFAULT.patch @@ -1,7 +1,7 @@ -From 6513e6746526d2c56aaf560ea27ca173202132b4 Mon Sep 17 00:00:00 2001 +From a8936a9daf5ed24a1dafe514da65b92df92b79e0 Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:21 +0000 -Subject: [PATCH 30/46] KVM: x86/mmu: Add support for KVM_MEM_USERFAULT +Subject: [PATCH 41/49] KVM: x86/mmu: Add support for KVM_MEM_USERFAULT Adhering to the requirements of KVM Userfault: @@ -55,22 +55,22 @@ index b3edd7f7c8cd..2e2d03e578b5 100644 } } diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig -index 1ba959b9eadc..aa215d0df63b 100644 +index 4e43923656d0..1390ba799d4f 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig -@@ -49,6 +49,7 @@ config KVM_X86 - select KVM_GENERIC_GMEM_POPULATE if KVM_SW_PROTECTED_VM - select KVM_GMEM_SUPPORTS_MMAP if X86_64 +@@ -48,6 +48,7 
@@ config KVM_X86 + select KVM_GENERIC_PRE_FAULT_MEMORY select KVM_WERROR if WERROR + select KVM_GUEST_MEMFD if X86_64 + select HAVE_KVM_USERFAULT config KVM tristate "Kernel-based Virtual Machine (KVM) support" diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index ad5f337b496c..eb746b183ab2 100644 +index 56c80588efa0..ae0f244357a5 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c -@@ -4545,6 +4545,18 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, +@@ -4588,6 +4588,18 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { unsigned int foll = fault->write ? FOLL_WRITE : 0; @@ -87,13 +87,13 @@ index ad5f337b496c..eb746b183ab2 100644 + if (kvm_memslot_userfault(fault->slot)) + fault->max_level = PG_LEVEL_4K; - if (fault_from_gmem(fault)) + if (fault->is_private || kvm_memslot_is_gmem_only(fault->slot)) return kvm_mmu_faultin_pfn_gmem(vcpu, fault); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h -index db8f33e4de62..84e4bb34abed 100644 +index b776be783a2f..120ce9d340b4 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h -@@ -336,12 +336,26 @@ enum { +@@ -339,12 +339,26 @@ enum { */ static_assert(RET_PF_CONTINUE == 0); @@ -124,10 +124,10 @@ index db8f33e4de62..84e4bb34abed 100644 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 8b7ae1db9fd0..0cbdb8874ed1 100644 +index 197428567239..2279bb7cf9fe 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -13136,12 +13136,36 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, +@@ -13091,12 +13091,36 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, u32 new_flags = new ? 
new->flags : 0; bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; @@ -166,7 +166,7 @@ index 8b7ae1db9fd0..0cbdb8874ed1 100644 /* * Nothing more to do for RO slots (which can't be dirtied and can't be -@@ -13161,14 +13185,6 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, +@@ -13116,14 +13140,6 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) return; @@ -182,10 +182,10 @@ index 8b7ae1db9fd0..0cbdb8874ed1 100644 /* * Recover huge page mappings in the slot now that dirty logging diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index ad37db1bed39..fb9cdfe3a2cd 100644 +index 716f958e852c..59f4857e8ec2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h -@@ -2538,7 +2538,8 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr) +@@ -2492,7 +2492,8 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr) static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t size, bool is_write, bool is_exec, @@ -195,7 +195,7 @@ index ad37db1bed39..fb9cdfe3a2cd 100644 { vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; vcpu->run->memory_fault.gpa = gpa; -@@ -2548,6 +2549,8 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, +@@ -2502,6 +2503,8 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, vcpu->run->memory_fault.flags = 0; if (is_private) vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0031-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0042-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch similarity index 73% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0031-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0042-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch index 6f9fb6f68cf..b287331cc48 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0031-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0042-KVM-Advertise-KVM_CAP_USERFAULT-in-KVM_CHECK_EXTENSI.patch @@ -1,7 +1,7 @@ -From 6cb18bc91ffe494bae887176b63a173e3cf53668 Mon Sep 17 00:00:00 2001 +From 93c8b3d7b039acdd213a3250b47043218da38428 Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:20 +0000 -Subject: [PATCH 31/46] KVM: Advertise KVM_CAP_USERFAULT in KVM_CHECK_EXTENSION +Subject: [PATCH 42/49] KVM: Advertise KVM_CAP_USERFAULT in KVM_CHECK_EXTENSION Advertise support for KVM_CAP_USERFAULT when kvm_has_userfault() returns true. 
Currently this is merely IS_ENABLED(CONFIG_HAVE_KVM_USERFAULT), so @@ -14,24 +14,24 @@ Signed-off-by: James Houghton 2 files changed, 5 insertions(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h -index 24070a4d13a1..bae2fc737759 100644 +index 9d08e36ea93b..71b639e86a26 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h -@@ -965,6 +965,7 @@ struct kvm_enable_cap { +@@ -966,6 +966,7 @@ struct kvm_enable_cap { #define KVM_CAP_RISCV_MP_STATE_RESET 242 - #define KVM_CAP_GMEM_MMAP 243 - #define KVM_CAP_GMEM_NO_DIRECT_MAP 244 + #define KVM_CAP_GUEST_MEMFD_MMAP 243 + #define KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP 244 +#define KVM_CAP_USERFAULT 245 struct kvm_irq_routing_irqchip { __u32 irqchip; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index fee5a233ee03..f2e88fb9d4bb 100644 +index ff2d40636a7a..c089e03b066b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4944,6 +4944,10 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) - #ifdef CONFIG_KVM_GMEM_SUPPORTS_MMAP - case KVM_CAP_GMEM_MMAP: + return 1; + case KVM_CAP_GUEST_MEMFD_MMAP: return !kvm || kvm_arch_supports_gmem_mmap(kvm); +#endif +#ifdef CONFIG_HAVE_KVM_USERFAULT diff --git a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0032-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0043-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch similarity index 92% rename from resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0032-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch rename to resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0043-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch index bbf66bf55a9..9e330a80ced 100644 --- a/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0032-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch +++ b/resources/hiding_ci/linux_patches/15-kvm-mem-userfault/0043-KVM-arm64-Add-support-for-KVM_MEM_USERFAULT.patch @@ -1,7 +1,7 @@ -From 3bf9f7353147f7aa5fc766846b7632f7117c0bef Mon Sep 17 00:00:00 2001 +From 5179bf5e8ebe11d20c73513c51d78fb0f48cd44c Mon Sep 17 00:00:00 2001 From: James Houghton Date: Thu, 9 Jan 2025 20:49:22 +0000 -Subject: [PATCH 32/46] KVM: arm64: Add support for KVM_MEM_USERFAULT +Subject: [PATCH 43/49] KVM: arm64: Add support for KVM_MEM_USERFAULT Adhering to the requirements of KVM Userfault: 1. When it is toggled on, zap the second stage with @@ -19,13 +19,13 @@ Signed-off-by: James Houghton 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig -index 323b46b7c82f..ce3bd1a1ac89 100644 +index bff62e75d681..c75d6bcd3dd8 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig -@@ -39,6 +39,7 @@ menuconfig KVM +@@ -38,6 +38,7 @@ menuconfig KVM + select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS - select KVM_GMEM - select KVM_GMEM_SUPPORTS_MMAP + select KVM_GUEST_MEMFD + select HAVE_KVM_USERFAULT help Support hosting virtualized guest machines. 
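A VMM would typically probe the capability once at start-up; a one-line sketch (kvm_fd is the /dev/kvm descriptor):

        /* 0 means this kernel has no KVM Userfault support, so
         * KVM_MEM_USERFAULT must not be set on any memslot. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USERFAULT) <= 0)
                fprintf(stderr, "KVM_CAP_USERFAULT unavailable\n");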
diff --git a/resources/hiding_ci/linux_patches/20-gmem-write/0039-KVM-guest_memfd-add-generic-population-via-write.patch b/resources/hiding_ci/linux_patches/20-gmem-write/0039-KVM-guest_memfd-add-generic-population-via-write.patch deleted file mode 100644 index 586824b9c7c..00000000000 --- a/resources/hiding_ci/linux_patches/20-gmem-write/0039-KVM-guest_memfd-add-generic-population-via-write.patch +++ /dev/null @@ -1,133 +0,0 @@ -From 859e9756eb70cfc8cf4fb4f0870908e55521cc7f Mon Sep 17 00:00:00 2001 -From: Nikita Kalyazin -Date: Mon, 3 Mar 2025 13:08:37 +0000 -Subject: [PATCH 39/46] KVM: guest_memfd: add generic population via write - -write syscall populates guest_memfd with user-supplied data in a generic -way, ie no vendor-specific preparation is performed. This is supposed -to be used in non-CoCo setups where guest memory is not -hardware-encrypted. - -The following behaviour is implemented: - - only page-aligned count and offset are allowed - - if the memory is already allocated, the call will successfully - populate it - - if the memory is not allocated, the call will both allocate and - populate - - if the memory is already populated, the call will not repopulate it - -Signed-off-by: Nikita Kalyazin ---- - virt/kvm/guest_memfd.c | 88 +++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 87 insertions(+), 1 deletion(-) - -diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index d70ee66bb96d..809da2a2fb37 100644 ---- a/virt/kvm/guest_memfd.c -+++ b/virt/kvm/guest_memfd.c -@@ -392,8 +392,93 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma) - return 0; - } - -+static ssize_t kvm_kmem_gmem_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *offset) -+{ -+ pgoff_t start, end, index; -+ ssize_t ret = 0; -+ -+ if (!PAGE_ALIGNED(*offset) || !PAGE_ALIGNED(count)) -+ return -EINVAL; -+ -+ if (*offset + count > i_size_read(file_inode(file))) -+ return -EINVAL; -+ -+ if (!buf) -+ return -EINVAL; -+ -+ start = *offset >> PAGE_SHIFT; -+ end = (*offset + count) >> PAGE_SHIFT; -+ -+ filemap_invalidate_lock_shared(file->f_mapping); -+ -+ for (index = start; index < end; ) { -+ struct folio *folio; -+ void *vaddr; -+ pgoff_t buf_offset = (index - start) << PAGE_SHIFT; -+ -+ if (signal_pending(current)) { -+ ret = -EINTR; -+ goto out; -+ } -+ -+ folio = kvm_gmem_get_folio(file_inode(file), index); -+ if (IS_ERR(folio)) { -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ if (folio_test_hwpoison(folio)) { -+ folio_unlock(folio); -+ folio_put(folio); -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ /* No support for huge pages. */ -+ if (WARN_ON_ONCE(folio_test_large(folio))) { -+ folio_unlock(folio); -+ folio_put(folio); -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ if (folio_test_uptodate(folio)) { -+ folio_unlock(folio); -+ folio_put(folio); -+ ret = -ENOSPC; -+ goto out; -+ } -+ -+ folio_unlock(folio); -+ -+ vaddr = kmap_local_folio(folio, 0); -+ ret = copy_from_user(vaddr, buf + buf_offset, PAGE_SIZE); -+ kunmap_local(vaddr); -+ if (ret) { -+ ret = -EINVAL; -+ folio_put(folio); -+ goto out; -+ } -+ -+ kvm_gmem_mark_prepared(folio); -+ folio_put(folio); -+ -+ index = folio_next_index(folio); -+ *offset += PAGE_SIZE; -+ } -+ -+out: -+ filemap_invalidate_unlock_shared(file->f_mapping); -+ -+ return ret && start == (*offset >> PAGE_SHIFT) ? 
-+ ret : *offset - (start << PAGE_SHIFT); -+} -+ - static struct file_operations kvm_gmem_fops = { -- .mmap = kvm_gmem_mmap, -+ .mmap = kvm_gmem_mmap, -+ .llseek = default_llseek, -+ .write = kvm_kmem_gmem_write, - .open = generic_file_open, - .release = kvm_gmem_release, - .fallocate = kvm_gmem_fallocate, -@@ -514,6 +599,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) - } - - file->f_flags |= O_LARGEFILE; -+ file->f_mode |= FMODE_LSEEK | FMODE_PWRITE; - - inode = file->f_inode; - WARN_ON(file->f_mapping != inode->i_mapping); --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/20-gmem-write/0040-KVM-selftests-update-guest_memfd-write-tests.patch b/resources/hiding_ci/linux_patches/20-gmem-write/0040-KVM-selftests-update-guest_memfd-write-tests.patch deleted file mode 100644 index 7d7861619b8..00000000000 --- a/resources/hiding_ci/linux_patches/20-gmem-write/0040-KVM-selftests-update-guest_memfd-write-tests.patch +++ /dev/null @@ -1,126 +0,0 @@ -From 205f30ad3c9fa142c34ab2ac6a3f6a762f6fc6b1 Mon Sep 17 00:00:00 2001 -From: Nikita Kalyazin -Date: Mon, 3 Mar 2025 13:08:38 +0000 -Subject: [PATCH 40/46] KVM: selftests: update guest_memfd write tests - -This is to reflect that the write syscall is now implemented for -guest_memfd. - -Signed-off-by: Nikita Kalyazin ---- - .../testing/selftests/kvm/guest_memfd_test.c | 85 +++++++++++++++++-- - 1 file changed, 79 insertions(+), 6 deletions(-) - -diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c -index 1252e74fbb8f..3965a842896e 100644 ---- a/tools/testing/selftests/kvm/guest_memfd_test.c -+++ b/tools/testing/selftests/kvm/guest_memfd_test.c -@@ -22,18 +22,90 @@ - #include "kvm_util.h" - #include "test_util.h" - --static void test_file_read_write(int fd) -+static void test_file_read(int fd) - { - char buf[64]; - - TEST_ASSERT(read(fd, buf, sizeof(buf)) < 0, - "read on a guest_mem fd should fail"); -- TEST_ASSERT(write(fd, buf, sizeof(buf)) < 0, -- "write on a guest_mem fd should fail"); - TEST_ASSERT(pread(fd, buf, sizeof(buf), 0) < 0, - "pread on a guest_mem fd should fail"); -- TEST_ASSERT(pwrite(fd, buf, sizeof(buf), 0) < 0, -- "pwrite on a guest_mem fd should fail"); -+} -+ -+static void test_file_write(int fd, size_t total_size) -+{ -+ size_t page_size = getpagesize(); -+ void *buf = NULL; -+ int ret; -+ -+ ret = posix_memalign(&buf, page_size, total_size); -+ TEST_ASSERT_EQ(ret, 0); -+ -+ /* Check arguments correctness checks work as expected */ -+ -+ ret = pwrite(fd, buf, page_size - 1, 0); -+ TEST_ASSERT(ret == -1, "write unaligned count on a guest_mem fd should fail"); -+ TEST_ASSERT_EQ(errno, EINVAL); -+ -+ ret = pwrite(fd, buf, page_size, 1); -+ TEST_ASSERT(ret == -1, "write unaligned offset on a guest_mem fd should fail"); -+ TEST_ASSERT_EQ(errno, EINVAL); -+ -+ ret = pwrite(fd, buf, page_size, total_size); -+ TEST_ASSERT(ret == -1, "writing past the file size on a guest_mem fd should fail"); -+ TEST_ASSERT_EQ(errno, EINVAL); -+ -+ ret = pwrite(fd, NULL, page_size, 0); -+ TEST_ASSERT(ret == -1, "supplying a NULL buffer when writing a guest_mem fd should fail"); -+ TEST_ASSERT_EQ(errno, EINVAL); -+ -+ /* Check double population is not allowed */ -+ -+ ret = pwrite(fd, buf, page_size, 0); -+ TEST_ASSERT(ret == page_size, "page-aligned write on a guest_mem fd should succeed"); -+ -+ ret = pwrite(fd, buf, page_size, 0); -+ TEST_ASSERT(ret == -1, "write on already populated guest_mem fd should fail"); -+ TEST_ASSERT_EQ(errno, ENOSPC); -+ -+ ret = 
fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, page_size); -+ TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed"); -+ -+ /* Check population is allowed again after punching a hole */ -+ -+ ret = pwrite(fd, buf, page_size, 0); -+ TEST_ASSERT(ret == page_size, "page-aligned write on a punched guest_mem fd should succeed"); -+ -+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, page_size); -+ TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed"); -+ -+ /* Check population of already allocated memory is allowed */ -+ -+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, page_size); -+ TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed"); -+ -+ ret = pwrite(fd, buf, page_size, 0); -+ TEST_ASSERT(ret == page_size, "write on a preallocated guest_mem fd should succeed"); -+ -+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, page_size); -+ TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed"); -+ -+ /* Check population works until an already populated page is encountered */ -+ -+ ret = pwrite(fd, buf, total_size, 0); -+ TEST_ASSERT(ret == total_size, "page-aligned write on a guest_mem fd should succeed"); -+ -+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, page_size); -+ TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed"); -+ -+ ret = pwrite(fd, buf, total_size, 0); -+ TEST_ASSERT(ret == page_size, "write on a guest_mem fd should not overwrite data"); -+ -+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, total_size); -+ TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed"); -+ -+ -+ free(buf); - } - - static void test_mmap_supported(int fd, size_t page_size, size_t total_size) -@@ -270,7 +342,8 @@ static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags, - - fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags); - -- test_file_read_write(fd); -+ test_file_read(fd); -+ test_file_write(fd, total_size); - - if (expect_mmap_allowed) { - test_mmap_supported(fd, page_size, total_size); --- -2.50.1 - diff --git a/resources/hiding_ci/linux_patches/20-gmem-write/0044-KVM-guest_memfd-add-generic-population-via-write.patch b/resources/hiding_ci/linux_patches/20-gmem-write/0044-KVM-guest_memfd-add-generic-population-via-write.patch new file mode 100644 index 00000000000..e699a0d396a --- /dev/null +++ b/resources/hiding_ci/linux_patches/20-gmem-write/0044-KVM-guest_memfd-add-generic-population-via-write.patch @@ -0,0 +1,118 @@ +From cd8f88bd30341d368432371f53de7704ccc73c87 Mon Sep 17 00:00:00 2001 +From: Nikita Kalyazin +Date: Mon, 3 Mar 2025 13:08:37 +0000 +Subject: [PATCH 44/49] KVM: guest_memfd: add generic population via write + +write syscall populates guest_memfd with user-supplied data in a generic +way, ie no vendor-specific preparation is performed. This is supposed +to be used in non-CoCo setups where guest memory is not +hardware-encrypted. 
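As a usage sketch only, not part of the patch: population from a VMM reduces to a page-aligned pwrite() on the guest_memfd file descriptor, with the exact semantics spelled out in the behaviour list that follows.

        #include <err.h>
        #include <errno.h>
        #include <unistd.h>

        /* Illustrative only: gmem_fd is a guest_memfd, buf/page_size/off
         * are assumed page-aligned. */
        static void populate_page(int gmem_fd, const void *buf,
                                  size_t page_size, off_t off)
        {
                ssize_t n = pwrite(gmem_fd, buf, page_size, off);

                if (n < 0 && errno == ENOSPC)
                        return;         /* already populated: left untouched */
                if (n != (ssize_t)page_size)
                        err(1, "guest_memfd pwrite");
        }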
+ +The following behaviour is implemented: + - only page-aligned count and offset are allowed + - if the memory is already allocated, the call will successfully + populate it + - if the memory is not allocated, the call will both allocate and + populate + - if the memory is already populated, the call will not repopulate it + +Signed-off-by: Nikita Kalyazin +--- + virt/kvm/guest_memfd.c | 60 +++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 59 insertions(+), 1 deletion(-) + +diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c +index e3696880405c..7f5134a7c8e4 100644 +--- a/virt/kvm/guest_memfd.c ++++ b/virt/kvm/guest_memfd.c +@@ -390,7 +390,9 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma) + } + + static struct file_operations kvm_gmem_fops = { +- .mmap = kvm_gmem_mmap, ++ .mmap = kvm_gmem_mmap, ++ .llseek = default_llseek, ++ .write_iter = generic_perform_write, + .open = generic_file_open, + .release = kvm_gmem_release, + .fallocate = kvm_gmem_fallocate, +@@ -401,6 +403,59 @@ void kvm_gmem_init(struct module *module) + kvm_gmem_fops.owner = module; + } + ++static int kvm_kmem_gmem_write_begin(struct file *file, struct address_space *mapping, ++ loff_t pos, unsigned len, struct folio **foliop, ++ void **fsdata) ++{ ++ pgoff_t index = pos >> PAGE_SHIFT; ++ struct folio *folio; ++ ++ if (!PAGE_ALIGNED(pos) || len != PAGE_SIZE) ++ return -EINVAL; ++ ++ if (pos + len > i_size_read(file_inode(file))) ++ return -EINVAL; ++ ++ folio = kvm_gmem_get_folio(file_inode(file), index); ++ if (IS_ERR(folio)) ++ return -EFAULT; ++ ++ if (WARN_ON_ONCE(folio_test_large(folio))) { ++ folio_unlock(folio); ++ folio_put(folio); ++ return -EFAULT; ++ } ++ ++ if (folio_test_uptodate(folio)) { ++ folio_unlock(folio); ++ folio_put(folio); ++ return -ENOSPC; ++ } ++ ++ *foliop = folio; ++ return 0; ++} ++ ++static int kvm_kmem_gmem_write_end(struct file *file, struct address_space *mapping, ++ loff_t pos, unsigned len, unsigned copied, ++ struct folio *folio, void *fsdata) ++{ ++ int ret; ++ ++ if (copied == len) { ++ kvm_gmem_mark_prepared(folio); ++ ret = copied; ++ } else { ++ filemap_remove_folio(folio); ++ ret = 0; ++ } ++ ++ folio_unlock(folio); ++ folio_put(folio); ++ ++ return ret; ++} ++ + static int kvm_gmem_migrate_folio(struct address_space *mapping, + struct folio *dst, struct folio *src, + enum migrate_mode mode) +@@ -460,6 +515,8 @@ static void kvm_gmem_free_folio(struct address_space *mapping, + + static const struct address_space_operations kvm_gmem_aops = { + .dirty_folio = noop_dirty_folio, ++ .write_begin = kvm_kmem_gmem_write_begin, ++ .write_end = kvm_kmem_gmem_write_end, + .migrate_folio = kvm_gmem_migrate_folio, + .error_remove_folio = kvm_gmem_error_folio, + .free_folio = kvm_gmem_free_folio, +@@ -505,6 +562,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags) + } + + file->f_flags |= O_LARGEFILE; ++ file->f_mode |= FMODE_LSEEK | FMODE_PWRITE; + + inode = file->f_inode; + WARN_ON(file->f_mapping != inode->i_mapping); +-- +2.50.1 + diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0041-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch similarity index 97% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0041-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch index 
36f5d7a2450..9c59626b077 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0041-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-generic-continue-for-non-hugetlbfs.patch @@ -1,7 +1,7 @@ -From cb3a4587ef307f7cd3205e502abe3b5663c5c877 Mon Sep 17 00:00:00 2001 +From aa9cd17534cb5f91d2f6a4dcbbb460492deace71 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Mon, 31 Mar 2025 10:15:35 +0000 -Subject: [PATCH 41/46] mm: userfaultfd: generic continue for non hugetlbfs +Subject: [PATCH 45/49] mm: userfaultfd: generic continue for non hugetlbfs Remove shmem-specific code from UFFDIO_CONTINUE implementation for non-huge pages by calling vm_ops->fault(). A new VMF flag, diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0042-mm-provide-can_userfault-vma-operation.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0046-mm-provide-can_userfault-vma-operation.patch similarity index 95% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0042-mm-provide-can_userfault-vma-operation.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0046-mm-provide-can_userfault-vma-operation.patch index 76e124381a7..8d678acfc15 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0042-mm-provide-can_userfault-vma-operation.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0046-mm-provide-can_userfault-vma-operation.patch @@ -1,7 +1,7 @@ -From 0aa0ceee2a842559241467722237002b4b9101f8 Mon Sep 17 00:00:00 2001 +From 3e1004dc6c19b37c8776069c03b58b75085e9dfd Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Fri, 4 Apr 2025 14:15:18 +0000 -Subject: [PATCH 42/46] mm: provide can_userfault vma operation +Subject: [PATCH 46/49] mm: provide can_userfault vma operation The new operation allows to decouple the userfaultfd code from dependencies to VMA types, specifically, shmem and hugetlb.
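The shape of the hook, as a sketch: the callback signature is assumed from this series, and the real guest_memfd wiring lands in the fixup patch further below.

        /* Assumed signature; a VMA type opts in by implementing the hook. */
        static bool kvm_gmem_can_userfault(struct vm_area_struct *vma,
                                           unsigned long vm_flags)
        {
                /* guest_memfd only supports userfaultfd minor mode. */
                return vm_flags & VM_UFFD_MINOR;
        }

        static const struct vm_operations_struct kvm_gmem_vm_ops = {
                .fault         = kvm_gmem_fault_user_mapping,
                .can_userfault = kvm_gmem_can_userfault,
        };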
The diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0043-mm-userfaultfd-use-can_userfault-vma-operation.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0047-mm-userfaultfd-use-can_userfault-vma-operation.patch similarity index 95% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0043-mm-userfaultfd-use-can_userfault-vma-operation.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0047-mm-userfaultfd-use-can_userfault-vma-operation.patch index 4693b362211..e852cd91f7f 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0043-mm-userfaultfd-use-can_userfault-vma-operation.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0047-mm-userfaultfd-use-can_userfault-vma-operation.patch @@ -1,7 +1,7 @@ -From b41e9aeb778fe03d0779d2ac3891ab3463ccc3eb Mon Sep 17 00:00:00 2001 +From 375b685ebb60ff5f7314ca0bc888898439fe4e93 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Fri, 4 Apr 2025 14:16:49 +0000 -Subject: [PATCH 43/46] mm: userfaultfd: use can_userfault vma operation +Subject: [PATCH 47/49] mm: userfaultfd: use can_userfault vma operation Signed-off-by: Nikita Kalyazin --- diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0044-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0048-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch similarity index 80% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0044-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0048-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch index 2b74a02fe75..1758c3fe92a 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0044-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0048-KVM-guest_memfd-add-support-for-userfaultfd-minor.patch @@ -1,7 +1,7 @@ -From b8139ee748086f421b16251e1d0c4ab2b9fed591 Mon Sep 17 00:00:00 2001 +From 36f7212593738d97042676841e0d4f95a1ac6a95 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Tue, 1 Apr 2025 15:02:56 +0000 -Subject: [PATCH 44/46] KVM: guest_memfd: add support for userfaultfd minor +Subject: [PATCH 48/49] KVM: guest_memfd: add support for userfaultfd minor Add support for sending a pagefault event if userfaultfd is registered. Only page minor event is currently supported. 
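Consumer-side sketch of the feature (the uffd uapi names are standard; the guest_memfd feature bit is introduced by this series): enable the feature at UFFDIO_API time, then register the mapped guest_memfd range for minor faults.

        #include <err.h>
        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/userfaultfd.h>

        /* Register a host mapping of a guest_memfd for minor faults. */
        static int register_gmem_minor(void *gmem_hva, size_t len)
        {
                struct uffdio_api api = {
                        .api = UFFD_API,
                        .features = UFFD_FEATURE_MINOR_GUEST_MEMFD,
                };
                struct uffdio_register reg = {
                        .range = { .start = (unsigned long)gmem_hva, .len = len },
                        .mode = UFFDIO_REGISTER_MODE_MINOR,
                };
                int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

                if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) ||
                    ioctl(uffd, UFFDIO_REGISTER, &reg))
                        err(1, "userfaultfd minor registration");
                return uffd;
        }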
@@ -12,7 +12,7 @@ Signed-off-by: Nikita Kalyazin 1 file changed, 7 insertions(+) diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index 809da2a2fb37..e7fcd422c801 100644 +index 7f5134a7c8e4..a9f91db3687b 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c @@ -5,6 +5,7 @@ @@ -23,7 +23,7 @@ index 809da2a2fb37..e7fcd422c801 100644 #include "kvm_mm.h" -@@ -362,6 +363,12 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) +@@ -359,6 +360,12 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) kvm_gmem_mark_prepared(folio); } diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0049-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch similarity index 94% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0049-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch index 42808c8b6d5..2efd99e47f5 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0045-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0049-mm-userfaultfd-add-UFFD_FEATURE_MINOR_GUEST_MEMFD.patch @@ -1,7 +1,7 @@ -From d1e4c970e57a9fed70ab2667fb840fc656d53cfa Mon Sep 17 00:00:00 2001 +From bc53880a8867a3b4e26a102a8e0aef2bf3f37b59 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Fri, 4 Apr 2025 14:18:03 +0000 -Subject: [PATCH 45/46] mm: userfaultfd: add UFFD_FEATURE_MINOR_GUEST_MEMFD +Subject: [PATCH 49/49] mm: userfaultfd: add UFFD_FEATURE_MINOR_GUEST_MEMFD Signed-off-by: Nikita Kalyazin --- diff --git a/resources/hiding_ci/linux_patches/25-gmem-uffd/0046-fixup-for-guest_memfd-uffd-v3.patch b/resources/hiding_ci/linux_patches/25-gmem-uffd/0050-fixup-for-guest_memfd-uffd-v3.patch similarity index 91% rename from resources/hiding_ci/linux_patches/25-gmem-uffd/0046-fixup-for-guest_memfd-uffd-v3.patch rename to resources/hiding_ci/linux_patches/25-gmem-uffd/0050-fixup-for-guest_memfd-uffd-v3.patch index 5729f5a91f8..1495b425241 100644 --- a/resources/hiding_ci/linux_patches/25-gmem-uffd/0046-fixup-for-guest_memfd-uffd-v3.patch +++ b/resources/hiding_ci/linux_patches/25-gmem-uffd/0050-fixup-for-guest_memfd-uffd-v3.patch @@ -1,7 +1,7 @@ -From 8bfc3443eeb52ebb0c018c2f93f782407c68282e Mon Sep 17 00:00:00 2001 +From e4e7a96ac22a2f6740cc6afbafa1753935ac3fe3 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Thu, 10 Apr 2025 14:18:53 +0000 -Subject: [PATCH 46/46] fixup for guest_memfd uffd v3 +Subject: [PATCH] fixup for guest_memfd uffd v3 - implement can_userfault for guest_memfd - check vma->vm_ops pointer before dereferencing @@ -47,10 +47,10 @@ index d900dfd03bbe..7fb92714bc5c 100644 if (!vma_is_anonymous(dst_vma) && !can_userfault) diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c -index e7fcd422c801..0226086a1a01 100644 +index a9f91db3687b..3fbff4ba8f95 100644 --- a/virt/kvm/guest_memfd.c +++ b/virt/kvm/guest_memfd.c -@@ -380,8 +380,15 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) +@@ -377,8 +377,15 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf) return ret; }