// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Intel, Inc.
 *
 * Author:
 * Isaku Yamahata <isaku.yamahata at gmail.com>
 */
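
/*
 * Functional test for KVM_PRE_FAULT_MEMORY: pre-populate guest memory from
 * userspace with a vCPU ioctl, then let the guest read every page to confirm
 * the mappings are usable.
 */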
#include <linux/sizes.h>

#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>

/*
 * The region spans a 2M range plus one extra page so pre-faulting can be
 * exercised both fully inside and straddling the end of the memslot; the
 * slot number is arbitrary.
 */
#define TEST_SIZE	(SZ_2M + PAGE_SIZE)
#define TEST_NPAGES	(TEST_SIZE / PAGE_SIZE)
#define TEST_SLOT	10

static void guest_code(uint64_t base_gpa)
{
	volatile uint64_t val __used;
	int i;

	/* Read one word from every test page to prove it is accessible. */
	for (i = 0; i < TEST_NPAGES; i++) {
		uint64_t *src = (uint64_t *)(base_gpa + i * PAGE_SIZE);

		val = *src;
	}

	GUEST_DONE();
}

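/*
 * Issue KVM_PRE_FAULT_MEMORY for [gpa, gpa + size) and assert that exactly
 * @left bytes remain unhandled.  The kernel consumes the range incrementally,
 * shrinking range.size as it goes, so retry until the range is exhausted, the
 * ioctl is interrupted (-EINTR), or it fails outright.
 */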
static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
			     u64 left)
{
	struct kvm_pre_fault_memory range = {
		.gpa = gpa,
		.size = size,
		.flags = 0,
	};
	u64 prev;
	int ret, save_errno;

	do {
		prev = range.size;
		ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
		save_errno = errno;
		/*
		 * Successful calls must make progress (range.size shrinks);
		 * failed calls must not.
		 */
		TEST_ASSERT((range.size < prev) ^ (ret < 0),
			    "%sexpecting range.size to change on %s",
			    ret < 0 ? "not " : "",
			    ret < 0 ? "failure" : "success");
	} while (ret >= 0 ? range.size : save_errno == EINTR);

	TEST_ASSERT(range.size == left,
		    "Completed with %lld bytes left, expected %" PRId64,
		    range.size, left);

	if (left == 0)
		__TEST_ASSERT_VM_VCPU_IOCTL(!ret, "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
	else
		/* No memory slot causes RET_PF_EMULATE, which results in -ENOENT. */
		__TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
					    "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
}

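/*
 * Back the test region with a memslot at the top of the guest physical
 * address space, aligned down to 2M (at least 1M on s390x), and map it into
 * the guest's virtual address space.
 */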
static void __test_pre_fault_memory(unsigned long vm_type, bool private)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = vm_type,
	};
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;

	uint64_t guest_test_phys_mem;
	uint64_t guest_test_virt_mem;
	uint64_t alignment, guest_page_size;

	vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);

	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
	guest_test_phys_mem = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
#ifdef __s390x__
	alignment = max(0x100000UL, guest_page_size);
#else
	alignment = SZ_2M;
#endif
	guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
	guest_test_virt_mem = guest_test_phys_mem & ((1ULL << (vm->va_bits - 1)) - 1);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem, TEST_SLOT, TEST_NPAGES,
				    private ? KVM_MEM_GUEST_MEMFD : 0);
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, TEST_NPAGES);

	if (private)
		vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
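
	/*
	 * Three cases: a 2M range entirely inside the slot (fully handled),
	 * a two-page range whose second page lies past the end of the slot
	 * (one page left unhandled), and a single page entirely outside the
	 * slot (nothing handled).
	 */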
	pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, 0);
	pre_fault_memory(vcpu, guest_test_phys_mem + SZ_2M, PAGE_SIZE * 2, PAGE_SIZE);
	pre_fault_memory(vcpu, guest_test_phys_mem + TEST_SIZE, PAGE_SIZE, PAGE_SIZE);

	vcpu_args_set(vcpu, 1, guest_test_virt_mem);
	vcpu_run(vcpu);

	run = vcpu->run;
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Wanted KVM_EXIT_IO, got exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
		break;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
		break;
	}

	kvm_vm_free(vm);
}

static void test_pre_fault_memory(unsigned long vm_type, bool private)
{
	if (vm_type && !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type))) {
		pr_info("Skipping tests for vm_type 0x%lx\n", vm_type);
		return;
	}

	__test_pre_fault_memory(vm_type, private);
}

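/*
 * Always test the default VM type; on x86, also exercise
 * KVM_X86_SW_PROTECTED_VM with both shared and private (guest_memfd) memory.
 */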
int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_PRE_FAULT_MEMORY));

	test_pre_fault_memory(0, false);
#ifdef __x86_64__
	test_pre_fault_memory(KVM_X86_SW_PROTECTED_VM, false);
	test_pre_fault_memory(KVM_X86_SW_PROTECTED_VM, true);
#endif
	return 0;
}