// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#define VCPU_ID 0
#define ARBITRARY_IO_PORT 0x2000

static struct kvm_vm *vm;

static void l2_guest_code(void)
{
	/*
	 * Generate an exit to L0 userspace, i.e. main(), via I/O to an
	 * arbitrary port.
	 */
	asm volatile("inb %%dx, %%al"
		     : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

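	/*
	 * Enter VMX root operation and make the VMCS for L2 current, i.e.
	 * VMXON followed by VMCLEAR+VMPTRLD.
	 */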
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * L2 must be run without unrestricted guest; verify that the selftests
	 * library hasn't enabled it.  Because KVM selftests jump directly to
	 * 64-bit mode, unrestricted guest support isn't required.
	 */
	GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
		     !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));

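	/* Launch L2; vmlaunch() returns non-zero if VMLAUNCH fails. */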
	GUEST_ASSERT(!vmlaunch());

	/* L2 should triple fault after main() stuffs invalid guest state. */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva;
	struct kvm_sregs sregs;
	struct kvm_run *run;
	struct ucall uc;

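	/* Skip the test (KSFT_SKIP) if KVM doesn't support nested VMX. */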
	nested_vmx_check_supported();

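	/* Create a VM with a single vCPU that will run l1_guest_code(). */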
	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vcpu_alloc_vmx(vm, &vmx_pages_gva);
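	/* Pass the vmx_pages GVA to l1_guest_code() as its lone argument. */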
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	vcpu_run(vm, VCPU_ID);

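	/* Grab the vCPU's kvm_run struct to inspect the exit information. */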
	run = vcpu_state(vm, VCPU_ID);

	/*
	 * The first exit to L0 userspace should be an I/O access from L2.
	 * Running L1 should launch L2 without triggering an exit to userspace.
	 */
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Expected KVM_EXIT_IO, got: %u (%s)\n",
		    run->exit_reason, exit_reason_str(run->exit_reason));

	TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
		    "Expected IN from port %d from L2, got port %d",
		    ARBITRARY_IO_PORT, run->io.port);

	/*
	 * Stuff invalid guest state for L2 by making TR unusable.  The next
	 * KVM_RUN should induce a TRIPLE_FAULT in L2, as KVM doesn't support
	 * emulating invalid guest state for L2.
	 */
	memset(&sregs, 0, sizeof(sregs));
	vcpu_sregs_get(vm, VCPU_ID, &sregs);
	sregs.tr.unusable = 1;
	vcpu_sregs_set(vm, VCPU_ID, &sregs);

	vcpu_run(vm, VCPU_ID);

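	/* L1 reports success via GUEST_DONE() and failure via GUEST_ASSERT(). */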
	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		TEST_FAIL("%s", (const char *)uc.args[0]);
	default:
		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}