
Commit 45acde4

Ricardo Koller authored and Marc Zyngier committed
KVM: selftests: aarch64: Add readonly memslot tests into page_fault_test
Add some readonly memslot tests into page_fault_test. Mark the data and/or
page-table memory regions as readonly, perform some accesses, and check that
the right fault is triggered when expected (e.g., a store with no write-back
should lead to an mmio exit).

Signed-off-by: Ricardo Koller <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent a4edf25 commit 45acde4
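
For context, the behavior exercised by these tests is KVM's read-only memslot contract: a slot registered with KVM_MEM_READONLY can be read (and executed) by the guest, while guest stores are not applied to the backing memory and instead surface to userspace, as a KVM_EXIT_MMIO when the stage-2 data abort carries a valid syndrome. The fragment below is not part of the commit; it is a minimal sketch assuming a generic userspace VMM with a VM fd and a mapped kvm_run, and the helper names (set_readonly_memslot, handle_ro_write_exit) are illustrative only.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: register guest memory as a read-only memslot. */
static int set_readonly_memslot(int vm_fd, uint32_t slot, uint64_t gpa,
				uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot,
		.flags           = KVM_MEM_READONLY, /* guest writes are not applied */
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (uint64_t)hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

/*
 * Illustrative run-loop fragment: a guest store to the read-only slot whose
 * abort has a valid syndrome shows up as KVM_EXIT_MMIO with is_write set,
 * and userspace may emulate it by copying run->mmio.data into the backing HVA.
 */
static void handle_ro_write_exit(struct kvm_run *run, void *hva_base,
				 uint64_t gpa_base)
{
	if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write)
		memcpy((char *)hva_base + (run->mmio.phys_addr - gpa_base),
		       run->mmio.data, run->mmio.len);
}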

File tree: 1 file changed (+101, -1)


tools/testing/selftests/kvm/aarch64/page_fault_test.c

Lines changed: 101 additions & 1 deletion
@@ -41,6 +41,8 @@ static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
 #define CHECK_FN_NR 10
 
 static struct event_cnt {
+	int mmio_exits;
+	int fail_vcpu_runs;
 	int uffd_faults;
 	/* uffd_faults is incremented from multiple threads. */
 	pthread_mutex_t uffd_faults_mutex;
@@ -57,6 +59,8 @@ struct test_desc {
 	uffd_handler_t uffd_data_handler;
 	void (*dabt_handler)(struct ex_regs *regs);
 	void (*iabt_handler)(struct ex_regs *regs);
+	void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
+	void (*fail_vcpu_run_handler)(int ret);
 	uint32_t pt_memslot_flags;
 	uint32_t data_memslot_flags;
 	bool skip;
@@ -415,6 +419,31 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
 	return true;
 }
 
+static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
+{
+	struct userspace_mem_region *region;
+	void *hva;
+
+	region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
+	hva = (void *)region->region.userspace_addr;
+
+	ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
+
+	memcpy(hva, run->mmio.data, run->mmio.len);
+	events.mmio_exits += 1;
+}
+
+static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
+{
+	uint64_t data;
+
+	memcpy(&data, run->mmio.data, sizeof(data));
+	pr_debug("addr=%lld len=%d w=%d data=%lx\n",
+		 run->mmio.phys_addr, run->mmio.len,
+		 run->mmio.is_write, data);
+	TEST_FAIL("There was no MMIO exit expected.");
+}
+
 static bool check_write_in_dirty_log(struct kvm_vm *vm,
				     struct userspace_mem_region *region,
				     uint64_t host_pg_nr)
@@ -463,6 +492,18 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
 	return continue_test;
 }
 
+void fail_vcpu_run_no_handler(int ret)
+{
+	TEST_FAIL("Unexpected vcpu run failure\n");
+}
+
+void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
+{
+	TEST_ASSERT(errno == ENOSYS,
+		    "The mmio handler should have returned not implemented.");
+	events.fail_vcpu_runs += 1;
+}
+
 typedef uint32_t aarch64_insn_t;
 extern aarch64_insn_t __exec_test[2];

@@ -564,9 +605,20 @@ static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
 	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
 }
 
+static void setup_default_handlers(struct test_desc *test)
+{
+	if (!test->mmio_handler)
+		test->mmio_handler = mmio_no_handler;
+
+	if (!test->fail_vcpu_run_handler)
+		test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
+}
+
 static void check_event_counts(struct test_desc *test)
 {
 	ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+	ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+	ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
 }
 
 static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
@@ -591,10 +643,18 @@ static void reset_event_counts(void)
 static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
			  struct test_desc *test)
 {
+	struct kvm_run *run;
 	struct ucall uc;
+	int ret;
+
+	run = vcpu->run;
 
 	for (;;) {
-		vcpu_run(vcpu);
+		ret = _vcpu_run(vcpu);
+		if (ret) {
+			test->fail_vcpu_run_handler(ret);
+			goto done;
+		}
 
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
@@ -608,6 +668,10 @@ static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
 			break;
 		case UCALL_DONE:
 			goto done;
+		case UCALL_NONE:
+			if (run->exit_reason == KVM_EXIT_MMIO)
+				test->mmio_handler(vm, run);
+			break;
 		default:
 			TEST_FAIL("Unknown ucall %lu", uc.cmd);
 		}
@@ -647,6 +711,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	load_exec_code_for_test(vm);
 	setup_uffd(vm, p, &pt_uffd, &data_uffd);
 	setup_abort_handlers(vm, vcpu, test);
+	setup_default_handlers(test);
 	vcpu_args_set(vcpu, 1, test);
 
 	vcpu_run_loop(vm, vcpu, test);
@@ -734,6 +799,25 @@ static void help(char *name)
 	.expected_events	= { 0 },				\
 }
 
+#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits)		\
+{									\
+	.name			= SCAT3(ro_memslot, _access, _with_af),	\
+	.data_memslot_flags	= KVM_MEM_READONLY,			\
+	.guest_prepare		= { _PREPARE(_access) },		\
+	.guest_test		= _access,				\
+	.mmio_handler		= _mmio_handler,			\
+	.expected_events	= { .mmio_exits = _mmio_exits },	\
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME(_access)				\
+{									\
+	.name			= SCAT2(ro_memslot_no_syndrome, _access),\
+	.data_memslot_flags	= KVM_MEM_READONLY,			\
+	.guest_test		= _access,				\
+	.fail_vcpu_run_handler	= fail_vcpu_run_mmio_no_syndrome_handler, \
+	.expected_events	= { .fail_vcpu_runs = 1 },		\
+}
+
 static struct test_desc tests[] = {
 
 	/* Check that HW is setting the Access Flag (AF) (sanity checks). */
@@ -808,6 +892,22 @@ static struct test_desc tests[] = {
 	TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
 	TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
 
+	/*
+	 * Try accesses when the data memory region is marked read-only
+	 * (with KVM_MEM_READONLY). Writes with a syndrome result in an
+	 * MMIO exit, writes with no syndrome (e.g., CAS) result in a
+	 * failed vcpu run, and reads/execs with and without syndromes do
+	 * not fault.
+	 */
+	TEST_RO_MEMSLOT(guest_read64, 0, 0),
+	TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
+	TEST_RO_MEMSLOT(guest_at, 0, 0),
+	TEST_RO_MEMSLOT(guest_exec, 0, 0),
+	TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
+	TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
+	TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
+	TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
+
 	{ 0 }
 };

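As a usage note on the two macros added above: wiring an additional accessor into the read-only coverage takes a single entry in tests[], chosen by whether the access reports a valid syndrome on its data abort. The entries below are a sketch only; guest_new_access is a hypothetical accessor, not one defined in page_fault_test.c.

	/* Sketch: hypothetical additions to tests[] for a new accessor. */

	/* Abort carries a syndrome: the write is forwarded as exactly one MMIO exit. */
	TEST_RO_MEMSLOT(guest_new_access, mmio_on_test_gpa_handler, 1),

	/* Abort carries no syndrome (like CAS): KVM_RUN fails once with ENOSYS. */
	TEST_RO_MEMSLOT_NO_SYNDROME(guest_new_access),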