1
+ // Copyright © 2024 Institute of Software, CAS. All rights reserved.
2
+ //
1
3
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
4
// SPDX-License-Identifier: Apache-2.0 OR MIT
3
5
//
@@ -2231,6 +2233,7 @@ mod tests {
2231
2233
target_arch = "x86_64" ,
2232
2234
target_arch = "arm" ,
2233
2235
target_arch = "aarch64" ,
2236
+ target_arch = "riscv64" ,
2234
2237
target_arch = "s390x"
2235
2238
) ) ]
2236
2239
#[ test]
@@ -2405,6 +2408,98 @@ mod tests {
2405
2408
}
2406
2409
}
2407
2410
2411
    /// Runs a tiny RISC-V guest that exercises the main VM-exit paths:
    /// a plain physical-memory store (tracked via the dirty log), an MMIO
    /// read, and an MMIO write that terminates the test.
    #[cfg(target_arch = "riscv64")]
    #[test]
    fn test_run_code() {
        use std::io::Write;

        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        // Raw RV64I machine code, little-endian, 4 bytes per instruction.
        // s8 points at RAM (guest_addr + 0x10000), s9 at the MMIO address.
        #[rustfmt::skip]
        let code = [
            0x13, 0x05, 0x50, 0x40, // li a0, 0x0405;
            0x23, 0x20, 0xac, 0x00, // sw a0, 0(s8); test physical memory write
            0x03, 0xa5, 0x0c, 0x00, // lw a0, 0(s9); test MMIO read
            0x93, 0x05, 0x70, 0x60, // li a1, 0x0607;
            0x23, 0xa0, 0xbc, 0x00, // sw a1, 0(s9); test MMIO write
            0x6f, 0x00, 0x00, 0x00, // j .; shouldn't get here, but if so loop forever
        ];

        let mem_size = 0x20000;
        let load_addr = mmap_anonymous(mem_size).as_ptr();
        let guest_addr: u64 = 0x10000;
        let slot: u32 = 0;
        // KVM_MEM_LOG_DIRTY_PAGES so the sw-to-RAM above shows up in the
        // dirty bitmap checked on the MmioWrite exit.
        let mem_region = kvm_userspace_memory_region {
            slot,
            guest_phys_addr: guest_addr,
            memory_size: mem_size as u64,
            userspace_addr: load_addr as u64,
            flags: KVM_MEM_LOG_DIRTY_PAGES,
        };
        unsafe {
            vm.set_user_memory_region(mem_region).unwrap();
        }

        unsafe {
            // Get a mutable slice of `mem_size` from `load_addr`.
            // This is safe because we mapped it before.
            let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size);
            slice.write_all(&code).unwrap();
        }

        let mut vcpu_fd = vm.create_vcpu(0).unwrap();

        // Base register ID of the RISC-V core register file for
        // KVM_SET_ONE_REG; offset 0 is the PC, s8/s9 are at +24/+25.
        let core_reg_base: u64 = 0x8030_0000_0200_0000;
        // First address past the memslot, so accesses there exit as MMIO.
        let mmio_addr: u64 = guest_addr + mem_size as u64;

        // Set the PC to the guest address where we loaded the code.
        vcpu_fd
            .set_one_reg(core_reg_base, &(guest_addr as u128).to_le_bytes())
            .unwrap();

        // Set s8 and s9 to the addresses the guest test code needs
        vcpu_fd
            .set_one_reg(
                core_reg_base + 24,
                &(guest_addr as u128 + 0x10000).to_le_bytes(),
            )
            .unwrap();
        vcpu_fd
            .set_one_reg(core_reg_base + 25, &(mmio_addr as u128).to_le_bytes())
            .unwrap();

        loop {
            match vcpu_fd.run().expect("run failed") {
                VcpuExit::MmioRead(addr, data) => {
                    assert_eq!(addr, mmio_addr);
                    assert_eq!(data.len(), 4);
                    // Feed 0x0506 (little-endian) back to the guest's lw.
                    data[3] = 0x0;
                    data[2] = 0x0;
                    data[1] = 0x5;
                    data[0] = 0x6;
                }
                VcpuExit::MmioWrite(addr, data) => {
                    assert_eq!(addr, mmio_addr);
                    assert_eq!(data.len(), 4);
                    // The guest stores a1 = 0x0607, little-endian.
                    assert_eq!(data[3], 0x0);
                    assert_eq!(data[2], 0x0);
                    assert_eq!(data[1], 0x6);
                    assert_eq!(data[0], 0x7);
                    // The code snippet dirties one page at guest_addr + 0x10000.
                    // The code page should not be dirty, as it's not written by the guest.
                    let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap();
                    let dirty_pages: u32 = dirty_pages_bitmap
                        .into_iter()
                        .map(|page| page.count_ones())
                        .sum();
                    assert_eq!(dirty_pages, 1);
                    break;
                }
                r => panic!("unexpected exit reason: {:?}", r),
            }
        }
    }
2502
+
2408
2503
#[ cfg( target_arch = "x86_64" ) ]
2409
2504
#[ test]
2410
2505
fn test_run_code ( ) {
@@ -2537,7 +2632,8 @@ mod tests {
2537
2632
target_arch = "x86" ,
2538
2633
target_arch = "x86_64" ,
2539
2634
target_arch = "arm" ,
2540
- target_arch = "aarch64"
2635
+ target_arch = "aarch64" ,
2636
+ target_arch = "riscv64"
2541
2637
) ) ]
2542
2638
fn test_faulty_vcpu_fd ( ) {
2543
2639
use std:: os:: unix:: io:: { FromRawFd , IntoRawFd } ;
@@ -2564,10 +2660,22 @@ mod tests {
2564
2660
. errno( ) ,
2565
2661
badf_errno
2566
2662
) ;
2663
+ #[ cfg( any(
2664
+ target_arch = "x86" ,
2665
+ target_arch = "x86_64" ,
2666
+ target_arch = "arm" ,
2667
+ target_arch = "aarch64"
2668
+ ) ) ]
2567
2669
assert_eq ! (
2568
2670
faulty_vcpu_fd. get_vcpu_events( ) . unwrap_err( ) . errno( ) ,
2569
2671
badf_errno
2570
2672
) ;
2673
+ #[ cfg( any(
2674
+ target_arch = "x86" ,
2675
+ target_arch = "x86_64" ,
2676
+ target_arch = "arm" ,
2677
+ target_arch = "aarch64"
2678
+ ) ) ]
2571
2679
assert_eq ! (
2572
2680
faulty_vcpu_fd
2573
2681
. set_vcpu_events( & kvm_vcpu_events:: default ( ) )
@@ -2804,6 +2912,52 @@ mod tests {
2804
2912
faulty_vcpu_fd. vcpu . into_raw_fd ( ) ;
2805
2913
}
2806
2914
2915
+ #[ test]
2916
+ #[ cfg( target_arch = "riscv64" ) ]
2917
+ fn test_faulty_vcpu_fd_riscv64 ( ) {
2918
+ use std:: os:: unix:: io:: { FromRawFd , IntoRawFd } ;
2919
+
2920
+ let badf_errno = libc:: EBADF ;
2921
+
2922
+ let faulty_vcpu_fd = VcpuFd {
2923
+ vcpu : unsafe { File :: from_raw_fd ( -2 ) } ,
2924
+ kvm_run_ptr : KvmRunWrapper {
2925
+ kvm_run_ptr : mmap_anonymous ( 10 ) . cast ( ) ,
2926
+ mmap_size : 10 ,
2927
+ } ,
2928
+ coalesced_mmio_ring : None ,
2929
+ } ;
2930
+
2931
+ let reg_id = 0x8030_0000_0200_000a ;
2932
+ let mut reg_data = 0u128 . to_le_bytes ( ) ;
2933
+
2934
+ assert_eq ! (
2935
+ faulty_vcpu_fd
2936
+ . get_reg_list( & mut RegList :: new( 200 ) . unwrap( ) )
2937
+ . unwrap_err( )
2938
+ . errno( ) ,
2939
+ badf_errno
2940
+ ) ;
2941
+ assert_eq ! (
2942
+ faulty_vcpu_fd
2943
+ . set_one_reg( reg_id, & reg_data)
2944
+ . unwrap_err( )
2945
+ . errno( ) ,
2946
+ badf_errno
2947
+ ) ;
2948
+ assert_eq ! (
2949
+ faulty_vcpu_fd
2950
+ . get_one_reg( reg_id, & mut reg_data)
2951
+ . unwrap_err( )
2952
+ . errno( ) ,
2953
+ badf_errno
2954
+ ) ;
2955
+
2956
+ // Don't drop the File object, or it'll notice the file it's trying to close is
2957
+ // invalid and abort the process.
2958
+ faulty_vcpu_fd. vcpu . into_raw_fd ( ) ;
2959
+ }
2960
+
2807
2961
#[ test]
2808
2962
#[ cfg( any( target_arch = "arm" , target_arch = "aarch64" ) ) ]
2809
2963
fn test_get_preferred_target ( ) {
@@ -2912,6 +3066,79 @@ mod tests {
2912
3066
vcpu. get_reg_list ( & mut reg_list) . unwrap ( )
2913
3067
}
2914
3068
3069
+ #[ test]
3070
+ #[ cfg( target_arch = "riscv64" ) ]
3071
+ fn test_set_one_reg ( ) {
3072
+ let kvm = Kvm :: new ( ) . unwrap ( ) ;
3073
+ let vm = kvm. create_vm ( ) . unwrap ( ) ;
3074
+ let vcpu = vm. create_vcpu ( 0 ) . unwrap ( ) ;
3075
+
3076
+ let data: u128 = 0 ;
3077
+ let reg_id: u64 = 0 ;
3078
+
3079
+ vcpu. set_one_reg ( reg_id, & data. to_le_bytes ( ) ) . unwrap_err ( ) ;
3080
+ // Exercising KVM_SET_ONE_REG by trying to alter the data inside the A0
3081
+ // register.
3082
+ // This regiseter is 64 bit wide (8 bytes).
3083
+ const A0_REG_ID : u64 = 0x8030_0000_0200_000a ;
3084
+ vcpu. set_one_reg ( A0_REG_ID , & data. to_le_bytes ( ) )
3085
+ . expect ( "Failed to set a0 register" ) ;
3086
+
3087
+ // Trying to set 8 byte register with 7 bytes must fail.
3088
+ vcpu. set_one_reg ( A0_REG_ID , & [ 0_u8 ; 7 ] ) . unwrap_err ( ) ;
3089
+ }
3090
+
3091
+ #[ test]
3092
+ #[ cfg( target_arch = "riscv64" ) ]
3093
+ fn test_get_one_reg ( ) {
3094
+ let kvm = Kvm :: new ( ) . unwrap ( ) ;
3095
+ let vm = kvm. create_vm ( ) . unwrap ( ) ;
3096
+ let vcpu = vm. create_vcpu ( 0 ) . unwrap ( ) ;
3097
+
3098
+ const PRESET : u64 = 0x7 ;
3099
+ let data: u128 = PRESET as u128 ;
3100
+ const A0_REG_ID : u64 = 0x8030_0000_0200_000a ;
3101
+ vcpu. set_one_reg ( A0_REG_ID , & data. to_le_bytes ( ) )
3102
+ . expect ( "Failed to set a0 register" ) ;
3103
+
3104
+ let mut bytes = [ 0_u8 ; 16 ] ;
3105
+ vcpu. get_one_reg ( A0_REG_ID , & mut bytes)
3106
+ . expect ( "Failed to get a0 register" ) ;
3107
+ let data = u128:: from_le_bytes ( bytes) ;
3108
+ assert_eq ! ( data, PRESET as u128 ) ;
3109
+
3110
+ // Trying to get 8 byte register with 7 bytes must fail.
3111
+ vcpu. get_one_reg ( A0_REG_ID , & mut [ 0_u8 ; 7 ] ) . unwrap_err ( ) ;
3112
+ }
3113
+
3114
+ #[ test]
3115
+ #[ cfg( target_arch = "riscv64" ) ]
3116
+ fn test_get_reg_list ( ) {
3117
+ let kvm = Kvm :: new ( ) . unwrap ( ) ;
3118
+ let vm = kvm. create_vm ( ) . unwrap ( ) ;
3119
+ let vcpu = vm. create_vcpu ( 0 ) . unwrap ( ) ;
3120
+
3121
+ let mut reg_list = RegList :: new ( 1 ) . unwrap ( ) ;
3122
+
3123
+ // KVM_GET_REG_LIST offers us a number of registers for which we have
3124
+ // not allocated memory, so the first time it fails.
3125
+ let err = vcpu. get_reg_list ( & mut reg_list) . unwrap_err ( ) ;
3126
+ assert ! ( err. errno( ) == libc:: E2BIG ) ;
3127
+ // SAFETY: This structure is a result from a specific vCPU ioctl
3128
+ assert ! ( unsafe { reg_list. as_mut_fam_struct( ) } . n > 0 ) ;
3129
+
3130
+ // We make use of the number of registers returned to allocate memory and
3131
+ // try one more time.
3132
+ // SAFETY: This structure is a result from a specific vCPU ioctl
3133
+ let mut reg_list =
3134
+ RegList :: new ( unsafe { reg_list. as_mut_fam_struct ( ) } . n as usize ) . unwrap ( ) ;
3135
+ vcpu. get_reg_list ( & mut reg_list) . unwrap ( ) ;
3136
+
3137
+ // Test get a register list contains 200 registers explicitly
3138
+ let mut reg_list = RegList :: new ( 200 ) . unwrap ( ) ;
3139
+ vcpu. get_reg_list ( & mut reg_list) . unwrap ( ) ;
3140
+ }
3141
+
2915
3142
#[ test]
2916
3143
fn test_get_kvm_run ( ) {
2917
3144
let kvm = Kvm :: new ( ) . unwrap ( ) ;
0 commit comments