@@ -110,17 +110,33 @@ pub const MMIO_MEM_SIZE: u64 = MEM_32BIT_GAP_SIZE;
110
110
/// These should be used to configure the GuestMemoryMmap structure for the platform.
111
111
/// For x86_64 all addresses are valid from the start of the kernel except a
112
112
/// carve out at the end of 32bit address space.
113
- pub fn arch_memory_regions ( size : usize ) -> Vec < ( GuestAddress , usize ) > {
113
+ pub fn arch_memory_regions ( offset : usize , size : usize ) -> Vec < ( GuestAddress , usize ) > {
114
+ // If we get here with size == 0 something has seriously gone wrong. Firecracker should never
115
+ // try to allocate guest memory of size 0
116
+ assert ! ( size > 0 , "Attempt to allocate guest memory of length 0" ) ;
117
+ assert ! (
118
+ offset. checked_add( size) . is_some( ) ,
119
+ "Attempt to allocate guest memory such that the address space would wrap around"
120
+ ) ;
121
+
114
122
// It's safe to cast MMIO_MEM_START to usize because it fits in a u32 variable
115
123
// (It points to an address in the 32 bit space).
116
- match size. checked_sub ( usize :: try_from ( MMIO_MEM_START ) . unwrap ( ) ) {
124
+ match ( size + offset ) . checked_sub ( u64_to_usize ( MMIO_MEM_START ) ) {
117
125
// case1: guest memory fits before the gap
118
- None | Some ( 0 ) => vec ! [ ( GuestAddress ( 0 ) , size) ] ,
119
- // case2: guest memory extends beyond the gap
120
- Some ( remaining) => vec ! [
121
- ( GuestAddress ( 0 ) , usize :: try_from( MMIO_MEM_START ) . unwrap( ) ) ,
126
+ None | Some ( 0 ) => vec ! [ ( GuestAddress ( offset as u64 ) , size) ] ,
127
+ // case2: starts before the gap, but doesn't completely fit
128
+ Some ( remaining) if ( offset as u64 ) < MMIO_MEM_START => vec ! [
129
+ (
130
+ GuestAddress ( offset as u64 ) ,
131
+ u64_to_usize( MMIO_MEM_START ) - offset,
132
+ ) ,
122
133
( GuestAddress ( FIRST_ADDR_PAST_32BITS ) , remaining) ,
123
134
] ,
135
+ // case3: guest memory start after the gap
136
+ Some ( _) => vec ! [ (
137
+ GuestAddress ( FIRST_ADDR_PAST_32BITS . max( offset as u64 ) ) ,
138
+ size,
139
+ ) ] ,
124
140
}
125
141
}
126
142
@@ -456,6 +472,56 @@ pub fn load_kernel(
456
472
} )
457
473
}
458
474
475
/// Kani proof harness for [`arch_memory_regions`]: checks the region layout
/// invariants for every (offset, len) pair allowed by the function's own
/// preconditions. Only compiled when running under the Kani verifier.
#[cfg(kani)]
mod verification {
    use crate::arch::x86_64::FIRST_ADDR_PAST_32BITS;
    use crate::arch::{MMIO_MEM_START, arch_memory_regions};

    #[kani::proof]
    #[kani::unwind(3)]
    fn verify_arch_memory_regions() {
        // Symbolic inputs covering the full u64 range.
        let offset: u64 = kani::any::<u64>();
        let len: u64 = kani::any::<u64>();

        // Mirror the two assert! preconditions inside arch_memory_regions, so the
        // proof only explores inputs the function itself accepts.
        kani::assume(len > 0);
        kani::assume(offset.checked_add(len).is_some());

        let regions = arch_memory_regions(offset as usize, len as usize);

        // There's only one MMIO gap, so we can get either 1 or 2 regions
        assert!(regions.len() <= 2);
        assert!(regions.len() >= 1);

        // The total length of all regions is what we requested
        assert_eq!(
            regions.iter().map(|&(_, len)| len).sum::<usize>(),
            len as usize
        );

        // No region overlaps the MMIO gap
        assert!(
            regions
                .iter()
                .all(|&(start, len)| start.0 >= FIRST_ADDR_PAST_32BITS
                    || start.0 + len as u64 <= MMIO_MEM_START)
        );

        // All regions start after our specified offset
        assert!(regions.iter().all(|&(start, _)| start.0 >= offset as u64));

        // All regions have non-zero length
        assert!(regions.iter().all(|&(_, len)| len > 0));

        // If there's two regions, they perfectly snuggle up to the MMIO gap
        if regions.len() == 2 {
            // cover! confirms this branch is actually reachable (the proof is not vacuous).
            kani::cover!();

            assert_eq!(regions[0].0.0 + regions[0].1 as u64, MMIO_MEM_START);
            assert_eq!(regions[1].0.0, FIRST_ADDR_PAST_32BITS);
        }
    }
}
524
+
459
525
#[ cfg( test) ]
460
526
mod tests {
461
527
use linux_loader:: loader:: bootparam:: boot_e820_entry;
@@ -466,18 +532,41 @@ mod tests {
466
532
467
533
/// Layouts entirely below the MMIO gap must yield exactly one region,
/// both with and without a starting offset.
#[test]
fn regions_lt_4gb() {
    let base_case = arch_memory_regions(0, 1usize << 29);
    assert_eq!(base_case.len(), 1);
    assert_eq!(base_case[0], (GuestAddress(0), 1usize << 29));

    // An offset that still keeps the whole range under the gap.
    let offset_case = arch_memory_regions(1 << 28, 1 << 29);
    assert_eq!(offset_case.len(), 1);
    assert_eq!(offset_case[0], (GuestAddress(1 << 28), 1 << 29));
}
474
544
475
545
/// Layouts that spill past the 32-bit boundary must be split into two
/// regions that straddle the MMIO gap, with and without a starting offset.
#[test]
fn regions_gt_4gb() {
    const MEMORY_SIZE: usize = (1 << 32) + 0x8000;

    let regions = arch_memory_regions(0, MEMORY_SIZE);
    assert_eq!(regions.len(), 2);
    assert_eq!(regions[0].0, GuestAddress(0));
    assert_eq!(regions[1].0, GuestAddress(1u64 << 32));

    // Offset into the lower half: the first region runs up to the gap,
    // the remainder continues immediately past the 32-bit boundary.
    let shifted = arch_memory_regions(1 << 31, MEMORY_SIZE);
    assert_eq!(shifted.len(), 2);
    let below_gap = u64_to_usize(MMIO_MEM_START) - (1 << 31);
    assert_eq!(shifted[0], (GuestAddress(1 << 31), below_gap));
    assert_eq!(
        shifted[1],
        (GuestAddress(FIRST_ADDR_PAST_32BITS), MEMORY_SIZE - below_gap)
    )
}
482
571
483
572
#[ test]
0 commit comments