@@ -1,15 +1,16 @@
 //! This module is responsible for managing the absolute addresses that allocations are located at,
 //! and for casting between pointers and integers based on those addresses.
 
+mod address_generator;
 mod reuse_pool;
 
 use std::cell::RefCell;
-use std::cmp::max;
 
-use rand::Rng;
 use rustc_abi::{Align, Size};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::ty::TyCtxt;
 
+pub use self::address_generator::AddressGenerator;
 use self::reuse_pool::ReusePool;
 use crate::concurrency::VClock;
 use crate::*;
@@ -49,9 +50,8 @@ pub struct GlobalStateInner {
     /// Whether an allocation has been exposed or not. This cannot be put
     /// into `AllocExtra` for the same reason as `base_addr`.
     exposed: FxHashSet<AllocId>,
-    /// This is used as a memory address when a new pointer is casted to an integer. It
-    /// is always larger than any address that was previously made part of a block.
-    next_base_addr: u64,
+    /// The generator for new addresses in a given range.
+    address_generator: AddressGenerator,
     /// The provenance to use for int2ptr casts
     provenance_mode: ProvenanceMode,
 }
@@ -64,7 +64,7 @@ impl VisitProvenance for GlobalStateInner {
             prepared_alloc_bytes: _,
             reuse: _,
             exposed: _,
-            next_base_addr: _,
+            address_generator: _,
             provenance_mode: _,
         } = self;
         // Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
@@ -77,14 +77,14 @@ impl VisitProvenance for GlobalStateInner {
 }
 
 impl GlobalStateInner {
-    pub fn new(config: &MiriConfig, stack_addr: u64) -> Self {
+    pub fn new<'tcx>(config: &MiriConfig, stack_addr: u64, tcx: TyCtxt<'tcx>) -> Self {
         GlobalStateInner {
             int_to_ptr_map: Vec::default(),
             base_addr: FxHashMap::default(),
             prepared_alloc_bytes: FxHashMap::default(),
             reuse: ReusePool::new(config),
             exposed: FxHashSet::default(),
-            next_base_addr: stack_addr,
+            address_generator: AddressGenerator::new(stack_addr..tcx.target_usize_max()),
             provenance_mode: config.provenance_mode,
         }
     }
@@ -96,15 +96,6 @@ impl GlobalStateInner {
     }
 }
 
-/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
-/// of `align` that is larger or equal to `addr`
-fn align_addr(addr: u64, align: u64) -> u64 {
-    match addr % align {
-        0 => addr,
-        rem => addr.strict_add(align) - rem,
-    }
-}
-
 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn addr_from_alloc_id_uncached(
@@ -194,34 +185,17 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
             interp_ok(reuse_addr)
         } else {
             // We have to pick a fresh address.
-            // Leave some space to the previous allocation, to give it some chance to be less aligned.
-            // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
-            let slack = rng.random_range(0..16);
-            // From next_base_addr + slack, round up to adjust for alignment.
-            let base_addr = global_state
-                .next_base_addr
-                .checked_add(slack)
-                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-            let base_addr = align_addr(base_addr, info.align.bytes());
-
-            // Remember next base address. If this allocation is zero-sized, leave a gap of at
-            // least 1 to avoid two allocations having the same base address. (The logic in
-            // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
-            // need to be distinguishable!)
-            global_state.next_base_addr = base_addr
-                .checked_add(max(info.size.bytes(), 1))
-                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-            // Even if `Size` didn't overflow, we might still have filled up the address space.
-            if global_state.next_base_addr > this.target_usize_max() {
-                throw_exhaust!(AddressSpaceFull);
-            }
+            let new_addr =
+                global_state.address_generator.generate(info.size, info.align, &mut rng)?;
+
             // If we filled up more than half the address space, start aggressively reusing
             // addresses to avoid running out.
-            if global_state.next_base_addr > u64::try_from(this.target_isize_max()).unwrap() {
+            let remaining_range = global_state.address_generator.get_remaining();
+            if remaining_range.start > remaining_range.end / 2 {
                 global_state.reuse.address_space_shortage();
             }
 
-            interp_ok(base_addr)
+            interp_ok(new_addr)
         }
     }
 }
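The new `address_generator` module itself is not shown in this diff. As a rough sketch of what it presumably encapsulates, here is a minimal, dependency-free version of the logic the removed lines used to do inline: offset the start of the range by some slack, round up to the requested alignment, and reserve at least one byte so zero-sized allocations keep unique addresses. Only the names `AddressGenerator`, `new`, `generate`, and `get_remaining` come from the diff; plain `u64` size/align, an explicit `slack` parameter instead of Miri's `rng`, and `Option` instead of `InterpResult`/`AddressSpaceFull` are simplifications.

```rust
use std::ops::Range;

/// Hands out fresh base addresses from a fixed, monotonically consumed range
/// (assumed behavior; the real type lives in `address_generator.rs`).
pub struct AddressGenerator {
    /// Addresses not yet handed out; `start` moves up with each allocation.
    remaining: Range<u64>,
}

impl AddressGenerator {
    pub fn new(range: Range<u64>) -> Self {
        AddressGenerator { remaining: range }
    }

    /// The as-yet unused part of the address range.
    pub fn get_remaining(&self) -> Range<u64> {
        self.remaining.clone()
    }

    /// Picks a fresh, `align`-aligned address for an allocation of `size` bytes.
    /// `slack` stands in for the random 0..16 offset the old code drew from `rng`
    /// so that allocations are not always 16-aligned. Returns `None` when the
    /// address space is exhausted (where Miri would raise `AddressSpaceFull`).
    pub fn generate(&mut self, size: u64, align: u64, slack: u64) -> Option<u64> {
        // Round `start + slack` up to a multiple of `align` (the old `align_addr`).
        let base = self.remaining.start.checked_add(slack)?;
        let base = match base % align {
            0 => base,
            rem => base.checked_add(align - rem)?,
        };
        // Leave a gap of at least 1 so zero-sized allocations get unique
        // addresses (`alloc_id_from_addr` relies on uniqueness).
        let next = base.checked_add(size.max(1))?;
        if next > self.remaining.end {
            return None;
        }
        self.remaining.start = next;
        Some(base)
    }
}
```

Note that the half-full heuristic stays at the call site: `get_remaining()` exposes the unused range, and the caller compares `start` against `end / 2`. Since the range ends at `tcx.target_usize_max()`, this matches the old `target_isize_max()` check exactly.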
@@ -519,14 +493,3 @@ impl<'tcx> MiriMachine<'tcx> {
         })
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_align_addr() {
-        assert_eq!(align_addr(37, 4), 40);
-        assert_eq!(align_addr(44, 4), 44);
-    }
-}
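A quick sanity check of the sketch above, with hypothetical numbers standing in for `stack_addr` and `tcx.target_usize_max()`:

```rust
fn main() {
    // 4 KiB..1 MiB stands in for `stack_addr..tcx.target_usize_max()`.
    let mut generator = AddressGenerator::new(0x1000..0x10_0000);
    // 24 bytes, 8-aligned, with 3 bytes of slack: 0x1000 + 3 rounds up to 0x1008.
    let a = generator.generate(24, 8, 3).unwrap();
    assert_eq!(a, 0x1008);
    // A zero-sized allocation still advances the range by at least 1 byte,
    // so it gets an address distinct from its neighbor's.
    let b = generator.generate(0, 1, 0).unwrap();
    assert_ne!(a, b);
    assert_eq!(generator.get_remaining().start, 0x1021);
}
```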