@@ -4,6 +4,177 @@ use rustc_target::abi::{LayoutOf, Size};
use crate::stacked_borrows::Tag;
use crate::*;

+fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    operand: OpTy<'tcx, Tag>,
+    min_size: u64,
+) -> InterpResult<'tcx, ()> {
+    let target_ty = match operand.layout.ty.kind {
+        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
+        _ => panic!("Argument to pthread function was not a raw pointer"),
+    };
+    let target_layout = ecx.layout_of(target_ty)?;
+    assert!(target_layout.size.bytes() >= min_size);
+    Ok(())
+}
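The helper above asks Miri for the pointee's layout and asserts that it is at least `min_size` bytes. As a rough user-space analogy (hypothetical code, not part of this patch), the same "pointee must be at least N bytes" invariant can be sketched with `std::mem::size_of`:

use std::mem;

// Hypothetical stand-in for assert_ptr_target_min_size: panic unless the
// pointee type of `_ptr` occupies at least `min_size` bytes.
fn assert_pointee_min_size<T>(_ptr: *const T, min_size: usize) {
    assert!(mem::size_of::<T>() >= min_size);
}

fn main() {
    let x: u64 = 0;
    assert_pointee_min_size(&x, 4); // fine: u64 is 8 bytes
}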
+
+// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
+
+// Our chosen memory layout: store an i32 in the first four bytes equal to the
+// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL)
+
+fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    attr_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the attr pointer is within bounds
+    assert_ptr_target_min_size(ecx, attr_op, 4)?;
+    let attr_place = ecx.deref_operand(attr_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.read_scalar(kind_place.into())
+}
+
+fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    attr_op: OpTy<'tcx, Tag>,
+    kind: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the attr pointer is within bounds
+    assert_ptr_target_min_size(ecx, attr_op, 4)?;
+    let attr_place = ecx.deref_operand(attr_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.write_scalar(kind.into(), kind_place.into())
+}
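To make the layout comment above concrete, here is a small user-space sketch (hypothetical code, not Miri's interpreter API) that treats the first four bytes of an attribute object as a native-endian i32 kind:

// Sketch of the chosen pthread_mutexattr_t layout: the kind is an i32 stored
// in the first four bytes; any remaining bytes are left untouched.
fn attr_get_kind(attr: &[u8; 8]) -> i32 {
    i32::from_ne_bytes([attr[0], attr[1], attr[2], attr[3]])
}

fn attr_set_kind(attr: &mut [u8; 8], kind: i32) {
    attr[0..4].copy_from_slice(&kind.to_ne_bytes());
}

fn main() {
    let mut attr = [0u8; 8];
    attr_set_kind(&mut attr, 1); // 1 stands in for some libc kind constant
    assert_eq!(attr_get_kind(&attr), 1);
}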
+
+// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
+
+// Our chosen memory layout:
+// bytes 0-3: reserved for signature on macOS
+// (need to avoid this because it is set by static initializer macros)
+// bytes 4-7: count of how many times this mutex has been locked, as a u32
+// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
+// (the kind has to be at this offset for compatibility with static initializer macros)
+
+fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let locked_count_place =
+        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.read_scalar(locked_count_place.into())
+}
+
+fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+    locked_count: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let locked_count_place =
+        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.write_scalar(locked_count.into(), locked_count_place.into())
+}
+
+fn mutex_get_kind<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+    let kind_place =
+        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.read_scalar(kind_place.into())
+}
+
+fn mutex_set_kind<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+    kind: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+    let kind_place =
+        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.write_scalar(kind.into(), kind_place.into())
+}
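The mutex layout can likewise be sketched in user-space terms (hypothetical code, not Miri's API): the locked count lives at bytes 4-7, and the kind sits at offset 16 on 64-bit targets or 12 on 32-bit targets:

use std::convert::TryInto;

// Sketch of the chosen pthread_mutex_t layout over a raw byte buffer.
fn mutex_kind_offset() -> usize {
    if std::mem::size_of::<usize>() == 8 { 16 } else { 12 }
}

fn mutex_get_locked_count(mutex: &[u8]) -> u32 {
    u32::from_ne_bytes(mutex[4..8].try_into().unwrap())
}

fn mutex_set_kind(mutex: &mut [u8], kind: i32) {
    let offset = mutex_kind_offset();
    mutex[offset..offset + 4].copy_from_slice(&kind.to_ne_bytes());
}

fn main() {
    let mut mutex = [0u8; 48]; // large enough for every supported platform
    mutex_set_kind(&mut mutex, 2);
    assert_eq!(mutex_get_locked_count(&mutex), 0);
}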
+
+// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
+
+// Our chosen memory layout:
+// bytes 0-3: reserved for signature on macOS
+// (need to avoid this because it is set by static initializer macros)
+// bytes 4-7: reader count, as a u32
+// bytes 8-11: writer count, as a u32
+
+fn rwlock_get_readers<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    rwlock_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the rwlock pointer is within bounds
+    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
+    let rwlock_place = ecx.deref_operand(rwlock_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let readers_place =
+        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.read_scalar(readers_place.into())
+}
+
+fn rwlock_set_readers<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    rwlock_op: OpTy<'tcx, Tag>,
+    readers: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the rwlock pointer is within bounds
+    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
+    let rwlock_place = ecx.deref_operand(rwlock_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let readers_place =
+        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.write_scalar(readers.into(), readers_place.into())
+}
+
+fn rwlock_get_writers<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    rwlock_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the rwlock pointer is within bounds
+    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
+    let rwlock_place = ecx.deref_operand(rwlock_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let writers_place =
+        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.read_scalar(writers_place.into())
+}
+
+fn rwlock_set_writers<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    rwlock_op: OpTy<'tcx, Tag>,
+    writers: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the rwlock pointer is within bounds
+    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
+    let rwlock_place = ecx.deref_operand(rwlock_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let writers_place =
+        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.write_scalar(writers.into(), writers_place.into())
+}
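These getters and setters back the pthread mutex and rwlock shims, which exist so that Miri can interpret ordinary libc-based programs. Below is a minimal example of the kind of unix-only code the shims are meant to handle, assuming the `libc` crate as a dependency and leaving aside exactly which operations Miri supports at this point:

fn main() {
    unsafe {
        // A statically initialized mutex, locked and unlocked once.
        let mut m: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
        assert_eq!(libc::pthread_mutex_lock(&mut m), 0);
        assert_eq!(libc::pthread_mutex_unlock(&mut m), 0);

        // A statically initialized rwlock, read-locked and unlocked once.
        let mut rw: libc::pthread_rwlock_t = libc::PTHREAD_RWLOCK_INITIALIZER;
        assert_eq!(libc::pthread_rwlock_rdlock(&mut rw), 0);
        assert_eq!(libc::pthread_rwlock_unlock(&mut rw), 0);
    }
}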
+
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
@@ -348,174 +519,3 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
        Ok(0)
    }
}
-
-fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
-    ecx: &MiriEvalContext<'mir, 'tcx>,
-    operand: OpTy<'tcx, Tag>,
-    min_size: u64,
-) -> InterpResult<'tcx, ()> {
-    let target_ty = match operand.layout.ty.kind {
-        TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
-        _ => panic!("Argument to pthread function was not a raw pointer"),
-    };
-    let target_layout = ecx.layout_of(target_ty)?;
-    assert!(target_layout.size.bytes() >= min_size);
-    Ok(())
-}
-
-// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
-
-// Our chosen memory layout: store an i32 in the first four bytes equal to the
-// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL)
-
-fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
-    ecx: &MiriEvalContext<'mir, 'tcx>,
-    attr_op: OpTy<'tcx, Tag>,
-) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
-    // Ensure that the following read at an offset to the attr pointer is within bounds
-    assert_ptr_target_min_size(ecx, attr_op, 4)?;
-    let attr_place = ecx.deref_operand(attr_op)?;
-    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
-    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
-    ecx.read_scalar(kind_place.into())
-}
-
-fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    attr_op: OpTy<'tcx, Tag>,
-    kind: impl Into<ScalarMaybeUndef<Tag>>,
-) -> InterpResult<'tcx, ()> {
-    // Ensure that the following write at an offset to the attr pointer is within bounds
-    assert_ptr_target_min_size(ecx, attr_op, 4)?;
-    let attr_place = ecx.deref_operand(attr_op)?;
-    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
-    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
-    ecx.write_scalar(kind.into(), kind_place.into())
-}
-
-// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
-
-// Our chosen memory layout:
-// bytes 0-3: reserved for signature on macOS
-// (need to avoid this because it is set by static initializer macros)
-// bytes 4-7: count of how many times this mutex has been locked, as a u32
-// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
-// (the kind has to be at this offset for compatibility with static initializer macros)
-
-fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
-    ecx: &MiriEvalContext<'mir, 'tcx>,
-    mutex_op: OpTy<'tcx, Tag>,
-) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
-    // Ensure that the following read at an offset to the mutex pointer is within bounds
-    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
-    let mutex_place = ecx.deref_operand(mutex_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let locked_count_place =
-        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.read_scalar(locked_count_place.into())
-}
-
-fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    mutex_op: OpTy<'tcx, Tag>,
-    locked_count: impl Into<ScalarMaybeUndef<Tag>>,
-) -> InterpResult<'tcx, ()> {
-    // Ensure that the following write at an offset to the mutex pointer is within bounds
-    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
-    let mutex_place = ecx.deref_operand(mutex_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let locked_count_place =
-        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.write_scalar(locked_count.into(), locked_count_place.into())
-}
-
-fn mutex_get_kind<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    mutex_op: OpTy<'tcx, Tag>,
-) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
-    // Ensure that the following read at an offset to the mutex pointer is within bounds
-    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
-    let mutex_place = ecx.deref_operand(mutex_op)?;
-    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
-    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    let kind_place =
-        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
-    ecx.read_scalar(kind_place.into())
-}
-
-fn mutex_set_kind<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    mutex_op: OpTy<'tcx, Tag>,
-    kind: impl Into<ScalarMaybeUndef<Tag>>,
-) -> InterpResult<'tcx, ()> {
-    // Ensure that the following write at an offset to the mutex pointer is within bounds
-    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
-    let mutex_place = ecx.deref_operand(mutex_op)?;
-    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
-    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-    let kind_place =
-        mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
-    ecx.write_scalar(kind.into(), kind_place.into())
-}
-
-// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
-
-// Our chosen memory layout:
-// bytes 0-3: reserved for signature on macOS
-// (need to avoid this because it is set by static initializer macros)
-// bytes 4-7: reader count, as a u32
-// bytes 8-11: writer count, as a u32
-
-fn rwlock_get_readers<'mir, 'tcx: 'mir>(
-    ecx: &MiriEvalContext<'mir, 'tcx>,
-    rwlock_op: OpTy<'tcx, Tag>,
-) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
-    // Ensure that the following read at an offset to the rwlock pointer is within bounds
-    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
-    let rwlock_place = ecx.deref_operand(rwlock_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let readers_place =
-        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.read_scalar(readers_place.into())
-}
-
-fn rwlock_set_readers<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    rwlock_op: OpTy<'tcx, Tag>,
-    readers: impl Into<ScalarMaybeUndef<Tag>>,
-) -> InterpResult<'tcx, ()> {
-    // Ensure that the following write at an offset to the rwlock pointer is within bounds
-    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
-    let rwlock_place = ecx.deref_operand(rwlock_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let readers_place =
-        rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.write_scalar(readers.into(), readers_place.into())
-}
-
-fn rwlock_get_writers<'mir, 'tcx: 'mir>(
-    ecx: &MiriEvalContext<'mir, 'tcx>,
-    rwlock_op: OpTy<'tcx, Tag>,
-) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
-    // Ensure that the following read at an offset to the rwlock pointer is within bounds
-    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
-    let rwlock_place = ecx.deref_operand(rwlock_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let writers_place =
-        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.read_scalar(writers_place.into())
-}
-
-fn rwlock_set_writers<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
-    rwlock_op: OpTy<'tcx, Tag>,
-    writers: impl Into<ScalarMaybeUndef<Tag>>,
-) -> InterpResult<'tcx, ()> {
-    // Ensure that the following write at an offset to the rwlock pointer is within bounds
-    assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
-    let rwlock_place = ecx.deref_operand(rwlock_op)?;
-    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
-    let writers_place =
-        rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
-    ecx.write_scalar(writers.into(), writers_place.into())
-}