 use std::ops::Range;
-use rustc_abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
+use rustc_abi::{Align, Endian, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
 use rustc_codegen_ssa::common;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::LangItem;
@@ -28,6 +28,7 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
     cx: &CodegenCx<'ll, '_>,
     alloc: &Allocation,
     is_static: bool,
+    vtable_base: Option<&'ll Value>,
 ) -> &'ll Value {
     // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or
     // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be
@@ -43,6 +44,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size();
     let pointer_size_bytes = pointer_size.bytes() as usize;
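+    // A relative vtable stores 32-bit offsets instead of absolute pointers. The
+    // relative layout is used only when the experimental flag is enabled and the
+    // caller supplied the vtable global itself as `vtable_base`.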
+    let use_relative_layout = cx.sess().opts.unstable_opts.experimental_relative_rust_abi_vtables
+        && vtable_base.is_some();
 
     // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
     // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
@@ -51,7 +54,11 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
         cx: &'a CodegenCx<'ll, 'b>,
         alloc: &'a Allocation,
         range: Range<usize>,
+        use_relative_layout: bool,
     ) {
+        let dl = cx.data_layout();
+        let pointer_size = dl.pointer_size();
+        let pointer_size_bytes = pointer_size.bytes() as usize;
         let chunks = alloc.init_mask().range_as_init_chunks(range.clone().into());
 
         let chunk_to_llval = move |chunk| match chunk {
@@ -74,7 +81,43 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
         let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
 
         if allow_uninit_chunks {
-            llvals.extend(chunks.map(chunk_to_llval));
+            if use_relative_layout {
+                // Rather than being stored as a struct of pointers or byte arrays, a relative
+                // vtable is a pure i32 array, so its components must be chunks of i32s. Here we
+                // explicitly group any sequence of bytes into i32s.
+                //
+                // Note that this only works if each pointer-sized constant fits into 4 bytes.
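+                //
+                // For illustration (assuming a 64-bit little-endian target): a usize
+                // field such as the vtable's size, stored as the 8 bytes
+                // `[24, 0, 0, 0, 0, 0, 0, 0]`, becomes the single element `i32 24`.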
+                for chunk in chunks {
+                    match chunk {
+                        InitChunk::Init(range) => {
+                            let range =
+                                (range.start.bytes() as usize)..(range.end.bytes() as usize);
+                            let bytes =
+                                alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                            for bytes in bytes.chunks_exact(pointer_size_bytes) {
+                                assert!(
+                                    bytes[4..pointer_size_bytes].iter().all(|&x| x == 0),
+                                    "Cannot fit constant into 4 bytes: {:?}",
+                                    bytes
+                                );
+                                let bytes: [u8; 4] = bytes[0..4].try_into().unwrap();
+                                let val: u32 = match dl.endian {
+                                    Endian::Big => u32::from_be_bytes(bytes),
+                                    Endian::Little => u32::from_le_bytes(bytes),
+                                };
+                                llvals.push(cx.const_u32(val));
+                            }
+                        }
+                        InitChunk::Uninit(range) => {
+                            let len = range.end.bytes() - range.start.bytes();
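+                            // In the relative layout every pointer-sized slot shrinks
+                            // to 4 bytes, so the uninit run is emitted at half its
+                            // byte length.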
+                            let val = cx.const_undef(cx.type_array(cx.type_i8(), len / 2));
+                            llvals.push(val);
+                        }
+                    };
+                }
+            } else {
+                llvals.extend(chunks.map(chunk_to_llval));
+            }
         } else {
             // If this allocation contains any uninit bytes, codegen as if it was initialized
             // (using some arbitrary value for uninit bytes).
@@ -92,7 +135,13 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
             // This `inspect` is okay since we have checked that there is no provenance, it
             // is within the bounds of the allocation, and it doesn't affect interpreter execution
             // (we inspect the result after interpreter execution).
-            append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
+            append_chunks_of_init_and_uninit_bytes(
+                &mut llvals,
+                cx,
+                alloc,
+                next_offset..offset,
+                use_relative_layout,
+            );
         }
         let ptr_offset = read_target_uint(
             dl.endian,
@@ -108,38 +157,64 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
 
         let address_space = cx.tcx.global_alloc(prov.alloc_id()).address_space(cx);
 
-        llvals.push(cx.scalar_to_backend(
-            InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx),
-            Scalar::Initialized {
-                value: Primitive::Pointer(address_space),
-                valid_range: WrappingRange::full(pointer_size),
-            },
-            cx.type_ptr_ext(address_space),
-        ));
+        let s = {
+            let scalar = cx.scalar_to_backend(
+                InterpScalar::from_pointer(
+                    Pointer::new(prov, Size::from_bytes(ptr_offset)),
+                    &cx.tcx,
+                ),
+                Scalar::Initialized {
+                    value: Primitive::Pointer(address_space),
+                    valid_range: WrappingRange::full(pointer_size),
+                },
+                cx.type_ptr_ext(address_space),
+            );
+
+            if use_relative_layout {
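+                // Emit the entry as a 32-bit offset relative to the vtable base
+                // rather than an absolute pointer, roughly:
+                //   trunc(ptrtoint(dso_local_equivalent @fn) - ptrtoint(@vtable), i32)
+                // `dso_local_equivalent` is a dso-local stand-in for the function,
+                // keeping the difference a link-time constant.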
+                unsafe {
+                    let fptr = llvm::LLVMDSOLocalEquivalent(scalar);
+                    let sub = llvm::LLVMConstSub(
+                        llvm::LLVMConstPtrToInt(fptr, cx.type_i64()),
+                        llvm::LLVMConstPtrToInt(vtable_base.unwrap(), cx.type_i64()),
+                    );
+                    llvm::LLVMConstTrunc(sub, cx.type_i32())
+                }
+            } else {
+                scalar
+            }
+        };
+
+        llvals.push(s);
         next_offset = offset + pointer_size_bytes;
     }
     if alloc.len() >= next_offset {
         let range = next_offset..alloc.len();
         // This `inspect` is okay since we have checked that it is after all provenance, it is
         // within the bounds of the allocation, and it doesn't affect interpreter execution (we
         // inspect the result after interpreter execution).
-        append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
+        append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range, use_relative_layout);
     }
 
     // Avoid wrapping in a struct if there is only a single value. This ensures
     // that LLVM is able to perform the string merging optimization if the constant
     // is a valid C string. LLVM only considers bare arrays for this optimization,
     // not arrays wrapped in a struct. LLVM handles this at:
     // https://github.com/rust-lang/llvm-project/blob/acaea3d2bb8f351b740db7ebce7d7a40b9e21488/llvm/lib/Target/TargetLoweringObjectFile.cpp#L249-L280
-    if let &[data] = &*llvals { data } else { cx.const_struct(&llvals, true) }
+    if let &[data] = &*llvals {
+        data
+    } else if use_relative_layout {
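+        // Every entry of a relative vtable has been lowered to an i32, so emit a
+        // plain `[N x i32]` array rather than a struct.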
+        cx.const_array(cx.type_i32(), &llvals)
+    } else {
+        cx.const_struct(&llvals, true)
+    }
 }
 
 fn codegen_static_initializer<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     def_id: DefId,
 ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
     let alloc = cx.tcx.eval_static_initializer(def_id)?;
-    Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true), alloc))
+    Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true, /*vtable_base*/ None), alloc))
 }
 
 fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
@@ -232,19 +307,29 @@ impl<'ll> CodegenCx<'ll, '_> {
         cv: &'ll Value,
         align: Align,
         kind: Option<&str>,
+    ) -> &'ll Value {
+        let gv = self.static_addr_of_mut_from_type(self.val_ty(cv), align, kind);
+        llvm::set_initializer(gv, cv);
+        gv
+    }
+
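+    /// Like `static_addr_of_mut`, but creates the global from just a type and
+    /// leaves it uninitialized; the caller sets the initializer later. This allows
+    /// a global to exist before its initializer is built (as a relative vtable
+    /// requires, since its entries are offsets from the vtable itself).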
+    pub(crate) fn static_addr_of_mut_from_type(
+        &self,
+        ty: &'ll Type,
+        align: Align,
+        kind: Option<&str>,
     ) -> &'ll Value {
         let gv = match kind {
             Some(kind) if !self.tcx.sess.fewer_names() => {
                 let name = self.generate_local_symbol_name(kind);
-                let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
+                let gv = self.define_global(&name, ty).unwrap_or_else(|| {
                     bug!("symbol `{}` is already defined", name);
                 });
                 llvm::set_linkage(gv, llvm::Linkage::PrivateLinkage);
                 gv
             }
-            _ => self.define_private_global(self.val_ty(cv)),
+            _ => self.define_private_global(ty),
         };
-        llvm::set_initializer(gv, cv);
         set_global_alignment(self, gv, align);
         llvm::set_unnamed_address(gv, llvm::UnnamedAddr::Global);
         gv
@@ -277,6 +362,15 @@ impl<'ll> CodegenCx<'ll, '_> {
         gv
     }
 
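+    /// Registers `cv` as the initializer of the pre-created global `gv` and caches
+    /// the value-to-global mapping, mirroring what `static_addr_of_impl` does for
+    /// globals it creates itself.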
+    pub(crate) fn static_addr_of_impl_for_gv(&self, cv: &'ll Value, gv: &'ll Value) -> &'ll Value {
+        assert!(!self.const_globals.borrow().contains_key(&cv));
+        let mut binding = self.const_globals.borrow_mut();
+        binding.insert(cv, gv);
+        llvm::set_initializer(gv, cv);
+        llvm::set_global_constant(gv, true);
+        gv
+    }
+
     #[instrument(level = "debug", skip(self))]
     pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value {
         let instance = Instance::mono(self.tcx, def_id);