@@ -102,51 +102,51 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
         }
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
     pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
         impl $ty {
             pub fn wrapping_add(self, rhs: Self) -> Self {
                 unsafe {
-                    intrinsics::wrapping_add(self, rhs)
+                    crate::intrinsics::wrapping_add(self, rhs)
                 }
             }
 
             pub fn wrapping_sub(self, rhs: Self) -> Self {
                 unsafe {
-                    intrinsics::wrapping_sub(self, rhs)
+                    crate::intrinsics::wrapping_sub(self, rhs)
                 }
             }
 
             pub fn rotate_left(self, n: u32) -> Self {
                 unsafe {
-                    intrinsics::rotate_left(self, n as Self)
+                    crate::intrinsics::rotate_left(self, n as Self)
                 }
             }
 
             pub fn rotate_right(self, n: u32) -> Self {
                 unsafe {
-                    intrinsics::rotate_right(self, n as Self)
+                    crate::intrinsics::rotate_right(self, n as Self)
                 }
             }
 
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
                 }
             }
 
-            pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+            pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
                 Self::from_le(Self::from_ne_bytes(bytes))
             }
 
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
                 }
             }
 
-            pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                unsafe { mem::transmute(bytes) }
+            pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                unsafe { crate::mem::transmute(bytes) }
             }
 
             pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
             }
 
             pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                 (a as Self, b)
             }
         }
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
         // and this won't actually result in an extra check in an optimized build.
         match self.start.add_usize(1) {
             Option::Some(mut n) => {
-                mem::swap(&mut n, &mut self.start);
+                crate::mem::swap(&mut n, &mut self.start);
                 Option::Some(n)
             }
             Option::None => Option::None,
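For reference, a minimal sketch of the path rule this patch applies at every changed call site: `crate::` always resolves from the crate root, so code inside a nested module such as `mod ptr` can name the top-level `intrinsics` and `mem` modules without a `use`. The layout and functions below are illustrative stand-ins, not code from the patched file.

```rust
// Hypothetical, self-contained layout mirroring the pattern in the diff.
mod intrinsics {
    // Stand-in for the real compiler intrinsic of the same name.
    pub fn offset(ptr: *const u8, count: isize) -> *const u8 {
        ptr.wrapping_offset(count)
    }
}

mod ptr {
    pub unsafe fn offset_one(p: *const u8) -> *const u8 {
        // A bare `intrinsics::offset(...)` would be looked up inside `mod ptr`
        // (and fail here); the `crate::` prefix reaches the top-level
        // `intrinsics` module instead, exactly as in the patch above.
        crate::intrinsics::offset(p, 1)
    }
}

fn main() {
    let data = [1u8, 2, 3];
    // SAFETY: the offset stays within `data`.
    let second = unsafe { ptr::offset_one(data.as_ptr()) };
    assert_eq!(unsafe { *second }, 2);
}
```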