@@ -23,143 +23,261 @@ intrinsics! {
     #[naked]
     #[cfg(not(target_env = "msvc"))]
     pub unsafe extern "C" fn __aeabi_uidivmod() {
-        core::arch::naked_asm!(
-            "push {{lr}}",
-            "sub sp, sp, #4",
-            "mov r2, sp",
-            bl!("__udivmodsi4"),
-            "ldr r1, [sp]",
-            "add sp, sp, #4",
-            "pop {{pc}}",
-        );
+        unsafe {
+            core::arch::naked_asm!(
+                "push {{lr}}",
+                "sub sp, sp, #4",
+                "mov r2, sp",
+                bl!("__udivmodsi4"),
+                "ldr r1, [sp]",
+                "add sp, sp, #4",
+                "pop {{pc}}",
+            );
+        }
     }

     #[naked]
     pub unsafe extern "C" fn __aeabi_uldivmod() {
-        core::arch::naked_asm!(
-            "push {{r4, lr}}",
-            "sub sp, sp, #16",
-            "add r4, sp, #8",
-            "str r4, [sp]",
-            bl!("__udivmoddi4"),
-            "ldr r2, [sp, #8]",
-            "ldr r3, [sp, #12]",
-            "add sp, sp, #16",
-            "pop {{r4, pc}}",
-        );
+        unsafe {
+            core::arch::naked_asm!(
+                "push {{r4, lr}}",
+                "sub sp, sp, #16",
+                "add r4, sp, #8",
+                "str r4, [sp]",
+                bl!("__udivmoddi4"),
+                "ldr r2, [sp, #8]",
+                "ldr r3, [sp, #12]",
+                "add sp, sp, #16",
+                "pop {{r4, pc}}",
+            );
+        }
     }

     #[naked]
     pub unsafe extern "C" fn __aeabi_idivmod() {
-        core::arch::naked_asm!(
-            "push {{r0, r1, r4, lr}}",
-            bl!("__aeabi_idiv"),
-            "pop {{r1, r2}}",
-            "muls r2, r2, r0",
-            "subs r1, r1, r2",
-            "pop {{r4, pc}}",
-        );
+        unsafe {
+            core::arch::naked_asm!(
+                "push {{r0, r1, r4, lr}}",
+                bl!("__aeabi_idiv"),
+                "pop {{r1, r2}}",
+                "muls r2, r2, r0",
+                "subs r1, r1, r2",
+                "pop {{r4, pc}}",
+            );
+        }
     }

     #[naked]
     pub unsafe extern "C" fn __aeabi_ldivmod() {
-        core::arch::naked_asm!(
-            "push {{r4, lr}}",
-            "sub sp, sp, #16",
-            "add r4, sp, #8",
-            "str r4, [sp]",
-            bl!("__divmoddi4"),
-            "ldr r2, [sp, #8]",
-            "ldr r3, [sp, #12]",
-            "add sp, sp, #16",
-            "pop {{r4, pc}}",
-        );
+        unsafe {
+            core::arch::naked_asm!(
+                "push {{r4, lr}}",
+                "sub sp, sp, #16",
+                "add r4, sp, #8",
+                "str r4, [sp]",
+                bl!("__divmoddi4"),
+                "ldr r2, [sp, #8]",
+                "ldr r3, [sp, #12]",
+                "add sp, sp, #16",
+                "pop {{r4, pc}}",
+            );
+        }
     }

-    // FIXME: The `*4` and `*8` variants should be defined as aliases.
+    // FIXME(arm): The `*4` and `*8` variants should be defined as aliases.

+    /// `memcpy` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy(dest: *mut u8, src: *const u8, n: usize) {
-        crate::mem::memcpy(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memcpy(dst: *mut u8, src: *const u8, n: usize) {
+        // SAFETY: memcpy preconditions apply.
+        unsafe { crate::mem::memcpy(dst, src, n) };
     }

+    /// `memcpy` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// four bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy4(dest: *mut u8, src: *const u8, n: usize) {
+    pub unsafe extern "aapcs" fn __aeabi_memcpy4(dst: *mut u8, src: *const u8, n: usize) {
         // We are guaranteed 4-alignment, so accessing at u32 is okay.
-        let mut dest = dest as *mut u32;
-        let mut src = src as *mut u32;
+        let mut dst = dst.cast::<u32>();
+        let mut src = src.cast::<u32>();
+        debug_assert!(dst.is_aligned());
+        debug_assert!(src.is_aligned());
         let mut n = n;

         while n >= 4 {
-            *dest = *src;
-            dest = dest.offset(1);
-            src = src.offset(1);
+            // SAFETY: `dst` and `src` are both valid for at least 4 bytes, from
+            // `memcpy` preconditions and the loop guard.
+            unsafe { *dst = *src };
+
+            // TODO
+            unsafe {
+                dst = dst.offset(1);
+                src = src.offset(1);
+            }
+
             n -= 4;
         }

-        __aeabi_memcpy(dest as *mut u8, src as *const u8, n);
+        // SAFETY: `dst` and `src` will still be valid for `n` bytes
+        unsafe { __aeabi_memcpy(dst.cast::<u8>(), src.cast::<u8>(), n) };
     }

+    /// `memcpy` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memcpy` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// eight bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memcpy8(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memcpy4(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memcpy8(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+        debug_assert!(src.addr() & 7 == 0);
+
+        // SAFETY: memcpy preconditions apply, less strict alignment.
+        unsafe { __aeabi_memcpy4(dst, src, n) };
     }

+    /// `memmove` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove(dest: *mut u8, src: *const u8, n: usize) {
-        crate::mem::memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove(dst: *mut u8, src: *const u8, n: usize) {
+        // SAFETY: memmove preconditions apply.
+        unsafe { crate::mem::memmove(dst, src, n) };
     }

+    /// `memmove` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// four bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove4(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove4(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 3 == 0);
+        debug_assert!(src.addr() & 3 == 0);
+
+        // SAFETY: same preconditions, less strict alignment.
+        unsafe { __aeabi_memmove(dst, src, n) };
     }

+    /// `memmove` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memmove` requirements apply. Additionally, `dst` and `src` must be aligned to
+    /// eight bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memmove8(dest: *mut u8, src: *const u8, n: usize) {
-        __aeabi_memmove(dest, src, n);
+    pub unsafe extern "aapcs" fn __aeabi_memmove8(dst: *mut u8, src: *const u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+        debug_assert!(src.addr() & 7 == 0);
+
+        // SAFETY: memmove preconditions apply, less strict alignment.
+        unsafe { __aeabi_memmove(dst, src, n) };
     }

+    /// `memset` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset(dest: *mut u8, n: usize, c: i32) {
+    pub unsafe extern "aapcs" fn __aeabi_memset(dst: *mut u8, n: usize, c: i32) {
         // Note the different argument order
-        crate::mem::memset(dest, c, n);
+        // SAFETY: memset preconditions apply.
+        unsafe { crate::mem::memset(dst, c, n) };
     }

+    /// `memset` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply. Additionally, `dst` must be aligned to
+    /// four bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset4(dest: *mut u8, n: usize, c: i32) {
-        let mut dest = dest as *mut u32;
+    pub unsafe extern "aapcs" fn __aeabi_memset4(dst: *mut u8, n: usize, c: i32) {
+        let mut dst = dst.cast::<u32>();
+        debug_assert!(dst.is_aligned());
         let mut n = n;

         let byte = (c as u32) & 0xff;
         let c = (byte << 24) | (byte << 16) | (byte << 8) | byte;

         while n >= 4 {
-            *dest = c;
-            dest = dest.offset(1);
+            // SAFETY: `dst` is valid for at least 4 bytes, from `memset` preconditions and
+            // the loop guard.
+            unsafe { *dst = c };
+            // TODO
+            unsafe {
+                dst = dst.offset(1);
+            }
             n -= 4;
         }

-        __aeabi_memset(dest as *mut u8, n, byte as i32);
+        // SAFETY: `dst` will still be valid for `n` bytes
+        unsafe { __aeabi_memset(dst.cast::<u8>(), n, byte as i32) };
     }

+    /// `memset` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memset` requirements apply. Additionally, `dst` must be aligned to
+    /// eight bytes.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memset8(dest: *mut u8, n: usize, c: i32) {
-        __aeabi_memset4(dest, n, c);
+    pub unsafe extern "aapcs" fn __aeabi_memset8(dst: *mut u8, n: usize, c: i32) {
+        debug_assert!(dst.addr() & 7 == 0);
+
+        // SAFETY: memset preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, c) };
     }

+    /// `memclr` provided with the `aapcs` ABI.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply.
     #[cfg(not(target_vendor = "apple"))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr(dest: *mut u8, n: usize) {
-        __aeabi_memset(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr(dst: *mut u8, n: usize) {
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset(dst, n, 0) };
     }

+    /// `memclr` for 4-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned to
+    /// four bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr4(dest: *mut u8, n: usize) {
-        __aeabi_memset4(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr4(dst: *mut u8, n: usize) {
+        debug_assert!(dst.addr() & 3 == 0);
+
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, 0) };
     }

+    /// `memclr` for 8-byte alignment.
+    ///
+    /// # Safety
+    ///
+    /// Usual `memclr` requirements apply. Additionally, `dst` must be aligned to
+    /// eight bytes.
     #[cfg(not(any(target_vendor = "apple", target_env = "msvc")))]
-    pub unsafe extern "aapcs" fn __aeabi_memclr8(dest: *mut u8, n: usize) {
-        __aeabi_memset4(dest, n, 0);
+    pub unsafe extern "aapcs" fn __aeabi_memclr8(dst: *mut u8, n: usize) {
+        debug_assert!(dst.addr() & 7 == 0);
+
+        // SAFETY: memclr preconditions apply, less strict alignment.
+        unsafe { __aeabi_memset4(dst, n, 0) };
     }
 }