/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
- * @version  V1.2.0
- * @date     17. May 2019
+ * @version  V1.3.0
+ * @date     17. December 2019
 ******************************************************************************/
/*
 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
@@ -119,6 +119,15 @@ __STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
}


+__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
@@ -127,6 +136,14 @@ __STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
  return(result);
}

+__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
__STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;
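The new __QADD8/__QSUB8 intrinsics perform four lane-wise saturating signed byte operations, mirroring the existing halfword variants. A minimal illustrative sketch (example values assumed, not part of the commit):

  uint32_t r = __QADD8(0x7F017F01U, 0x01010101U);  /* 0x7F+1 saturates per byte: r == 0x7F027F02 */
  uint32_t s = __QSUB8(0x80808080U, 0x01010101U);  /* -128-1 saturates per byte: s == 0x80808080 */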
@@ -135,6 +152,22 @@ __STATIC_FORCEINLINE int32_t __QADD( int32_t op1,  int32_t op2)
  return(result);
}

+__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
__STATIC_FORCEINLINE uint64_t __SMLALD(uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
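__QSAX saturates an exchanged halfword subtract/add pair; __SHSAX computes the same pair but halves each result instead of saturating. A sketch of the lane semantics (example values assumed, not from the commit):

  /* res[15:0] = sat(op1[15:0] + op2[31:16]);  res[31:16] = sat(op1[31:16] - op2[15:0]) */
  uint32_t r = __QSAX(0x00010002U, 0x00030004U);   /* lo: 2+3 = 5; hi: 1-4 = -3; r == 0xFFFD0005 */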
@@ -160,6 +193,15 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1,  int32_t op2)
  return(result);
}

+__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
+{
+  uint32_t result;
+
+  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+  return(result);
+}
+
+
__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;
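__SXTB16 sign-extends bytes 0 and 2 of its operand into the two result halfwords. Illustrative (example value assumed):

  uint32_t r = __SXTB16(0x00FF0080U);   /* byte0 0x80 -> 0xFF80, byte2 0xFF -> 0xFFFF; r == 0xFFFFFF80 */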
@@ -168,9 +210,14 @@ __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
  return(result);
}

+
+
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) | \
                                  ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )

+#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) | \
+                                  ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
+
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
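__PKHTB complements the existing __PKHBT: it keeps the top halfword of ARG1 and packs it with the bottom halfword of ARG2 shifted right by ARG3. Illustrative (example values assumed):

  uint32_t r = __PKHTB(0x11223344U, 0x55667788U, 8);   /* 0x11220000 | ((0x55667788 >> 8) & 0xFFFFU); r == 0x11226677 */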
@@ -220,7 +267,61 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
  return(result);
}

+__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}


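These additions round out the SIMD set: __SMUSD/__SMUSDX compute a dual 16x16 multiply with subtraction (res = op1.lo*op2.lo - op1.hi*op2.hi; the X form exchanges op2's halfwords), __QASX mirrors __QSAX with the add and subtract lanes swapped, the __SH* variants halve results instead of saturating, and __SMLSDX adds the exchanged dual-multiply difference to an accumulator. One halving example (values assumed, not part of the commit):

  uint32_t r = __SHADD16(0x00040006U, 0x00020002U);   /* (4+2)/2 and (6+2)/2 per halfword: r == 0x00030004 */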
/* ########################## Core Instruction Access ######################### */
@@ -232,12 +333,12 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
/**
  \brief   Wait For Interrupt
 */
-#define __WFI()    __ASM volatile ("wfi")
+#define __WFI()    __ASM volatile ("wfi":::"memory")

/**
  \brief   Wait For Event
 */
-#define __WFE()    __ASM volatile ("wfe")
+#define __WFE()    __ASM volatile ("wfe":::"memory")

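The new "memory" clobber turns __WFI/__WFE into compiler barriers: values the compiler has cached in registers are written back before sleeping and re-read afterwards. A sketch (the 'flag' variable is hypothetical, set by an interrupt handler):

  while (!flag) {
    __WFE();        /* the "memory" clobber forces 'flag' to be re-read on each pass */
  }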
/**
  \brief   Send Event
@@ -289,7 +390,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
#else
  uint32_t result;

-  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
+  __ASM ("rev %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}
@@ -300,14 +401,12 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
-#ifndef __NO_EMBEDDED_ASM
-__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
+__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
-  __ASM volatile ("rev16 %0, %1" : "=r" (result) : "r" (value));
+  __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}
-#endif

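Behaviour is unchanged: __REV16 still swaps the bytes within each halfword independently, e.g. (value assumed):

  uint32_t r = __REV16(0x12345678U);   /* 0x1234 -> 0x3412, 0x5678 -> 0x7856; r == 0x34127856 */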
/**
  \brief   Reverse byte order (16 bit)
@@ -322,7 +421,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
#else
  int16_t result;

-  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
+  __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}
@@ -364,7 +463,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
-  __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
+  __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
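Dropping 'volatile' from these pure bit-twiddling asm statements is deliberate: a non-volatile asm with outputs is treated as an ordinary computation, so GCC may common-subexpression-eliminate it, hoist it out of loops, or delete it when the result is unused, e.g. (illustrative, 'x' assumed):

  uint32_t y = __RBIT(x) ^ __RBIT(x);   /* the compiler is now free to emit a single rbit (y == 0) */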
@@ -529,11 +628,11 @@ __STATIC_FORCEINLINE void __CLREX(void)
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
-#define __SSAT(ARG1,ARG2) \
+#define __SSAT(ARG1, ARG2) \
__extension__ \
({ \
  int32_t __RES, __ARG1 = (ARG1); \
-  __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

@@ -545,11 +644,11 @@ __extension__ \
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
-#define __USAT(ARG1,ARG2) \
+#define __USAT(ARG1, ARG2) \
__extension__ \
({ \
  uint32_t __RES, __ARG1 = (ARG1); \
-  __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

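The added 'volatile' and "cc" clobber reflect that ssat/usat update status flags (the Q flag on saturation), so the compiler must neither reorder them nor assume the condition flags survive. Illustrative use (values assumed):

  int32_t  s = __SSAT(300, 8);    /* clamp to the signed 8-bit range: s == 127 */
  uint32_t u = __USAT(-5, 8);     /* clamp to the unsigned 8-bit range: u == 0 */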
@@ -637,7 +736,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void)
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
-  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
+  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}

/** \brief  Get Mode
0 commit comments