@@ -202,6 +202,43 @@ unsafe fn test_vqadd_u8() {
     assert_eq!(r, e);
 }

+#[cfg(target_arch = "aarch64")]
+unsafe fn test_vmaxq_f32() {
+    // AArch64 llvm intrinsic: llvm.aarch64.neon.fmax.v4f32
+    let a = f32x4::from([0., -1., 2., -3.]);
+    let b = f32x4::from([-4., 5., -6., 7.]);
+    let e = f32x4::from([0., 5., 2., 7.]);
+    let r: f32x4 = transmute(vmaxq_f32(transmute(a), transmute(b)));
+    assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "aarch64")]
+unsafe fn test_vminq_f32() {
+    // AArch64 llvm intrinsic: llvm.aarch64.neon.fmin.v4f32
+    let a = f32x4::from([0., -1., 2., -3.]);
+    let b = f32x4::from([-4., 5., -6., 7.]);
+    let e = f32x4::from([-4., -1., -6., -3.]);
+    let r: f32x4 = transmute(vminq_f32(transmute(a), transmute(b)));
+    assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "aarch64")]
+unsafe fn test_vaddvq_f32() {
+    // AArch64 llvm intrinsic: llvm.aarch64.neon.faddv.f32.v4f32
+    let a = f32x4::from([0., 1., 2., 3.]);
+    let e = 6f32;
+    let r = vaddvq_f32(transmute(a));
+    assert_eq!(r, e);
+}
+
+unsafe fn test_vrndnq_f32() {
+    // AArch64 llvm intrinsic: llvm.aarch64.neon.frintn.v4f32
+    let a = f32x4::from([0.1, -1.9, 4.5, 5.5]);
+    let e = f32x4::from([0., -2., 4., 6.]);
+    let r: f32x4 = transmute(vrndnq_f32(transmute(a)));
+    assert_eq!(r, e);
+}
+
 #[cfg(target_arch = "aarch64")]
 fn main() {
     unsafe {
@@ -229,6 +266,11 @@ fn main() {

         test_vqsub_u8();
         test_vqadd_u8();
+
+        test_vmaxq_f32();
+        test_vminq_f32();
+        test_vaddvq_f32();
+        test_vrndnq_f32();
     }
 }

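A note on the expected values in `test_vrndnq_f32`: `frintn` rounds to the nearest integer with ties going to the even value, which is why 4.5 maps to 4. while 5.5 maps to 6. A minimal scalar sketch of the same rounding rule (not part of the patch; it assumes `f32::round_ties_even`, stable since Rust 1.77):

```rust
fn main() {
    // Same inputs and expectations as test_vrndnq_f32, checked lane by lane
    // with scalar round-to-nearest, ties-to-even.
    let inputs = [0.1f32, -1.9, 4.5, 5.5];
    let expected = [0.0f32, -2.0, 4.0, 6.0];
    for (x, e) in inputs.iter().zip(expected.iter()) {
        assert_eq!(x.round_ties_even(), *e);
    }
}
```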