@@ -24,8 +24,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %vabd_ext = zext <8 x i8> %vabd to <8 x i16>
   %acc_next = add <8 x i16> %vabd_ext, %acc_phi
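Each hunk here shows only the loop body of one test. As a point of reference, a minimal standalone sketch of this widening accumulate step, with a hypothetical function name @sabd_widen_step and the loop control stripped out, would be:

define <8 x i16> @sabd_widen_step(ptr %ptr1, ptr %ptr2, <8 x i16> %acc) {
  ; Load two 8-lane byte vectors through opaque pointers; the value type
  ; now lives on the load rather than on the pointer type.
  %a = load <8 x i8>, ptr %ptr1, align 1
  %b = load <8 x i8>, ptr %ptr2, align 1
  ; Signed absolute difference, widened to i16 before accumulating.
  %vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
  %vabd_ext = zext <8 x i8> %vabd to <8 x i16>
  %acc_next = add <8 x i16> %vabd_ext, %acc
  ret <8 x i16> %acc_next
}

declare <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)

The AArch64 backend can typically match this abd-zext-add chain onto an accumulate-long instruction such as SABAL.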
@@ -65,8 +65,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %vmov = zext <4 x i16> %vabd to <4 x i32>
   %acc_next = add <4 x i32> %vmov, %acc_phi
@@ -116,8 +116,8 @@ loop:
   %acc_phi_lo = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
-  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %a = load <16 x i8>, ptr %ptr1_i, align 1
+  %b = load <16 x i8>, ptr %ptr2_i, align 1
   %a_hi = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %b_hi = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %a_lo = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
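This test splits each <16 x i8> load into high and low halves with shufflevector and keeps per-half <8 x i16> accumulators (%acc_phi_lo above; its high-half counterpart falls outside the hunk). Assuming the low half is then reduced with the same abd-zext-add shape as the other loops, a hedged sketch of the continuation, with illustrative names not copied from the file, would be:

  ; hypothetical continuation for the low half
  %b_lo = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %abd_lo = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a_lo, <8 x i8> %b_lo)
  %abd_lo_ext = zext <8 x i8> %abd_lo to <8 x i16>
  %acc_next_lo = add <8 x i16> %abd_lo_ext, %acc_phi_lo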
@@ -160,8 +160,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
-  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  %a = load <4 x i32>, ptr %ptr1_i, align 1
+  %b = load <4 x i32>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
   %acc_next = add <4 x i32> %acc_phi, %vabd
   %next_i = add i32 %i, 4
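In this test the inputs are already <4 x i32>, so the absolute differences accumulate at full width and no zext is needed. A self-contained sketch of the step (hypothetical function name @uabd_acc_step):

define <4 x i32> @uabd_acc_step(ptr %ptr1, ptr %ptr2, <4 x i32> %acc) {
  %a = load <4 x i32>, ptr %ptr1, align 1
  %b = load <4 x i32>, ptr %ptr2, align 1
  ; Unsigned absolute difference followed by an add; backends can fold
  ; this pair into an accumulating instruction such as UABA.
  %vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
  %acc_next = add <4 x i32> %acc, %vabd
  ret <4 x i32> %acc_next
}

declare <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32>, <4 x i32>)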
@@ -198,8 +198,8 @@ loop:
   ; Load values from ptr1 and ptr2
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
-  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  %a = load <4 x i32>, ptr %ptr1_i, align 1
+  %b = load <4 x i32>, ptr %ptr2_i, align 1
   ; Perform the intrinsic operation
   %vabd = tail call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a, <4 x i32> %b)
   %acc_next = add <4 x i32> %acc_phi, %vabd
@@ -237,8 +237,8 @@ loop:
   %acc_phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
-  %a = load <2 x i32>, <2 x i32>* %ptr1_i, align 1
-  %b = load <2 x i32>, <2 x i32>* %ptr2_i, align 1
+  %a = load <2 x i32>, ptr %ptr1_i, align 1
+  %b = load <2 x i32>, ptr %ptr2_i, align 1
   %vabd = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
   %acc_next = add <2 x i32> %acc_phi, %vabd
   %next_i = add i32 %i, 2
@@ -272,8 +272,8 @@ loop:
   %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %acc_next = add <8 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -307,8 +307,8 @@ loop:
   %acc_phi = phi <16 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
-  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %a = load <16 x i8>, ptr %ptr1_i, align 1
+  %b = load <16 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %a, <16 x i8> %b)
   %acc_next = add <16 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 16
@@ -342,8 +342,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
-  %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+  %a = load <8 x i16>, ptr %ptr1_i, align 1
+  %b = load <8 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %a, <8 x i16> %b)
   %acc_next = add <8 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -377,8 +377,8 @@ loop:
   %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %acc_next = add <8 x i8> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -411,8 +411,8 @@ loop:
   %acc_phi = phi <4 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %acc_next = add <4 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 4
@@ -445,8 +445,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
-  %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+  %a = load <8 x i16>, ptr %ptr1_i, align 1
+  %b = load <8 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %a, <8 x i16> %b)
   %acc_next = add <8 x i16> %acc_phi, %vabd
   %next_i = add i32 %i, 8
@@ -480,8 +480,8 @@ loop:
   %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
-  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
-  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
   %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
   %vmov = zext <8 x i8> %vabd to <8 x i16>
   %acc_next = add <8 x i16> %vmov, %acc_phi
@@ -516,8 +516,8 @@ loop:
   %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
   %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
   %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
-  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
-  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
   %vabd = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
   %vmov = zext <4 x i16> %vabd to <4 x i32>
   %acc_next = add <4 x i32> %vmov, %acc_phi
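Every hunk in this diff makes the same mechanical update: the load drops the pointee type from its pointer operand in favor of the opaque ptr type, while the loaded value type stays on the load instruction itself, so nothing else in the tests has to change. Schematically:

  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1   ; before: typed pointer
  %a = load <8 x i8>, ptr %ptr1_i, align 1         ; after: opaque pointer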