 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem -passes=slp-vectorizer -S < %s | FileCheck %s
 
 define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) {
 ; CHECK-LABEL: define void @const_stride_1_no_reordering(
@@ -622,9 +622,9 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) |
 ; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 [[GEP_L0]], i64 100, <4 x i1> splat (i1 true), i32 4)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
-; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 [[GEP_L0]], i64 100, <4 x i1> splat (i1 true), i32 4)
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+; CHECK-NEXT: store <16 x i8> [[TMP11]], ptr [[GEP_S0]], align 1
 ; CHECK-NEXT: ret void
 ;
 %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0