 //CHECK-LABEL: define void @_QPsimd_aligned_pointer() {
 //CHECK: %[[A_PTR:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
 //CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_PTR]], align 8
-//CHECK: omp_loop.preheader: ; preds = %0
 //CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
 llvm.func @_QPsimd_aligned_pointer() {
   %1 = llvm.mlir.constant(1 : i64) : i64
@@ -17,14 +16,14 @@ llvm.func @_QPsimd_aligned_pointer() {
       llvm.store %arg0, %3 : i32, !llvm.ptr
       omp.yield
     }
+    omp.terminator
   }
   llvm.return
 }
 
 //CHECK-LABEL: define void @_QPsimd_aligned_cptr() {
 //CHECK: %[[A_CPTR:.*]] = alloca %_QM__fortran_builtinsT__builtin_c_ptr, i64 1, align 8
 //CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_CPTR]], align 8
-//CHECK: omp_loop.preheader: ; preds = %0
 //CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
 llvm.func @_QPsimd_aligned_cptr() {
   %0 = llvm.mlir.constant(1 : i64) : i64
@@ -39,14 +38,14 @@ llvm.func @_QPsimd_aligned_cptr() {
       llvm.store %arg0, %3 : i32, !llvm.ptr
       omp.yield
     }
+    omp.terminator
   }
   llvm.return
 }
 
 //CHECK-LABEL: define void @_QPsimd_aligned_allocatable() {
 //CHECK: %[[A_ADDR:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8
 //CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_ADDR]], align 8
-//CHECK: omp_loop.preheader: ; preds = %0
 //CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
 llvm.func @_QPsimd_aligned_allocatable() {
   %0 = llvm.mlir.constant(1 : i64) : i64
@@ -58,6 +57,7 @@ llvm.func @_QPsimd_aligned_allocatable() {
     omp.loop_nest (%arg0) : i32 = (%2) to (%3) inclusive step (%4) {
       omp.yield
     }
+    omp.terminator
   }
   llvm.return
 }