@@ -17,42 +17,9 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
 ; CHECK: [[FOR_BODY_LR_PH]]:
 ; CHECK-NEXT: [[MUL15:%.*]] = mul nsw i32 [[ADD]], [[K]]
 ; CHECK-NEXT: [[MUL17:%.*]] = mul nsw i32 [[ADD12]], [[K]]
- ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[K]], 2
- ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
- ; CHECK: [[VECTOR_PH]]:
- ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[K]], 2
- ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[K]], [[N_MOD_VF]]
- ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
- ; CHECK: [[VECTOR_BODY]]:
- ; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
- ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
- ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[TMP0]], [[MUL15]]
- ; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP0]], [[MUL17]]
- ; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
- ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_A_GRID_COERCE]], i64 [[TMP3]]
- ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP4]], i32 0
- ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP5]], align 1
- ; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP2]] to i64
- ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_B_GRID_COERCE]], i64 [[TMP6]]
- ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 0
- ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP8]], align 1
- ; CHECK-NEXT: [[TMP9:%.*]] = zext <2 x i8> [[WIDE_LOAD]] to <2 x i32>
- ; CHECK-NEXT: [[TMP10:%.*]] = zext <2 x i8> [[WIDE_LOAD1]] to <2 x i32>
- ; CHECK-NEXT: [[TMP11:%.*]] = mul nuw nsw <2 x i32> [[TMP10]], [[TMP9]]
- ; CHECK-NEXT: [[TMP12]] = add <2 x i32> [[TMP11]], [[VEC_PHI]]
- ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2
- ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
- ; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
- ; CHECK: [[MIDDLE_BLOCK]]:
- ; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP12]])
- ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
- ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
- ; CHECK: [[SCALAR_PH]]:
- ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
- ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
 ; CHECK-NEXT: br label %[[FOR_BODY:.*]]
- ; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT]]:
- ; CHECK-NEXT: [[ADD24_LCSSA:%.*]] = phi i32 [ [[ADD24:%.*]], %[[FOR_BODY]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ]
+ ; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
+ ; CHECK-NEXT: [[ADD24_LCSSA:%.*]] = phi i32 [ [[ADD24:%.*]], %[[FOR_BODY]] ]
 ; CHECK-NEXT: [[TMP15:%.*]] = trunc i32 [[ADD24_LCSSA]] to i8
 ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
 ; CHECK: [[FOR_COND_CLEANUP]]:
@@ -64,8 +31,8 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
 ; CHECK-NEXT: store i8 [[V_ACC_0_LCSSA]], ptr addrspace(1) [[ARRAYIDX28]], align 1
 ; CHECK-NEXT: br label %[[IF_END]]
 ; CHECK: [[FOR_BODY]]:
- ; CHECK-NEXT: [[K_IDX_046:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
- ; CHECK-NEXT: [[V_ACC_045:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD24]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: [[K_IDX_046:%.*]] = phi i32 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: [[V_ACC_045:%.*]] = phi i32 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[ADD24]], %[[FOR_BODY]] ]
 ; CHECK-NEXT: [[ADD16:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL15]]
 ; CHECK-NEXT: [[ADD18:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL17]]
 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD16]] to i64
@@ -80,7 +47,7 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
 ; CHECK-NEXT: [[ADD24]] = add i32 [[MUL23]], [[V_ACC_045]]
 ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_IDX_046]], 1
 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[K]]
- ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+ ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]]
 ; CHECK: [[IF_END]]:
 ; CHECK-NEXT: ret void
 ;
@@ -132,9 +99,3 @@ for.body: ; preds = %for.body, %for.body
 if.end: ; preds = %for.cond.cleanup, %entry
 ret void
 }
- ;.
- ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
- ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
- ; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]], [[META1]]}
- ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
- ;.