@@ -16,12 +16,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 13, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -130,12 +125,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 13, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -244,12 +234,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 13, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -358,12 +343,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 13, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -472,11 +452,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: [[ENTRY:.*]]:
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; IF-EVL-NEXT: br i1 [[TMP3]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -571,11 +547,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: [[ENTRY:.*]]:
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; IF-EVL-NEXT: br i1 [[TMP3]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -670,12 +642,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: [[ENTRY:.*]]:
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 9, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -778,12 +745,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: [[ENTRY:.*]]:
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 9, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
@@ -886,12 +848,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: [[ENTRY:.*]]:
 ; IF-EVL-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
 ; IF-EVL-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; IF-EVL-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP2]])
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; IF-EVL-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
 ; IF-EVL: [[VECTOR_MEMCHECK]]:
 ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4