; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -O3 -S %s | FileCheck %s

target triple = "arm64-apple-macosx15.0.0"
; std::find-style loop over i16 elements with a constant-offset (256-byte) end
; pointer, where the start pointer carries "align" and "dereferenceable"
; assumption bundles. Checks the -O3 pipeline output for this variant.
define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16 noundef signext %s) nofree nosync {
; CHECK-LABEL: define i64 @std_find_i16_constant_offset_with_assumptions(
; CHECK-SAME: ptr [[FIRST_COERCE:%.*]], i16 noundef signext [[S:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST_COERCE]], i64 2) ]
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[FIRST_COERCE]], i64 256) ]
; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[FIRST_COERCE]] to i64
; CHECK-NEXT:    [[COERCE_VAL_PI_I:%.*]] = add i64 [[TMP0]], 256
; CHECK-NEXT:    [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[COERCE_VAL_PI_I]] to ptr
; CHECK-NEXT:    [[CMP_NOT6_I_I:%.*]] = icmp eq ptr [[FIRST_COERCE]], [[COERCE_VAL_IP]]
; CHECK-NEXT:    br i1 [[CMP_NOT6_I_I]], label %[[RETURN:.*]], label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[FIRST_COERCE]], %[[ENTRY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[PTR_IV]], align 2
; CHECK-NEXT:    [[CMP2_I_I:%.*]] = icmp eq i16 [[TMP1]], [[S]]
; CHECK-NEXT:    br i1 [[CMP2_I_I]], label %[[RETURN_LOOPEXIT:.*]], label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[PTR_IV_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 2
; CHECK-NEXT:    [[CMP_NOT_I_I:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[COERCE_VAL_IP]]
; CHECK-NEXT:    br i1 [[CMP_NOT_I_I]], label %[[RETURN_LOOPEXIT]], label %[[LOOP_HEADER]]
; CHECK:       [[RETURN_LOOPEXIT]]:
; CHECK-NEXT:    [[MERGE_PH:%.*]] = phi ptr [ [[COERCE_VAL_IP]], %[[LOOP_LATCH]] ], [ [[PTR_IV]], %[[LOOP_HEADER]] ]
; CHECK-NEXT:    [[DOTPRE:%.*]] = ptrtoint ptr [[MERGE_PH]] to i64
; CHECK-NEXT:    br label %[[RETURN]]
; CHECK:       [[RETURN]]:
; CHECK-NEXT:    [[RES_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[RETURN_LOOPEXIT]] ], [ [[TMP0]], %[[ENTRY]] ]
; CHECK-NEXT:    ret i64 [[RES_PRE_PHI]]
;
entry:
  %first = alloca { ptr }, align 8
  %s.addr = alloca i16, align 2
  store ptr %first.coerce, ptr %first, align 8
  store i16 %s, ptr %s.addr, align 2
  %0 = load ptr, ptr %first, align 8
  call void @llvm.assume(i1 true) [ "align"(ptr %0, i64 2) ]
  call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %0, i64 256) ]
  %start.ptr = load ptr, ptr %first, align 8
  %1 = load i64, ptr %first, align 8
  %coerce.val.pi.i = add i64 %1, 256
  %coerce.val.ip = inttoptr i64 %coerce.val.pi.i to ptr
  %cmp.not6.i.i = icmp eq ptr %start.ptr, %coerce.val.ip
  br i1 %cmp.not6.i.i, label %return, label %loop.ph

loop.ph:
  %2 = load i16, ptr %s.addr, align 2
  br label %loop.header

loop.header:
  %ptr.iv = phi ptr [ %start.ptr, %loop.ph ], [ %ptr.iv.next, %loop.latch ]
  %3 = load i16, ptr %ptr.iv, align 2
  %cmp2.i.i = icmp eq i16 %3, %2
  br i1 %cmp2.i.i, label %return, label %loop.latch

loop.latch:
  %ptr.iv.next = getelementptr inbounds nuw i8, ptr %ptr.iv, i64 2
  %cmp.not.i.i = icmp eq ptr %ptr.iv.next, %coerce.val.ip
  br i1 %cmp.not.i.i, label %return, label %loop.header

return:
  %merge = phi ptr [ %start.ptr, %entry ], [ %coerce.val.ip, %loop.latch ], [ %ptr.iv, %loop.header ]
  %res = ptrtoint ptr %merge to i64
  ret i64 %res
}
| 69 | + |
; Same std::find-style loop as above, but without the align/dereferenceable
; assumption bundles on the start pointer. Checks the -O3 pipeline output for
; this variant.
define i64 @std_find_i16_constant_offset_no_assumptions(ptr %first.coerce, i16 noundef signext %s) nofree nosync {
; CHECK-LABEL: define i64 @std_find_i16_constant_offset_no_assumptions(
; CHECK-SAME: ptr [[FIRST_COERCE:%.*]], i16 noundef signext [[S:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[FIRST_COERCE]] to i64
; CHECK-NEXT:    [[COERCE_VAL_PI_I:%.*]] = add i64 [[TMP0]], 256
; CHECK-NEXT:    [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[COERCE_VAL_PI_I]] to ptr
; CHECK-NEXT:    [[CMP_NOT6_I_I:%.*]] = icmp eq ptr [[FIRST_COERCE]], [[COERCE_VAL_IP]]
; CHECK-NEXT:    br i1 [[CMP_NOT6_I_I]], label %[[RETURN:.*]], label %[[LOOP_HEADER:.*]]
; CHECK:       [[LOOP_HEADER]]:
; CHECK-NEXT:    [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[FIRST_COERCE]], %[[ENTRY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[PTR_IV]], align 2
; CHECK-NEXT:    [[CMP2_I_I:%.*]] = icmp eq i16 [[TMP1]], [[S]]
; CHECK-NEXT:    br i1 [[CMP2_I_I]], label %[[RETURN_LOOPEXIT:.*]], label %[[LOOP_LATCH]]
; CHECK:       [[LOOP_LATCH]]:
; CHECK-NEXT:    [[PTR_IV_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 2
; CHECK-NEXT:    [[CMP_NOT_I_I:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[COERCE_VAL_IP]]
; CHECK-NEXT:    br i1 [[CMP_NOT_I_I]], label %[[RETURN_LOOPEXIT]], label %[[LOOP_HEADER]]
; CHECK:       [[RETURN_LOOPEXIT]]:
; CHECK-NEXT:    [[MERGE_PH:%.*]] = phi ptr [ [[COERCE_VAL_IP]], %[[LOOP_LATCH]] ], [ [[PTR_IV]], %[[LOOP_HEADER]] ]
; CHECK-NEXT:    [[DOTPRE:%.*]] = ptrtoint ptr [[MERGE_PH]] to i64
; CHECK-NEXT:    br label %[[RETURN]]
; CHECK:       [[RETURN]]:
; CHECK-NEXT:    [[RES_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[RETURN_LOOPEXIT]] ], [ [[TMP0]], %[[ENTRY]] ]
; CHECK-NEXT:    ret i64 [[RES_PRE_PHI]]
;
entry:
  %first = alloca { ptr }, align 8
  %s.addr = alloca i16, align 2
  store ptr %first.coerce, ptr %first, align 8
  store i16 %s, ptr %s.addr, align 2
  %0 = load ptr, ptr %first, align 8
  %start.ptr = load ptr, ptr %first, align 8
  %1 = load i64, ptr %first, align 8
  %coerce.val.pi.i = add i64 %1, 256
  %coerce.val.ip = inttoptr i64 %coerce.val.pi.i to ptr
  %cmp.not6.i.i = icmp eq ptr %start.ptr, %coerce.val.ip
  br i1 %cmp.not6.i.i, label %return, label %loop.ph

loop.ph:
  %2 = load i16, ptr %s.addr, align 2
  br label %loop.header

loop.header:
  %ptr.iv = phi ptr [ %start.ptr, %loop.ph ], [ %ptr.iv.next, %loop.latch ]
  %3 = load i16, ptr %ptr.iv, align 2
  %cmp2.i.i = icmp eq i16 %3, %2
  br i1 %cmp2.i.i, label %return, label %loop.latch

loop.latch:
  %ptr.iv.next = getelementptr inbounds nuw i8, ptr %ptr.iv, i64 2
  %cmp.not.i.i = icmp eq ptr %ptr.iv.next, %coerce.val.ip
  br i1 %cmp.not.i.i, label %return, label %loop.header

return:
  %merge = phi ptr [ %start.ptr, %entry ], [ %coerce.val.ip, %loop.latch ], [ %ptr.iv, %loop.header ]
  %res = ptrtoint ptr %merge to i64
  ret i64 %res
}

declare void @llvm.assume(i1 noundef)