|
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Negative test: the CHECK lines below mirror the input IR exactly, i.e.
; instcombine is expected to leave this (ptr + loaded value) & 2 == 0 bit
; test untouched. (Presumably bit 1 of the add result is not provably zero
; here — confirm against the intended fold.)
define i1 @addition_and_bitwise1(ptr %0) {
; CHECK-LABEL: define i1 @addition_and_bitwise1(
; CHECK-SAME: ptr [[TMP0:%.*]]) {
; CHECK-NEXT: [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[V0]], align 4
; CHECK-NEXT: [[V2:%.*]] = zext i32 [[V1]] to i64
; CHECK-NEXT: [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
; CHECK-NEXT: [[V4:%.*]] = add i64 [[V2]], [[V3]]
; CHECK-NEXT: [[V5:%.*]] = and i64 [[V4]], 2
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V5]], 0
; CHECK-NEXT: ret i1 [[V6]]
;
  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
  %v1 = load i32, ptr %v0, align 4
  %v2 = zext i32 %v1 to i64
  %v3 = ptrtoint ptr %v0 to i64
  %v4 = add i64 %v2, %v3
  %v5 = and i64 %v4, 2
  %v6 = icmp eq i64 %v5, 0
  ret i1 %v6
}
| 25 | + |
; Same pattern as @addition_and_bitwise1 but with a 16-aligned load and mask
; constant 4 (testing bit 2). The CHECK lines reproduce the input IR, so
; instcombine is expected to make no change for this variant either.
define i1 @addition_and_bitwise2(ptr %0) {
; CHECK-LABEL: define i1 @addition_and_bitwise2(
; CHECK-SAME: ptr [[TMP0:%.*]]) {
; CHECK-NEXT: [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[V0]], align 16
; CHECK-NEXT: [[V2:%.*]] = zext i32 [[V1]] to i64
; CHECK-NEXT: [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
; CHECK-NEXT: [[V4:%.*]] = add i64 [[V2]], [[V3]]
; CHECK-NEXT: [[V5:%.*]] = and i64 [[V4]], 4
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V5]], 0
; CHECK-NEXT: ret i1 [[V6]]
;
  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
  %v1 = load i32, ptr %v0, align 16
  %v2 = zext i32 %v1 to i64
  %v3 = ptrtoint ptr %v0 to i64
  %v4 = add i64 %v2, %v3
  %v5 = and i64 %v4, 4
  %v6 = icmp eq i64 %v5, 0
  ret i1 %v6
}
| 47 | + |
; Commuted variant of @addition_and_bitwise2: the add operands are swapped
; (%v3 + %v2 instead of %v2 + %v3). The CHECK lines again mirror the input,
; so no fold is expected regardless of operand order.
define i1 @addition_and_bitwise3(ptr %0) {
; CHECK-LABEL: define i1 @addition_and_bitwise3(
; CHECK-SAME: ptr [[TMP0:%.*]]) {
; CHECK-NEXT: [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[V0]], align 16
; CHECK-NEXT: [[V2:%.*]] = zext i32 [[V1]] to i64
; CHECK-NEXT: [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
; CHECK-NEXT: [[V4:%.*]] = add i64 [[V3]], [[V2]]
; CHECK-NEXT: [[V5:%.*]] = and i64 [[V4]], 4
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V5]], 0
; CHECK-NEXT: ret i1 [[V6]]
;
  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
  %v1 = load i32, ptr %v0, align 16
  %v2 = zext i32 %v1 to i64
  %v3 = ptrtoint ptr %v0 to i64
  %v4 = add i64 %v3, %v2
  %v5 = and i64 %v4, 4
  %v6 = icmp eq i64 %v5, 0
  ret i1 %v6
}
| 69 | + |
; Positive test: %y_masked has bits 0-1 cleared by the -4 mask, so adding it
; cannot change bits 0-1 of %x, and the bit-1 test (& 2) of the sum reduces
; to testing %x directly. The CHECK lines expect instcombine to fold the
; whole sequence down to a single `and i32 %x, 2`.
define i32 @test(i32 %x, i32 %y) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 2
; CHECK-NEXT: ret i32 [[AND]]
;
  %y_masked = and i32 %y, -4
  %add = add i32 %x, %y_masked
  %and = and i32 %add, 2
  ret i32 %and
}
| 81 | + |
0 commit comments