 ; PR125228

 define <16 x i8> @knownbits_bitcast_masked_shift(<16 x i8> %arg1, <16 x i8> %arg2) {
-<<<<<<< HEAD
-; CHECK-LABEL: define <16 x i8> @knownbits_bitcast_masked_shift(
-; CHECK-SAME: <16 x i8> [[ARG1:%.*]], <16 x i8> [[ARG2:%.*]]) {
-; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG1]], splat (i8 3)
-; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[ARG2]], splat (i8 48)
-; CHECK-NEXT:    [[OR:%.*]] = or disjoint <16 x i8> [[AND3]], [[AND]]
-; CHECK-NEXT:    [[BITCAST4:%.*]] = bitcast <16 x i8> [[OR]] to <8 x i16>
-; CHECK-NEXT:    [[SHL5:%.*]] = shl nuw <8 x i16> [[BITCAST4]], splat (i16 2)
-; CHECK-NEXT:    [[BITCAST6:%.*]] = bitcast <8 x i16> [[SHL5]] to <16 x i8>
-; CHECK-NEXT:    [[AND7:%.*]] = and <16 x i8> [[BITCAST6]], splat (i8 -52)
-; CHECK-NEXT:    ret <16 x i8> [[AND7]]
-;
-  %and = and <16 x i8> %arg1, splat (i8 3)
-  %and3 = and <16 x i8> %arg2, splat (i8 48)
-  %or = or disjoint <16 x i8> %and3, %and
-  %bitcast4 = bitcast <16 x i8> %or to <8 x i16>
-  %shl5 = shl nuw <8 x i16> %bitcast4, splat (i16 2)
-  %bitcast6 = bitcast <8 x i16> %shl5 to <16 x i8>
-  %and7 = and <16 x i8> %bitcast6, splat (i8 -52)
-  ret <16 x i8> %and7
-}
-
-define <16 x i8> @knownbits_shuffle_masked_nibble_shift(<16 x i8> %arg) {
-; CHECK-LABEL: define <16 x i8> @knownbits_shuffle_masked_nibble_shift(
-; CHECK-SAME: <16 x i8> [[ARG:%.*]]) {
-; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG]], splat (i8 15)
-; CHECK-NEXT:    [[SHUFFLEVECTOR:%.*]] = shufflevector <16 x i8> [[AND]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
-; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <16 x i8> [[SHUFFLEVECTOR]] to <8 x i16>
-; CHECK-NEXT:    [[SHL:%.*]] = shl nuw <8 x i16> [[BITCAST1]], splat (i16 4)
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast <8 x i16> [[SHL]] to <16 x i8>
-; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[BITCAST2]], splat (i8 -16)
-; CHECK-NEXT:    ret <16 x i8> [[AND3]]
-;
-  %and = and <16 x i8> %arg, splat (i8 15)
-  %shufflevector = shufflevector <16 x i8> %and, <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
-  %bitcast1 = bitcast <16 x i8> %shufflevector to <8 x i16>
-  %shl = shl nuw <8 x i16> %bitcast1, splat (i16 4)
-  %bitcast2 = bitcast <8 x i16> %shl to <16 x i8>
-  %and3 = and <16 x i8> %bitcast2, splat (i8 -16)
-  ret <16 x i8> %and3
-}
-
-define <16 x i8> @knownbits_reverse_shuffle_masked_shift(<16 x i8> %arg) {
-; CHECK-LABEL: define <16 x i8> @knownbits_reverse_shuffle_masked_shift(
-; CHECK-SAME: <16 x i8> [[ARG:%.*]]) {
-; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG]], splat (i8 15)
-; CHECK-NEXT:    [[SHUFFLEVECTOR:%.*]] = shufflevector <16 x i8> [[AND]], <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
-; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <16 x i8> [[SHUFFLEVECTOR]] to <8 x i16>
-; CHECK-NEXT:    [[SHL:%.*]] = shl nuw <8 x i16> [[BITCAST1]], splat (i16 4)
-; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast <8 x i16> [[SHL]] to <16 x i8>
-; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[BITCAST2]], splat (i8 -16)
-; CHECK-NEXT:    ret <16 x i8> [[AND3]]
-;
-  %and = and <16 x i8> %arg, splat (i8 15)
-  %shufflevector = shufflevector <16 x i8> %and, <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
-  %bitcast1 = bitcast <16 x i8> %shufflevector to <8 x i16>
-  %shl = shl nuw <8 x i16> %bitcast1, splat (i16 4)
-  %bitcast2 = bitcast <8 x i16> %shl to <16 x i8>
-  %and3 = and <16 x i8> %bitcast2, splat (i8 -16)
-  ret <16 x i8> %and3
-}
-
-define <16 x i8> @knownbits_extract_bit(<8 x i16> %arg) {
-; CHECK-LABEL: define <16 x i8> @knownbits_extract_bit(
-; CHECK-SAME: <8 x i16> [[ARG:%.*]]) {
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr <8 x i16> [[ARG]], splat (i16 15)
-; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <8 x i16> [[LSHR]] to <16 x i8>
-; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[BITCAST1]], splat (i8 1)
-; CHECK-NEXT:    ret <16 x i8> [[AND]]
-;
-  %lshr = lshr <8 x i16> %arg, splat (i16 15)
-  %bitcast1 = bitcast <8 x i16> %lshr to <16 x i8>
-  %and = and <16 x i8> %bitcast1, splat (i8 1)
-  ret <16 x i8> %and
-}
-
-define { i32, i1 } @knownbits_popcount_add_with_overflow(<2 x i64> %arg1, <2 x i64> %arg2) {
-; CHECK-LABEL: define { i32, i1 } @knownbits_popcount_add_with_overflow(
-; CHECK-SAME: <2 x i64> [[ARG1:%.*]], <2 x i64> [[ARG2:%.*]]) {
-; CHECK-NEXT:    [[CALL:%.*]] = tail call range(i64 0, 65) <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[ARG1]])
-; CHECK-NEXT:    [[BITCAST5:%.*]] = bitcast <2 x i64> [[CALL]] to <4 x i32>
-; CHECK-NEXT:    [[EXTRACTELEMENT:%.*]] = extractelement <4 x i32> [[BITCAST5]], i64 0
-; CHECK-NEXT:    [[CALL9:%.*]] = tail call range(i64 0, 65) <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[ARG2]])
-; CHECK-NEXT:    [[BITCAST10:%.*]] = bitcast <2 x i64> [[CALL9]] to <4 x i32>
-; CHECK-NEXT:    [[EXTRACTELEMENT11:%.*]] = extractelement <4 x i32> [[BITCAST10]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[EXTRACTELEMENT]], i32 [[EXTRACTELEMENT11]])
-; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
-;
-  %call = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %arg1)
-  %bitcast5 = bitcast <2 x i64> %call to <4 x i32>
-  %extractelement = extractelement <4 x i32> %bitcast5, i64 0
-  %call9 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %arg2)
-  %bitcast10 = bitcast <2 x i64> %call9 to <4 x i32>
-  %extractelement11 = extractelement <4 x i32> %bitcast10, i64 0
-  %call12 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %extractelement, i32 %extractelement11)
-  ret { i32, i1 } %call12
-}
-
-define <16 x i8> @knownbits_shuffle_add_shift_v32i8(<16 x i8> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
-; CHECK-LABEL: define <16 x i8> @knownbits_shuffle_add_shift_v32i8(
-; CHECK-SAME: <16 x i8> [[ARG1:%.*]], <8 x i16> [[ARG2:%.*]], <8 x i16> [[ARG3:%.*]]) {
-; CHECK-NEXT:    [[SHL6:%.*]] = shl <8 x i16> [[ARG2]], splat (i16 8)
-; CHECK-NEXT:    [[BITCAST7:%.*]] = bitcast <8 x i16> [[SHL6]] to <16 x i8>
-; CHECK-NEXT:    [[SHL10:%.*]] = shl <8 x i16> [[ARG3]], splat (i16 8)
-; CHECK-NEXT:    [[BITCAST11:%.*]] = bitcast <8 x i16> [[SHL10]] to <16 x i8>
-; CHECK-NEXT:    [[ADD12:%.*]] = add <16 x i8> [[BITCAST11]], [[BITCAST7]]
-; CHECK-NEXT:    [[ADD14:%.*]] = add <16 x i8> [[ADD12]], [[ARG1]]
-; CHECK-NEXT:    [[BITCAST14:%.*]] = bitcast <16 x i8> [[ADD12]] to <8 x i16>
-; CHECK-NEXT:    [[SHL15:%.*]] = shl <8 x i16> [[BITCAST14]], splat (i16 8)
-; CHECK-NEXT:    [[BITCAST16:%.*]] = bitcast <8 x i16> [[SHL15]] to <16 x i8>
-; CHECK-NEXT:    [[ADD13:%.*]] = add <16 x i8> [[ADD14]], [[BITCAST16]]
-; CHECK-NEXT:    ret <16 x i8> [[ADD13]]
-;
-  %shl6 = shl <8 x i16> %arg2, splat (i16 8)
-  %bitcast7 = bitcast <8 x i16> %shl6 to <16 x i8>
-  %shl10 = shl <8 x i16> %arg3, splat (i16 8)
-  %bitcast11 = bitcast <8 x i16> %shl10 to <16 x i8>
-  %add12 = add <16 x i8> %bitcast11, %bitcast7
-  %add13 = add <16 x i8> %add12, %arg1
-  %bitcast14 = bitcast <16 x i8> %add12 to <8 x i16>
-  %shl15 = shl <8 x i16> %bitcast14, splat (i16 8)
-  %bitcast16 = bitcast <8 x i16> %shl15 to <16 x i8>
-  %add17 = add <16 x i8> %add13, %bitcast16
-  ret <16 x i8> %add17
-}
-
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-
-declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
-
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-
-; PR125228
-
-define <16 x i8> @knownbits_bitcast_masked_shift(<16 x i8> %arg) {
-=======
->>>>>>> 46898bd99c8b (Update tests)
 ; CHECK-LABEL: define <16 x i8> @knownbits_bitcast_masked_shift(
 ; CHECK-SAME: <16 x i8> [[ARG1:%.*]], <16 x i8> [[ARG2:%.*]]) {
 ; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG1]], splat (i8 3)