Skip to content

Commit ac3fda6

Browse files
Commit message: Fix GISel tests
(1 parent: 99109d5 · commit: ac3fda6)

File tree

8 files changed: 32 additions (+32) and 32 deletions (−32).

llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -65,7 +65,7 @@ registers:
6565
# AVX-NEXT: RET 0, implicit $xmm0
6666
#
6767
# AVX512VL: %0:vr256x = COPY $ymm1
68-
# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rri %0, 1
68+
# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32X4Z256rri %0, 1
6969
# AVX512VL-NEXT: $xmm0 = COPY %1
7070
# AVX512VL-NEXT: RET 0, implicit $xmm0
7171
body: |

llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -59,7 +59,7 @@ registers:
5959
- { id: 0, class: vecr }
6060
- { id: 1, class: vecr }
6161
# ALL: %0:vr512 = COPY $zmm1
62-
# ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrri %0, 1
62+
# ALL-NEXT: %1:vr128x = VEXTRACTF32X4Zrri %0, 1
6363
# ALL-NEXT: $xmm0 = COPY %1
6464
# ALL-NEXT: RET 0, implicit $xmm0
6565
body: |
@@ -111,7 +111,7 @@ registers:
111111
- { id: 0, class: vecr }
112112
- { id: 1, class: vecr }
113113
# ALL: %0:vr512 = COPY $zmm1
114-
# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrri %0, 1
114+
# ALL-NEXT: %1:vr256x = VEXTRACTF64X4Zrri %0, 1
115115
# ALL-NEXT: $ymm0 = COPY %1
116116
# ALL-NEXT: RET 0, implicit $ymm0
117117
body: |

llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -36,7 +36,7 @@ registers:
3636
#
3737
# AVX512VL: %0:vr256x = COPY $ymm0
3838
# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
39-
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 0
39+
# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 0
4040
# AVX512VL-NEXT: $ymm0 = COPY %2
4141
# AVX512VL-NEXT: RET 0, implicit $ymm0
4242
body: |
@@ -98,7 +98,7 @@ registers:
9898
#
9999
# AVX512VL: %0:vr256x = COPY $ymm0
100100
# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
101-
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
101+
# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
102102
# AVX512VL-NEXT: $ymm0 = COPY %2
103103
# AVX512VL-NEXT: RET 0, implicit $ymm0
104104
body: |
@@ -129,7 +129,7 @@ registers:
129129
#
130130
# AVX512VL: %0:vr256x = IMPLICIT_DEF
131131
# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
132-
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
132+
# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
133133
# AVX512VL-NEXT: $ymm0 = COPY %2
134134
# AVX512VL-NEXT: RET 0, implicit $ymm0
135135
body: |

llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir

Lines changed: 12 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -51,8 +51,8 @@ body: |
5151
; ALL-LABEL: name: test_insert_128_idx0
5252
; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
5353
; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
54-
; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 0
55-
; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
54+
; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 0
55+
; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
5656
; ALL: RET 0, implicit $ymm0
5757
%0(<16 x s32>) = COPY $zmm0
5858
%1(<4 x s32>) = COPY $xmm1
@@ -102,8 +102,8 @@ body: |
102102
; ALL-LABEL: name: test_insert_128_idx1
103103
; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
104104
; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
105-
; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 1
106-
; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
105+
; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 1
106+
; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
107107
; ALL: RET 0, implicit $ymm0
108108
%0(<16 x s32>) = COPY $zmm0
109109
%1(<4 x s32>) = COPY $xmm1
@@ -127,8 +127,8 @@ body: |
127127
; ALL-LABEL: name: test_insert_128_idx1_undef
128128
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
129129
; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
130-
; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[DEF]], [[COPY]], 1
131-
; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
130+
; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[DEF]], [[COPY]], 1
131+
; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
132132
; ALL: RET 0, implicit $ymm0
133133
%0(<16 x s32>) = IMPLICIT_DEF
134134
%1(<4 x s32>) = COPY $xmm1
@@ -152,8 +152,8 @@ body: |
152152
; ALL-LABEL: name: test_insert_256_idx0
153153
; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
154154
; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
155-
; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 0
156-
; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
155+
; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 0
156+
; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
157157
; ALL: RET 0, implicit $ymm0
158158
%0(<16 x s32>) = COPY $zmm0
159159
%1(<8 x s32>) = COPY $ymm1
@@ -203,8 +203,8 @@ body: |
203203
; ALL-LABEL: name: test_insert_256_idx1
204204
; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
205205
; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
206-
; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 1
207-
; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
206+
; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 1
207+
; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
208208
; ALL: RET 0, implicit $ymm0
209209
%0(<16 x s32>) = COPY $zmm0
210210
%1(<8 x s32>) = COPY $ymm1
@@ -228,8 +228,8 @@ body: |
228228
; ALL-LABEL: name: test_insert_256_idx1_undef
229229
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
230230
; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
231-
; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[DEF]], [[COPY]], 1
232-
; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
231+
; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[DEF]], [[COPY]], 1
232+
; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
233233
; ALL: RET 0, implicit $ymm0
234234
%0(<16 x s32>) = IMPLICIT_DEF
235235
%1(<8 x s32>) = COPY $ymm1

llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -29,8 +29,8 @@ body: |
2929
; AVX512VL-LABEL: name: test_merge
3030
; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
3131
; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
32-
; AVX512VL: [[VINSERTF32x4Z256rri:%[0-9]+]]:vr256x = VINSERTF32x4Z256rri %2, [[DEF]], 1
33-
; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rri]]
32+
; AVX512VL: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri %2, [[DEF]], 1
33+
; AVX512VL: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
3434
; AVX512VL: RET 0, implicit $ymm0
3535
%0(<4 x s32>) = IMPLICIT_DEF
3636
%1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>)

llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -24,10 +24,10 @@ body: |
2424
; ALL-LABEL: name: test_merge_v128
2525
; ALL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
2626
; ALL: undef %2.sub_xmm:vr512 = COPY [[DEF]]
27-
; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri %2, [[DEF]], 1
28-
; ALL: [[VINSERTF32x4Zrri1:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri]], [[DEF]], 2
29-
; ALL: [[VINSERTF32x4Zrri2:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri1]], [[DEF]], 3
30-
; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri2]]
27+
; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri %2, [[DEF]], 1
28+
; ALL: [[VINSERTF32X4Zrri1:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri]], [[DEF]], 2
29+
; ALL: [[VINSERTF32X4Zrri2:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri1]], [[DEF]], 3
30+
; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri2]]
3131
; ALL: RET 0, implicit $zmm0
3232
%0(<4 x s32>) = IMPLICIT_DEF
3333
%1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
@@ -49,8 +49,8 @@ body: |
4949
; ALL-LABEL: name: test_merge_v256
5050
; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
5151
; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
52-
; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri %2, [[DEF]], 1
53-
; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
52+
; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri %2, [[DEF]], 1
53+
; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
5454
; ALL: RET 0, implicit $zmm0
5555
%0(<8 x s32>) = IMPLICIT_DEF
5656
%1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>)

llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -33,9 +33,9 @@ body: |
3333
; AVX512VL-LABEL: name: test_unmerge
3434
; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
3535
; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
36-
; AVX512VL-NEXT: [[VEXTRACTF32x4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rri [[DEF]], 1
36+
; AVX512VL-NEXT: [[VEXTRACTF32X4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32X4Z256rri [[DEF]], 1
3737
; AVX512VL-NEXT: $xmm0 = COPY [[COPY]]
38-
; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32x4Z256rri]]
38+
; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32X4Z256rri]]
3939
; AVX512VL-NEXT: RET 0, implicit $xmm0, implicit $xmm1
4040
%0(<8 x s32>) = IMPLICIT_DEF
4141
%1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)

llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir

Lines changed: 4 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -27,9 +27,9 @@ body: |
2727
; ALL-LABEL: name: test_unmerge_v128
2828
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
2929
; ALL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
30-
; ALL: [[VEXTRACTF32x4Zrri:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 1
31-
; ALL: [[VEXTRACTF32x4Zrri1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 2
32-
; ALL: [[VEXTRACTF32x4Zrri2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 3
30+
; ALL: [[VEXTRACTF32X4Zrri:%[0-9]+]]:vr128x = VEXTRACTF32X4Zrri [[DEF]], 1
31+
; ALL: [[VEXTRACTF32X4Zrri1:%[0-9]+]]:vr128x = VEXTRACTF32X4Zrri [[DEF]], 2
32+
; ALL: [[VEXTRACTF32X4Zrri2:%[0-9]+]]:vr128x = VEXTRACTF32X4Zrri [[DEF]], 3
3333
; ALL: $xmm0 = COPY [[COPY]]
3434
; ALL: RET 0, implicit $xmm0
3535
%0(<16 x s32>) = IMPLICIT_DEF
@@ -53,7 +53,7 @@ body: |
5353
; ALL-LABEL: name: test_unmerge_v256
5454
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
5555
; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm
56-
; ALL: [[VEXTRACTF64x4Zrri:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrri [[DEF]], 1
56+
; ALL: [[VEXTRACTF64X4Zrri:%[0-9]+]]:vr256x = VEXTRACTF64X4Zrri [[DEF]], 1
5757
; ALL: $ymm0 = COPY [[COPY]]
5858
; ALL: RET 0, implicit $ymm0
5959
%0(<16 x s32>) = IMPLICIT_DEF

Commit comments (0)