
Commit 2e16f24

[RISCV] Add VMNoV0 register class with only the VMaskVTs. (#171231)
I plan to use this for inline assembly "vd" constraints with mask types in a follow-up patch. Due to the test changes, I wanted to post this separately.
1 parent 0c0ed39 commit 2e16f24
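
As a rough illustration (not part of this commit; the constraint handling itself is deferred to the follow-up patch), the intended end state is that inline assembly using the RISC-V "vd" constraint, which requires a vector register other than v0, can also take mask-typed (vbool*_t) operands, and VMNoV0 is the register class such operands would live in. A minimal, hypothetical sketch assuming that follow-up and a compiler built with V-extension support:

// Hypothetical example of the planned use: a mask value passed through a "vd"
// constraint, so the register allocator must pick a mask-capable vector
// register other than v0 (the VMNoV0 class added in this commit).
#include <riscv_vector.h>

vbool8_t negate_mask(vbool8_t m) {
  vbool8_t r;
  // vmnot.m writes the bitwise complement of a mask register.
  __asm__("vmnot.m %0, %1" : "=vd"(r) : "vd"(m));
  return r;
}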

File tree

9 files changed: +24 -23 lines changed

llvm/lib/Target/RISCV/RISCVRegisterInfo.td

Lines changed: 1 addition & 0 deletions
@@ -813,6 +813,7 @@ def VMV0 : VReg<VMaskVTs, (add V0), 1>;
 
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
+def VMNoV0 : VReg<VMaskVTs, (sub VR, V0), 1>;
 
 defvar VTupM1N2VTs = [riscv_nxv8i8x2, riscv_nxv4i8x2, riscv_nxv2i8x2, riscv_nxv1i8x2];
 defvar VTupM1N3VTs = [riscv_nxv8i8x3, riscv_nxv4i8x3, riscv_nxv2i8x3, riscv_nxv1i8x3];

llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir

Lines changed: 10 additions & 10 deletions
@@ -11,15 +11,15 @@ body: |
 bb.0.entry:
 ; RV32I-LABEL: name: select_nxv1i8
 ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
 ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
 ; RV32I-NEXT: PseudoRET implicit $v8
 ;
 ; RV64I-LABEL: name: select_nxv1i8
 ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
 ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -40,15 +40,15 @@ body: |
 bb.0.entry:
 ; RV32I-LABEL: name: select_nxv4i8
 ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
 ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
 ; RV32I-NEXT: PseudoRET implicit $v8
 ;
 ; RV64I-LABEL: name: select_nxv4i8
 ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
 ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -98,15 +98,15 @@ body: |
 bb.0.entry:
 ; RV32I-LABEL: name: select_nxv64i8
 ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
 ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
 ; RV32I-NEXT: PseudoRET implicit $v8
 ;
 ; RV64I-LABEL: name: select_nxv64i8
 ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
 ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -127,15 +127,15 @@ body: |
 bb.0.entry:
 ; RV32I-LABEL: name: select_nxv2i16
 ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
 ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
 ; RV32I-NEXT: PseudoRET implicit $v8
 ;
 ; RV64I-LABEL: name: select_nxv2i16
 ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
 ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -185,15 +185,15 @@ body: |
 bb.0.entry:
 ; RV32I-LABEL: name: select_nxv32i16
 ; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
 ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
 ; RV32I-NEXT: PseudoRET implicit $v8
 ;
 ; RV64I-LABEL: name: select_nxv32i16
 ; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vmnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
 ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
 ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]

llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %x, <vscale x 1 x double
 ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
 ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gprnox0 = SRLI killed [[SLLI]], 32
 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
-; CHECK-NEXT: [[PseudoVFMUL_VV_M1_E64_MASK:%[0-9]+]]:vrnov0 = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFMUL_VV_M1_E64_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], 7, killed [[SRLI]], 6 /* e64 */, 1 /* ta, mu */, implicit $frm
+; CHECK-NEXT: [[PseudoVFMUL_VV_M1_E64_MASK:%[0-9]+]]:vmnov0 = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFMUL_VV_M1_E64_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], 7, killed [[SRLI]], 6 /* e64 */, 1 /* ta, mu */, implicit $frm
 ; CHECK-NEXT: $v8 = COPY [[PseudoVFMUL_VV_M1_E64_MASK]]
 ; CHECK-NEXT: PseudoRET implicit $v8
 %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)

llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@ body: |
 ; CHECK: liveins: $x1, $v8, $v9
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: %false:vrnov0 = COPY $v8
-; CHECK-NEXT: %true:vrnov0 = COPY $v9
+; CHECK-NEXT: %true:vmnov0 = COPY $v9
 ; CHECK-NEXT: %avl:gprnox0 = COPY $x1
 ; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 0 /* e8 */
 ; CHECK-NEXT: $v0 = COPY %mask
@@ -135,7 +135,7 @@ body: |
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: %false:vrnov0 = COPY $v8
 ; CHECK-NEXT: %mask:vmv0 = COPY $v0
-; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 1 /* ta, mu */
+; CHECK-NEXT: %true:vmnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 1 /* ta, mu */
 %false:vrnov0 = COPY $v8
 %mask:vmv0 = COPY $v0
 %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, 4, 5 /* e32 */, 0 /* tu, mu */

llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride,
 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
-; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1)
+; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vmnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1)
 ; CHECK-NEXT: $v8 = COPY [[PseudoVLSE8_V_MF8_MASK]]
 ; CHECK-NEXT: PseudoRET implicit $v8
 %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)

llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll

Lines changed: 2 additions & 2 deletions
@@ -42,9 +42,9 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
-; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+; CHECK-NEXT: [[COPY3:%[0-9]+]]:vmnov0 = COPY $v8
 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
-; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 1)
+; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vmnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 1)
 ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
 ; CHECK-NEXT: PseudoRET implicit $x10
 entry:

llvm/test/CodeGen/RISCV/rvv/vmerge-peephole.mir

Lines changed: 2 additions & 2 deletions
@@ -148,8 +148,8 @@ body: |
 ; CHECK-NEXT: %y:vr = COPY $v9
 ; CHECK-NEXT: %mask:vmv0 = COPY $v0
 ; CHECK-NEXT: %add0:vr = PseudoVADD_VV_M1 $noreg, %x, %y, -1, 5 /* e32 */, 3 /* ta, ma */
-; CHECK-NEXT: %add1:vrnov0 = COPY %add:vrnov0
-; CHECK-NEXT: %merge:vrnov0 = PseudoVOR_VV_M1_MASK %add:vrnov0, %add1, %y, %mask, -1, 5 /* e32 */, 1 /* ta, mu */
+; CHECK-NEXT: %add1:vmnov0 = COPY %add:vmnov0
+; CHECK-NEXT: %merge:vrnov0 = PseudoVOR_VV_M1_MASK %add:vmnov0, %add1, %y, %mask, -1, 5 /* e32 */, 1 /* ta, mu */
 %x:vr = COPY $v8
 %y:vr = COPY $v9
 %mask:vmv0 = COPY $v0

llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir

Lines changed: 3 additions & 3 deletions
@@ -112,7 +112,7 @@ body: |
 ; CHECK-LABEL: name: diff_regclass
 ; CHECK: liveins: $v8
 ; CHECK-NEXT: {{ $}}
-; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
+; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vmnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:vmv0 = COPY $v8
 ; CHECK-NEXT: [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[PseudoVMV_V_I_MF2_]], $noreg, $noreg, [[COPY]], 0, 5 /* e32 */, 0 /* tu, mu */
 %0:vr = PseudoVMV_V_I_MF2 $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */
@@ -128,7 +128,7 @@ body: |
 ; CHECK-LABEL: name: diff_regclass_passthru
 ; CHECK: liveins: $v8
 ; CHECK-NEXT: {{ $}}
-; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
+; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vmnov0 = PseudoVMV_V_I_MF2 $noreg, 0, 0, 5 /* e32 */, 1 /* ta, mu */
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:vmv0 = COPY $v8
 ; CHECK-NEXT: [[PseudoVLSE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], $noreg, $noreg, [[COPY]], 0, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 4)
 %2:vr = PseudoVMV_V_I_MF2 $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */
@@ -162,7 +162,7 @@ body: |
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: %passthru:vrnov0 = COPY $v8
 ; CHECK-NEXT: %mask:vmv0 = COPY $v0
-; CHECK-NEXT: %x:vrnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
+; CHECK-NEXT: %x:vmnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
 %passthru:vrnov0 = COPY $v8
 %mask:vmv0 = COPY $v0
 %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %passthru, $noreg, %mask, 4, 5 /* e32 */

llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir

Lines changed: 2 additions & 2 deletions
@@ -793,7 +793,7 @@ body: |
 ; CHECK-NEXT: %idxs:vr = COPY $v0
 ; CHECK-NEXT: %t1:vr = COPY $v1
 ; CHECK-NEXT: %t3:vr = COPY $v2
-; CHECK-NEXT: [[COPY:%[0-9]+]]:vrnov0 = COPY $v3
+; CHECK-NEXT: [[COPY:%[0-9]+]]:vmnov0 = COPY $v3
 ; CHECK-NEXT: %t5:vrnov0 = COPY $v1
 ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gprnox0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
 ; CHECK-NEXT: %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
@@ -811,7 +811,7 @@ body: |
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: $v0 = COPY %mask
 ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0X0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
-; CHECK-NEXT: early-clobber [[COPY]]:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, %inaddr, %idxs, $v0, -1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+; CHECK-NEXT: early-clobber [[COPY]]:vmnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, %inaddr, %idxs, $v0, -1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
 ; CHECK-NEXT: PseudoBR %bb.3
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: bb.3:
