@@ -367,6 +367,24 @@ define {<4 x i32>, <4 x i32>} @vpload_factor3_mask_skip_fields(ptr %ptr) {
ret {<4 x i32>, <4 x i32>} %res1
}

+ define {<4 x i32>, <4 x i32>} @vpload_factor3_combined_mask_skip_field(ptr %ptr, <4 x i1> %mask) {
+ ; CHECK-LABEL: vpload_factor3_combined_mask_skip_field:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: li a1, 12
+ ; CHECK-NEXT: vsetivli zero, 6, e32, m1, ta, ma
+ ; CHECK-NEXT: vlsseg2e32.v v8, (a0), a1, v0.t
+ ; CHECK-NEXT: ret
+ %interleaved.mask = shufflevector <4 x i1> %mask, <4 x i1> poison, <12 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3>
+ %combined = and <12 x i1> %interleaved.mask, <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false>
+ %interleaved.vec = tail call <12 x i32> @llvm.vp.load.v12i32.p0(ptr %ptr, <12 x i1> %combined, i32 12)
+ ; mask = %mask, skip the last field
+ %v0 = shufflevector <12 x i32> %interleaved.vec, <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+ %v1 = shufflevector <12 x i32> %interleaved.vec, <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+ %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+ ret {<4 x i32>, <4 x i32>} %res1
+ }
+
define {<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>} @vpload_factor4(ptr %ptr) {
; CHECK-LABEL: vpload_factor4:
; CHECK: # %bb.0:
@@ -514,8 +532,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: li a2, 32
; RV32-NEXT: lui a3, 12
; RV32-NEXT: lui a6, 12291
- ; RV32-NEXT: lui a7, %hi(.LCPI25_0)
- ; RV32-NEXT: addi a7, a7, %lo(.LCPI25_0)
+ ; RV32-NEXT: lui a7, %hi(.LCPI26_0)
+ ; RV32-NEXT: addi a7, a7, %lo(.LCPI26_0)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v24, (a5)
; RV32-NEXT: vmv.s.x v0, a3
@@ -600,12 +618,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: lui a7, 49164
- ; RV32-NEXT: lui a1, %hi(.LCPI25_1)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_1)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_1)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_1)
; RV32-NEXT: lui t2, 3
; RV32-NEXT: lui t1, 196656
- ; RV32-NEXT: lui a4, %hi(.LCPI25_3)
- ; RV32-NEXT: addi a4, a4, %lo(.LCPI25_3)
+ ; RV32-NEXT: lui a4, %hi(.LCPI26_3)
+ ; RV32-NEXT: addi a4, a4, %lo(.LCPI26_3)
; RV32-NEXT: lui t0, 786624
; RV32-NEXT: li a5, 48
; RV32-NEXT: lui a6, 768
@@ -784,8 +802,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v24, v8, v2
- ; RV32-NEXT: lui a1, %hi(.LCPI25_2)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_2)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_2)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_2)
; RV32-NEXT: lui a3, 3073
; RV32-NEXT: addi a3, a3, -1024
; RV32-NEXT: vmv.s.x v0, a3
@@ -849,16 +867,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vrgatherei16.vv v28, v8, v3
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v28, v24
- ; RV32-NEXT: lui a1, %hi(.LCPI25_4)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_4)
- ; RV32-NEXT: lui a2, %hi(.LCPI25_5)
- ; RV32-NEXT: addi a2, a2, %lo(.LCPI25_5)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_4)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_4)
+ ; RV32-NEXT: lui a2, %hi(.LCPI26_5)
+ ; RV32-NEXT: addi a2, a2, %lo(.LCPI26_5)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v24, (a2)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle16.v v8, (a1)
- ; RV32-NEXT: lui a1, %hi(.LCPI25_7)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_7)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_7)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_7)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle16.v v10, (a1)
; RV32-NEXT: csrr a1, vlenb
@@ -886,14 +904,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v16, v0, v10
- ; RV32-NEXT: lui a1, %hi(.LCPI25_6)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_6)
- ; RV32-NEXT: lui a2, %hi(.LCPI25_8)
- ; RV32-NEXT: addi a2, a2, %lo(.LCPI25_8)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_6)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_6)
+ ; RV32-NEXT: lui a2, %hi(.LCPI26_8)
+ ; RV32-NEXT: addi a2, a2, %lo(.LCPI26_8)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle16.v v4, (a1)
- ; RV32-NEXT: lui a1, %hi(.LCPI25_9)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI25_9)
+ ; RV32-NEXT: lui a1, %hi(.LCPI26_9)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI26_9)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v6, (a1)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -980,8 +998,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: li a4, 128
; RV64-NEXT: lui a1, 1
; RV64-NEXT: vle64.v v8, (a3)
- ; RV64-NEXT: lui a3, %hi(.LCPI25_0)
- ; RV64-NEXT: addi a3, a3, %lo(.LCPI25_0)
+ ; RV64-NEXT: lui a3, %hi(.LCPI26_0)
+ ; RV64-NEXT: addi a3, a3, %lo(.LCPI26_0)
; RV64-NEXT: vmv.s.x v0, a4
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: li a5, 61
@@ -1169,8 +1187,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vslideup.vi v12, v16, 1, v0.t
- ; RV64-NEXT: lui a2, %hi(.LCPI25_1)
- ; RV64-NEXT: addi a2, a2, %lo(.LCPI25_1)
+ ; RV64-NEXT: lui a2, %hi(.LCPI26_1)
+ ; RV64-NEXT: addi a2, a2, %lo(.LCPI26_1)
; RV64-NEXT: li a3, 192
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: vle16.v v6, (a2)
@@ -1204,8 +1222,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vrgatherei16.vv v24, v16, v6
; RV64-NEXT: addi a2, sp, 16
; RV64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
- ; RV64-NEXT: lui a2, %hi(.LCPI25_2)
- ; RV64-NEXT: addi a2, a2, %lo(.LCPI25_2)
+ ; RV64-NEXT: lui a2, %hi(.LCPI26_2)
+ ; RV64-NEXT: addi a2, a2, %lo(.LCPI26_2)
; RV64-NEXT: li a3, 1040
; RV64-NEXT: vmv.s.x v0, a3
; RV64-NEXT: addi a1, a1, -2016
@@ -1289,12 +1307,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
- ; RV64-NEXT: lui a1, %hi(.LCPI25_3)
- ; RV64-NEXT: addi a1, a1, %lo(.LCPI25_3)
+ ; RV64-NEXT: lui a1, %hi(.LCPI26_3)
+ ; RV64-NEXT: addi a1, a1, %lo(.LCPI26_3)
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: vle16.v v20, (a1)
- ; RV64-NEXT: lui a1, %hi(.LCPI25_4)
- ; RV64-NEXT: addi a1, a1, %lo(.LCPI25_4)
+ ; RV64-NEXT: lui a1, %hi(.LCPI26_4)
+ ; RV64-NEXT: addi a1, a1, %lo(.LCPI26_4)
; RV64-NEXT: vle16.v v8, (a1)
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 77
@@ -1345,8 +1363,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vrgatherei16.vv v0, v16, v8
- ; RV64-NEXT: lui a1, %hi(.LCPI25_5)
- ; RV64-NEXT: addi a1, a1, %lo(.LCPI25_5)
+ ; RV64-NEXT: lui a1, %hi(.LCPI26_5)
+ ; RV64-NEXT: addi a1, a1, %lo(.LCPI26_5)
; RV64-NEXT: vle16.v v20, (a1)
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 61
@@ -1963,8 +1981,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
; RV32-NEXT: vle32.v v12, (a0), v0.t
; RV32-NEXT: li a0, 36
; RV32-NEXT: vmv.s.x v20, a1
- ; RV32-NEXT: lui a1, %hi(.LCPI61_0)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI61_0)
+ ; RV32-NEXT: lui a1, %hi(.LCPI62_0)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI62_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v21, (a1)
; RV32-NEXT: vcompress.vm v8, v12, v11
@@ -2039,8 +2057,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
; RV32-NEXT: vmv.s.x v10, a0
; RV32-NEXT: li a0, 146
; RV32-NEXT: vmv.s.x v11, a0
- ; RV32-NEXT: lui a0, %hi(.LCPI62_0)
- ; RV32-NEXT: addi a0, a0, %lo(.LCPI62_0)
+ ; RV32-NEXT: lui a0, %hi(.LCPI63_0)
+ ; RV32-NEXT: addi a0, a0, %lo(.LCPI63_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v20, (a0)
; RV32-NEXT: li a0, 36
@@ -2181,6 +2199,67 @@ define {<4 x i32>, <4 x i32>} @maskedload_factor3_mask_skip_field(ptr %ptr) {
ret {<4 x i32>, <4 x i32>} %res1
}

+ define {<4 x i32>, <4 x i32>} @maskedload_factor3_combined_mask_skip_field(ptr %ptr, <4 x i1> %mask) {
+ ; CHECK-LABEL: maskedload_factor3_combined_mask_skip_field:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: li a1, 12
+ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+ ; CHECK-NEXT: vlsseg2e32.v v8, (a0), a1, v0.t
+ ; CHECK-NEXT: ret
+ %interleaved.mask = shufflevector <4 x i1> %mask, <4 x i1> poison, <12 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3>
+ %combined = and <12 x i1> %interleaved.mask, <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true, i1 false>
+ %interleaved.vec = tail call <12 x i32> @llvm.masked.load.v12i32.p0(ptr %ptr, i32 4, <12 x i1> %combined, <12 x i32> poison)
+ ; mask = %mask, skip the last field
+ %v0 = shufflevector <12 x i32> %interleaved.vec, <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+ %v1 = shufflevector <12 x i32> %interleaved.vec, <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+ %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+ ret {<4 x i32>, <4 x i32>} %res1
+ }
+
+ define {<4 x i32>, <4 x i32>} @maskedload_factor4_combined_mask_multi_skip_fields(ptr %ptr, <4 x i1> %mask) {
+ ; CHECK-LABEL: maskedload_factor4_combined_mask_multi_skip_fields:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: li a1, 16
+ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+ ; CHECK-NEXT: vlsseg2e32.v v8, (a0), a1, v0.t
+ ; CHECK-NEXT: ret
+ %interleaved.mask = shufflevector <4 x i1> %mask, <4 x i1> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ %combined = and <16 x i1> %interleaved.mask, <i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>
+ %combined1 = and <16 x i1> %combined, <i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true>
+ %interleaved.vec = tail call <16 x i32> @llvm.masked.load.v16i32.p0(ptr %ptr, i32 4, <16 x i1> %combined1, <16 x i32> poison)
+ ; mask = %mask, skip the last 2 fields
+ %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+ %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+ %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+ %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+ ret {<4 x i32>, <4 x i32>} %res1
+ }
+
+ define {<4 x i32>, <4 x i32>} @maskedload_factor4_combined_mask_multi_skip_fields_and_masks(ptr %ptr, <4 x i1> %mask, <4 x i1> %mask2) {
+ ; CHECK-LABEL: maskedload_factor4_combined_mask_multi_skip_fields_and_masks:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+ ; CHECK-NEXT: vmand.mm v0, v0, v8
+ ; CHECK-NEXT: li a1, 16
+ ; CHECK-NEXT: vlsseg2e32.v v8, (a0), a1, v0.t
+ ; CHECK-NEXT: ret
+ %interleaved.mask = shufflevector <4 x i1> %mask, <4 x i1> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ %combined = and <16 x i1> %interleaved.mask, <i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>
+
+ %interleaved.mask2 = shufflevector <4 x i1> %mask2, <4 x i1> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ %combined1 = and <16 x i1> %interleaved.mask2, <i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true>
+
+ %combined2 = and <16 x i1> %combined, %combined1
+ %interleaved.vec = tail call <16 x i32> @llvm.masked.load.v16i32.p0(ptr %ptr, i32 4, <16 x i1> %combined2, <16 x i32> poison)
+ ; mask = %mask & %mask2, skip the last 2 fields
+ %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+ %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+ %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+ %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+ ret {<4 x i32>, <4 x i32>} %res1
+ }
+
; We can only skip the last field for now.
define {<4 x i32>, <4 x i32>, <4 x i32>} @maskedload_factor3_invalid_skip_field(ptr %ptr) {
; RV32-LABEL: maskedload_factor3_invalid_skip_field:
@@ -2198,8 +2277,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @maskedload_factor3_invalid_skip_field(
; RV32-NEXT: vle32.v v12, (a0), v0.t
; RV32-NEXT: li a0, 36
; RV32-NEXT: vmv.s.x v20, a1
- ; RV32-NEXT: lui a1, %hi(.LCPI68_0)
- ; RV32-NEXT: addi a1, a1, %lo(.LCPI68_0)
+ ; RV32-NEXT: lui a1, %hi(.LCPI72_0)
+ ; RV32-NEXT: addi a1, a1, %lo(.LCPI72_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v21, (a1)
; RV32-NEXT: vcompress.vm v8, v12, v11