[IA][RISCV] Detecting gap mask from a mask assembled by interleaveN intrinsics #153510
@@ -205,6 +205,23 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_intrinsic(ptr %pt
   ret {<4 x i32>, <4 x i32>} %res1
 }

+; mask = %m, skip the last two fields.
+define {<2 x i32>, <2 x i32>} @vpload_factor4_interleaved_mask_intrinsic_skip_fields(ptr %ptr, <2 x i1> %m) {
+; CHECK-LABEL: vpload_factor4_interleaved_mask_intrinsic_skip_fields:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, ma
+; CHECK-NEXT: vlsseg2e32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+  %interleaved.mask = call <8 x i1> @llvm.vector.interleave4(<2 x i1> %m, <2 x i1> %m, <2 x i1> splat (i1 false), <2 x i1> splat (i1 false))

Reviewer: Please add a test with zero as the first operand.

Author: I had thought about it, but right now RISC-V does not support non-trailing gaps, so the transformation wouldn't kick in (and thus the emitted code would look the same) regardless of whether we return the correct value, as we do now, or the incorrect one returned before this change.
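For illustration only, a leading-gap variant of the test above (the case the reviewer asked about) might look like the sketch below. It is not part of this patch: the function name is hypothetical, and since the RISC-V lowering currently only recognizes trailing gaps, the generated code for this input would not change.

```llvm
; Hypothetical test, not in this patch: the gap (all-false mask) covers the
; *first* two fields, so the factor-4 deinterleave keeps only fields 2 and 3.
define {<2 x i32>, <2 x i32>} @vpload_factor4_leading_gap(ptr %ptr, <2 x i1> %m) {
  %mask = call <8 x i1> @llvm.vector.interleave4(<2 x i1> splat (i1 false), <2 x i1> splat (i1 false), <2 x i1> %m, <2 x i1> %m)
  %wide = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %mask, i32 8)
  %v2 = shufflevector <8 x i32> %wide, <8 x i32> poison, <2 x i32> <i32 2, i32 6>
  %v3 = shufflevector <8 x i32> %wide, <8 x i32> poison, <2 x i32> <i32 3, i32 7>
  %res0 = insertvalue {<2 x i32>, <2 x i32>} undef, <2 x i32> %v2, 0
  %res1 = insertvalue {<2 x i32>, <2 x i32>} %res0, <2 x i32> %v3, 1
  ret {<2 x i32>, <2 x i32>} %res1
}
```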
+  %interleaved.vec = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
+  %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <2 x i32> <i32 0, i32 4>
+  %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <2 x i32> <i32 1, i32 5>
+  %res0 = insertvalue {<2 x i32>, <2 x i32>} undef, <2 x i32> %v0, 0
+  %res1 = insertvalue {<2 x i32>, <2 x i32>} %res0, <2 x i32> %v1, 1
+  ret {<2 x i32>, <2 x i32>} %res1
+}
+
 define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_shuffle(ptr %ptr, <4 x i1> %m) {
 ; CHECK-LABEL: vpload_factor2_interleaved_mask_shuffle:
 ; CHECK: # %bb.0:

@@ -514,8 +531,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT: li a2, 32
 ; RV32-NEXT: lui a3, 12
 ; RV32-NEXT: lui a6, 12291
-; RV32-NEXT: lui a7, %hi(.LCPI25_0)
-; RV32-NEXT: addi a7, a7, %lo(.LCPI25_0)
+; RV32-NEXT: lui a7, %hi(.LCPI26_0)
+; RV32-NEXT: addi a7, a7, %lo(.LCPI26_0)
 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT: vle32.v v24, (a5)
 ; RV32-NEXT: vmv.s.x v0, a3

@@ -600,12 +617,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT: addi a1, a1, 16
 ; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
 ; RV32-NEXT: lui a7, 49164
-; RV32-NEXT: lui a1, %hi(.LCPI25_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_1)
+; RV32-NEXT: lui a1, %hi(.LCPI26_1)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_1)
 ; RV32-NEXT: lui t2, 3
 ; RV32-NEXT: lui t1, 196656
-; RV32-NEXT: lui a4, %hi(.LCPI25_3)
-; RV32-NEXT: addi a4, a4, %lo(.LCPI25_3)
+; RV32-NEXT: lui a4, %hi(.LCPI26_3)
+; RV32-NEXT: addi a4, a4, %lo(.LCPI26_3)
 ; RV32-NEXT: lui t0, 786624
 ; RV32-NEXT: li a5, 48
 ; RV32-NEXT: lui a6, 768

@@ -784,8 +801,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT: vrgatherei16.vv v24, v8, v2
-; RV32-NEXT: lui a1, %hi(.LCPI25_2)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_2)
+; RV32-NEXT: lui a1, %hi(.LCPI26_2)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_2)
 ; RV32-NEXT: lui a3, 3073
 ; RV32-NEXT: addi a3, a3, -1024
 ; RV32-NEXT: vmv.s.x v0, a3

@@ -849,16 +866,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT: vrgatherei16.vv v28, v8, v3
 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT: vmv.v.v v28, v24
-; RV32-NEXT: lui a1, %hi(.LCPI25_4)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_4)
-; RV32-NEXT: lui a2, %hi(.LCPI25_5)
-; RV32-NEXT: addi a2, a2, %lo(.LCPI25_5)
+; RV32-NEXT: lui a1, %hi(.LCPI26_4)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_4)
+; RV32-NEXT: lui a2, %hi(.LCPI26_5)
+; RV32-NEXT: addi a2, a2, %lo(.LCPI26_5)
 ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT: vle16.v v24, (a2)
 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI25_7)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_7)
+; RV32-NEXT: lui a1, %hi(.LCPI26_7)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_7)
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT: vle16.v v10, (a1)
 ; RV32-NEXT: csrr a1, vlenb

@@ -886,14 +903,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT: vrgatherei16.vv v16, v0, v10
-; RV32-NEXT: lui a1, %hi(.LCPI25_6)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_6)
-; RV32-NEXT: lui a2, %hi(.LCPI25_8)
-; RV32-NEXT: addi a2, a2, %lo(.LCPI25_8)
+; RV32-NEXT: lui a1, %hi(.LCPI26_6)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_6)
+; RV32-NEXT: lui a2, %hi(.LCPI26_8)
+; RV32-NEXT: addi a2, a2, %lo(.LCPI26_8)
 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI25_9)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI25_9)
+; RV32-NEXT: lui a1, %hi(.LCPI26_9)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI26_9)
 ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT: vle16.v v6, (a1)
 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma

@@ -980,8 +997,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT: li a4, 128
 ; RV64-NEXT: lui a1, 1
 ; RV64-NEXT: vle64.v v8, (a3)
-; RV64-NEXT: lui a3, %hi(.LCPI25_0)
-; RV64-NEXT: addi a3, a3, %lo(.LCPI25_0)
+; RV64-NEXT: lui a3, %hi(.LCPI26_0)
+; RV64-NEXT: addi a3, a3, %lo(.LCPI26_0)
 ; RV64-NEXT: vmv.s.x v0, a4
 ; RV64-NEXT: csrr a4, vlenb
 ; RV64-NEXT: li a5, 61

@@ -1169,8 +1186,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vslideup.vi v12, v16, 1, v0.t
-; RV64-NEXT: lui a2, %hi(.LCPI25_1)
-; RV64-NEXT: addi a2, a2, %lo(.LCPI25_1)
+; RV64-NEXT: lui a2, %hi(.LCPI26_1)
+; RV64-NEXT: addi a2, a2, %lo(.LCPI26_1)
 ; RV64-NEXT: li a3, 192
 ; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT: vle16.v v6, (a2)

@@ -1204,8 +1221,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT: vrgatherei16.vv v24, v16, v6
 ; RV64-NEXT: addi a2, sp, 16
 ; RV64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; RV64-NEXT: lui a2, %hi(.LCPI25_2)
-; RV64-NEXT: addi a2, a2, %lo(.LCPI25_2)
+; RV64-NEXT: lui a2, %hi(.LCPI26_2)
+; RV64-NEXT: addi a2, a2, %lo(.LCPI26_2)
 ; RV64-NEXT: li a3, 1040
 ; RV64-NEXT: vmv.s.x v0, a3
 ; RV64-NEXT: addi a1, a1, -2016

@@ -1289,12 +1306,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT: add a1, sp, a1
 ; RV64-NEXT: addi a1, a1, 16
 ; RV64-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
-; RV64-NEXT: lui a1, %hi(.LCPI25_3)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI25_3)
+; RV64-NEXT: lui a1, %hi(.LCPI26_3)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI26_3)
 ; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT: vle16.v v20, (a1)
-; RV64-NEXT: lui a1, %hi(.LCPI25_4)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI25_4)
+; RV64-NEXT: lui a1, %hi(.LCPI26_4)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI26_4)
 ; RV64-NEXT: vle16.v v8, (a1)
 ; RV64-NEXT: csrr a1, vlenb
 ; RV64-NEXT: li a2, 77

@@ -1345,8 +1362,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT: vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT: vrgatherei16.vv v0, v16, v8
-; RV64-NEXT: lui a1, %hi(.LCPI25_5)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI25_5)
+; RV64-NEXT: lui a1, %hi(.LCPI26_5)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI26_5)
 ; RV64-NEXT: vle16.v v20, (a1)
 ; RV64-NEXT: csrr a1, vlenb
 ; RV64-NEXT: li a2, 61

@@ -1963,8 +1980,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
 ; RV32-NEXT: vle32.v v12, (a0), v0.t
 ; RV32-NEXT: li a0, 36
 ; RV32-NEXT: vmv.s.x v20, a1
-; RV32-NEXT: lui a1, %hi(.LCPI61_0)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI61_0)
+; RV32-NEXT: lui a1, %hi(.LCPI62_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI62_0)
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vle16.v v21, (a1)
 ; RV32-NEXT: vcompress.vm v8, v12, v11

@@ -2039,8 +2056,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
 ; RV32-NEXT: vmv.s.x v10, a0
 ; RV32-NEXT: li a0, 146
 ; RV32-NEXT: vmv.s.x v11, a0
-; RV32-NEXT: lui a0, %hi(.LCPI62_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI62_0)
+; RV32-NEXT: lui a0, %hi(.LCPI63_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI63_0)
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vle16.v v20, (a0)
 ; RV32-NEXT: li a0, 36

@@ -2198,8 +2215,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @maskedload_factor3_invalid_skip_field(
 ; RV32-NEXT: vle32.v v12, (a0), v0.t
 ; RV32-NEXT: li a0, 36
 ; RV32-NEXT: vmv.s.x v20, a1
-; RV32-NEXT: lui a1, %hi(.LCPI68_0)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI68_0)
+; RV32-NEXT: lui a1, %hi(.LCPI69_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI69_0)
 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT: vle16.v v21, (a1)
 ; RV32-NEXT: vcompress.vm v8, v12, v11

Reviewer: Your else here can just be IMI->getArgOperand(0), since that must be the zero constant.

Author: Good point, it's fixed now.
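To make the exchange above concrete, here is a minimal sketch, not the actual patch code, of how a gap mask could be recovered from a mask assembled by an llvm.vector.interleaveN intrinsic: all-zero operands mark skipped fields, the remaining operands must agree on a single per-field mask, and when every operand is zero, returning IMI->getArgOperand(0) still yields a well-formed all-false mask, which is the point of the reviewer's suggestion. The helper name getMaskAndGapFromInterleave is hypothetical.

```cpp
// Sketch only -- an assumed helper, not the code in this PR.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"
#include <utility>
using namespace llvm;

// Given IMI = llvm.vector.interleave<Factor> assembling a wide mask, return
// the common per-field mask plus a bitmask with one bit per field, cleared
// for fields whose operand is an all-false constant, i.e. gaps.
static std::pair<Value *, APInt>
getMaskAndGapFromInterleave(IntrinsicInst *IMI, unsigned Factor) {
  APInt GapMask = APInt::getAllOnes(Factor);
  Value *CommonMask = nullptr;
  for (unsigned I = 0; I != Factor; ++I) {
    Value *Op = IMI->getArgOperand(I);
    auto *C = dyn_cast<Constant>(Op);
    if (C && C->isZeroValue()) {
      GapMask.clearBit(I); // field I is entirely masked off: a gap
      continue;
    }
    if (!CommonMask)
      CommonMask = Op;
    else if (CommonMask != Op)
      return {nullptr, GapMask}; // fields disagree; no single mask to reuse
  }
  // If every field was a gap, IMI->getArgOperand(0) is itself the zero
  // constant, so returning it still gives the caller an all-false mask.
  if (!CommonMask)
    CommonMask = IMI->getArgOperand(0);
  return {CommonMask, GapMask};
}
```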