[DAG] Combine store + vselect to masked_store
#145176
Merged

abhishek-kaushik22 merged 22 commits into llvm:main from abhishek-kaushik22:masked-store on Aug 4, 2025
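This PR teaches the DAGCombiner to rewrite a whole-vector store of a vselect, where the not-selected lanes come from a load of the same address, into a single masked_store node. A minimal IR sketch of the pattern and its target shape (illustrative only: the real fold operates on the SelectionDAG in visitSTORE, and the function names here are invented):

```llvm
; Before: load the old contents, blend in %x under %mask, store everything back.
define void @before(<8 x i32> %x, ptr %p, <8 x i1> %mask) {
  %old = load <8 x i32>, ptr %p, align 32
  %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %old
  store <8 x i32> %sel, ptr %p, align 32
  ret void
}

; After: one masked store; lanes where %mask is false are never written.
define void @after(<8 x i32> %x, ptr %p, <8 x i1> %mask) {
  call void @llvm.masked.store.v8i32.p0(<8 x i32> %x, ptr %p, i32 32, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32, <8 x i1>)
```

The rewrite is only sound under the conditions the new tests probe: the load and store must use the same pointer (and, per the last commit, the same address space), neither may be volatile, nothing may write the location in between, and the target needs masked-store support, which is why the +sve runs below fold while the plain +neon runs do not.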
Changes from 7 commits
Commits
d9d04de  [X86] Combine `store + vselect` to `masked_store` (abhishek-kaushik22)
9eca209  Use pattern match (abhishek-kaushik22)
c0d5cf0  Fix tests (abhishek-kaushik22)
b3a4522  Revert last 3 commits (abhishek-kaushik22)
04366fa  Revert "[X86] Combine `store + vselect` to `masked_store`" (abhishek-kaushik22)
34fa965  Move to DAGCombiner (abhishek-kaushik22)
3106f46  Update macro-fuse-cmp.ll (abhishek-kaushik22)
8c14fba  Use allowsMisalignedMemoryAccesses to check if unaligned stores are a… (abhishek-kaushik22)
f1b33cc  Use reachesChainWithoutSideEffects (abhishek-kaushik22)
82180a8  Merge branch 'main' into masked-store (abhishek-kaushik22)
63356e0  Update tests (abhishek-kaushik22)
6602267  Test more types (abhishek-kaushik22)
0898e47  Fix review comments and update tests (abhishek-kaushik22)
efcf75a  Update DAGCombiner.cpp (abhishek-kaushik22)
acbc2c1  Update DAGCombiner.cpp (abhishek-kaushik22)
baf3d77  Merge branch 'main' into masked-store (abhishek-kaushik22)
4485b09  Place fold at the end of visitSTORE (abhishek-kaushik22)
ad5ead1  Merge branch 'masked-store' of https://github.com/abhishek-kaushik22/… (abhishek-kaushik22)
ed1d804  Update DAGCombiner.cpp (abhishek-kaushik22)
f5aed1f  Merge branch 'main' into masked-store (abhishek-kaushik22)
6d26be2  Merge branch 'main' into masked-store (abhishek-kaushik22)
f4157dd  Add address space check (abhishek-kaushik22)
The diff (as of commit 7) adds one new AArch64 test file (@@ -0,0 +1,282 @@):

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64-- -mattr=+neon | FileCheck %s -check-prefix=AARCH64
; RUN: llc < %s -mtriple=aarch64-- -mattr=+sve | FileCheck %s -check-prefix=SVE
```
```llvm
define void @test_masked_store_success(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
; AARCH64-LABEL: test_masked_store_success:
; AARCH64: // %bb.0:
; AARCH64-NEXT: zip1 v3.8b, v2.8b, v0.8b
; AARCH64-NEXT: zip2 v2.8b, v2.8b, v0.8b
; AARCH64-NEXT: ldp q4, q5, [x0]
; AARCH64-NEXT: ushll v3.4s, v3.4h, #0
; AARCH64-NEXT: ushll v2.4s, v2.4h, #0
; AARCH64-NEXT: shl v3.4s, v3.4s, #31
; AARCH64-NEXT: shl v2.4s, v2.4s, #31
; AARCH64-NEXT: cmlt v3.4s, v3.4s, #0
; AARCH64-NEXT: cmlt v2.4s, v2.4s, #0
; AARCH64-NEXT: bif v0.16b, v4.16b, v3.16b
; AARCH64-NEXT: bif v1.16b, v5.16b, v2.16b
; AARCH64-NEXT: stp q0, q1, [x0]
; AARCH64-NEXT: ret
;
; SVE-LABEL: test_masked_store_success:
; SVE: // %bb.0:
; SVE-NEXT: // kill: def $q0 killed $q0 def $z0
; SVE-NEXT: zip2 v3.8b, v2.8b, v0.8b
; SVE-NEXT: zip1 v2.8b, v2.8b, v0.8b
; SVE-NEXT: mov x8, #4 // =0x4
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: // kill: def $q1 killed $q1 def $z1
; SVE-NEXT: ushll v3.4s, v3.4h, #0
; SVE-NEXT: ushll v2.4s, v2.4h, #0
; SVE-NEXT: shl v3.4s, v3.4s, #31
; SVE-NEXT: shl v2.4s, v2.4s, #31
; SVE-NEXT: cmlt v3.4s, v3.4s, #0
; SVE-NEXT: cmlt v2.4s, v2.4s, #0
; SVE-NEXT: cmpne p1.s, p0/z, z3.s, #0
; SVE-NEXT: cmpne p0.s, p0/z, z2.s, #0
; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2]
; SVE-NEXT: st1w { z0.s }, p0, [x0]
; SVE-NEXT: ret
  %load = load <8 x i32>, ptr %ptr, align 32
  %sel = select <8 x i1> %cmp, <8 x i32> %x, <8 x i32> %load
  store <8 x i32> %sel, ptr %ptr, align 32
  ret void
}
```
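In the SVE run of test_masked_store_success the fold has fired: the old contents are never loaded, and the store is emitted as two predicated st1w instructions because the <8 x i32> masked store is legalized into two <4 x i32> halves. The NEON run is unchanged, as plain NEON has no masked stores. A hypothetical IR rendering of that split (names and shuffle form assumed for illustration):

```llvm
; Roughly what legalization makes of the folded <8 x i32> masked store:
; two <4 x i32> masked stores, matching the two st1w instructions above.
define void @masked_store_split(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
  %x.lo = shufflevector <8 x i32> %x, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %x.hi = shufflevector <8 x i32> %x, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %m.lo = shufflevector <8 x i1> %cmp, <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %m.hi = shufflevector <8 x i1> %cmp, <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %ptr.hi = getelementptr inbounds i32, ptr %ptr, i64 4
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x.lo, ptr %ptr, i32 32, <4 x i1> %m.lo)
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x.hi, ptr %ptr.hi, i32 16, <4 x i1> %m.hi)
  ret void
}

declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
```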
```llvm
define void @test_masked_store_volatile_load(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
; AARCH64-LABEL: test_masked_store_volatile_load:
; AARCH64: // %bb.0:
; AARCH64-NEXT: zip1 v3.8b, v2.8b, v0.8b
; AARCH64-NEXT: zip2 v2.8b, v2.8b, v0.8b
; AARCH64-NEXT: ldr q4, [x0]
; AARCH64-NEXT: ldr q5, [x0, #16]
; AARCH64-NEXT: ushll v3.4s, v3.4h, #0
; AARCH64-NEXT: ushll v2.4s, v2.4h, #0
; AARCH64-NEXT: shl v3.4s, v3.4s, #31
; AARCH64-NEXT: shl v2.4s, v2.4s, #31
; AARCH64-NEXT: cmlt v3.4s, v3.4s, #0
; AARCH64-NEXT: cmlt v2.4s, v2.4s, #0
; AARCH64-NEXT: bif v0.16b, v4.16b, v3.16b
; AARCH64-NEXT: bif v1.16b, v5.16b, v2.16b
; AARCH64-NEXT: stp q0, q1, [x0]
; AARCH64-NEXT: ret
;
; SVE-LABEL: test_masked_store_volatile_load:
; SVE: // %bb.0:
; SVE-NEXT: zip1 v3.8b, v2.8b, v0.8b
; SVE-NEXT: zip2 v2.8b, v2.8b, v0.8b
; SVE-NEXT: ldr q4, [x0]
; SVE-NEXT: ldr q5, [x0, #16]
; SVE-NEXT: ushll v3.4s, v3.4h, #0
; SVE-NEXT: ushll v2.4s, v2.4h, #0
; SVE-NEXT: shl v3.4s, v3.4s, #31
; SVE-NEXT: shl v2.4s, v2.4s, #31
; SVE-NEXT: cmlt v3.4s, v3.4s, #0
; SVE-NEXT: cmlt v2.4s, v2.4s, #0
; SVE-NEXT: bif v0.16b, v4.16b, v3.16b
; SVE-NEXT: bif v1.16b, v5.16b, v2.16b
; SVE-NEXT: stp q0, q1, [x0]
; SVE-NEXT: ret
  %load = load volatile <8 x i32>, ptr %ptr, align 32
  %sel = select <8 x i1> %cmp, <8 x i32> %x, <8 x i32> %load
  store <8 x i32> %sel, ptr %ptr, align 32
  ret void
}
```
```llvm
define void @test_masked_store_volatile_store(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
; AARCH64-LABEL: test_masked_store_volatile_store:
; AARCH64: // %bb.0:
; AARCH64-NEXT: zip1 v3.8b, v2.8b, v0.8b
; AARCH64-NEXT: zip2 v2.8b, v2.8b, v0.8b
; AARCH64-NEXT: ldp q4, q5, [x0]
; AARCH64-NEXT: ushll v3.4s, v3.4h, #0
; AARCH64-NEXT: ushll v2.4s, v2.4h, #0
; AARCH64-NEXT: shl v3.4s, v3.4s, #31
; AARCH64-NEXT: shl v2.4s, v2.4s, #31
; AARCH64-NEXT: cmlt v3.4s, v3.4s, #0
; AARCH64-NEXT: cmlt v2.4s, v2.4s, #0
; AARCH64-NEXT: bif v0.16b, v4.16b, v3.16b
; AARCH64-NEXT: bif v1.16b, v5.16b, v2.16b
; AARCH64-NEXT: str q0, [x0]
; AARCH64-NEXT: str q1, [x0, #16]
; AARCH64-NEXT: ret
;
; SVE-LABEL: test_masked_store_volatile_store:
; SVE: // %bb.0:
; SVE-NEXT: zip1 v3.8b, v2.8b, v0.8b
; SVE-NEXT: zip2 v2.8b, v2.8b, v0.8b
; SVE-NEXT: ldp q4, q5, [x0]
; SVE-NEXT: ushll v3.4s, v3.4h, #0
; SVE-NEXT: ushll v2.4s, v2.4h, #0
; SVE-NEXT: shl v3.4s, v3.4s, #31
; SVE-NEXT: shl v2.4s, v2.4s, #31
; SVE-NEXT: cmlt v3.4s, v3.4s, #0
; SVE-NEXT: cmlt v2.4s, v2.4s, #0
; SVE-NEXT: bif v0.16b, v4.16b, v3.16b
; SVE-NEXT: bif v1.16b, v5.16b, v2.16b
; SVE-NEXT: str q0, [x0]
; SVE-NEXT: str q1, [x0, #16]
; SVE-NEXT: ret
  %load = load <8 x i32>, ptr %ptr, align 32
  %sel = select <8 x i1> %cmp, <8 x i32> %x, <8 x i32> %load
  store volatile <8 x i32> %sel, ptr %ptr, align 32
  ret void
}
```
```llvm
declare void @use_vec(<8 x i32>)

define void @test_masked_store_intervening(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
; AARCH64-LABEL: test_masked_store_intervening:
; AARCH64: // %bb.0:
; AARCH64-NEXT: sub sp, sp, #96
; AARCH64-NEXT: str d8, [sp, #64] // 8-byte Folded Spill
; AARCH64-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; AARCH64-NEXT: .cfi_def_cfa_offset 96
; AARCH64-NEXT: .cfi_offset w19, -8
; AARCH64-NEXT: .cfi_offset w30, -16
; AARCH64-NEXT: .cfi_offset b8, -32
; AARCH64-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill
; AARCH64-NEXT: ldp q1, q3, [x0]
; AARCH64-NEXT: movi v0.2d, #0000000000000000
; AARCH64-NEXT: fmov d8, d2
; AARCH64-NEXT: mov x19, x0
; AARCH64-NEXT: stp q1, q3, [sp] // 32-byte Folded Spill
; AARCH64-NEXT: movi v1.2d, #0000000000000000
; AARCH64-NEXT: stp q0, q0, [x0]
; AARCH64-NEXT: bl use_vec
; AARCH64-NEXT: zip2 v0.8b, v8.8b, v0.8b
; AARCH64-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload
; AARCH64-NEXT: zip1 v1.8b, v8.8b, v0.8b
; AARCH64-NEXT: ushll v0.4s, v0.4h, #0
; AARCH64-NEXT: ldr d8, [sp, #64] // 8-byte Folded Reload
; AARCH64-NEXT: shl v0.4s, v0.4s, #31
; AARCH64-NEXT: ushll v1.4s, v1.4h, #0
; AARCH64-NEXT: cmlt v0.4s, v0.4s, #0
; AARCH64-NEXT: shl v1.4s, v1.4s, #31
; AARCH64-NEXT: bsl v0.16b, v2.16b, v3.16b
; AARCH64-NEXT: ldr q2, [sp, #48] // 16-byte Folded Reload
; AARCH64-NEXT: ldr q3, [sp] // 16-byte Folded Reload
; AARCH64-NEXT: cmlt v1.4s, v1.4s, #0
; AARCH64-NEXT: bsl v1.16b, v2.16b, v3.16b
; AARCH64-NEXT: stp q1, q0, [x19]
; AARCH64-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload
; AARCH64-NEXT: add sp, sp, #96
; AARCH64-NEXT: ret
;
; SVE-LABEL: test_masked_store_intervening:
; SVE: // %bb.0:
; SVE-NEXT: sub sp, sp, #96
; SVE-NEXT: str d8, [sp, #64] // 8-byte Folded Spill
; SVE-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; SVE-NEXT: .cfi_def_cfa_offset 96
; SVE-NEXT: .cfi_offset w19, -8
; SVE-NEXT: .cfi_offset w30, -16
; SVE-NEXT: .cfi_offset b8, -32
; SVE-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill
; SVE-NEXT: ldp q1, q3, [x0]
; SVE-NEXT: movi v0.2d, #0000000000000000
; SVE-NEXT: fmov d8, d2
; SVE-NEXT: mov x19, x0
; SVE-NEXT: stp q1, q3, [sp] // 32-byte Folded Spill
; SVE-NEXT: movi v1.2d, #0000000000000000
; SVE-NEXT: stp q0, q0, [x0]
; SVE-NEXT: bl use_vec
; SVE-NEXT: zip2 v0.8b, v8.8b, v0.8b
; SVE-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload
; SVE-NEXT: zip1 v1.8b, v8.8b, v0.8b
; SVE-NEXT: ushll v0.4s, v0.4h, #0
; SVE-NEXT: ldr d8, [sp, #64] // 8-byte Folded Reload
; SVE-NEXT: shl v0.4s, v0.4s, #31
; SVE-NEXT: ushll v1.4s, v1.4h, #0
; SVE-NEXT: cmlt v0.4s, v0.4s, #0
; SVE-NEXT: shl v1.4s, v1.4s, #31
; SVE-NEXT: bsl v0.16b, v2.16b, v3.16b
; SVE-NEXT: ldr q2, [sp, #48] // 16-byte Folded Reload
; SVE-NEXT: ldr q3, [sp] // 16-byte Folded Reload
; SVE-NEXT: cmlt v1.4s, v1.4s, #0
; SVE-NEXT: bsl v1.16b, v2.16b, v3.16b
; SVE-NEXT: stp q1, q0, [x19]
; SVE-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload
; SVE-NEXT: add sp, sp, #96
; SVE-NEXT: ret
  %load = load <8 x i32>, ptr %ptr, align 32
  store <8 x i32> zeroinitializer, ptr %ptr, align 32
  %tmp = load <8 x i32>, ptr %ptr
  call void @use_vec(<8 x i32> %tmp)
  %sel = select <8 x i1> %cmp, <8 x i32> %x, <8 x i32> %load
  store <8 x i32> %sel, ptr %ptr, align 32
  ret void
}
```
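test_masked_store_intervening is the key negative test for the chain check: between the first load and the final store, the memory is overwritten with zeroes and escapes through a call. Folding would be wrong here, because a masked store leaves masked-off lanes as they currently are in memory (zero at that point) instead of restoring the values captured by %load. A sketch of the incorrect result (hypothetical code, shown only to make the hazard concrete):

```llvm
declare void @use_vec(<8 x i32>)
declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32, <8 x i1>)

; WRONG: not equivalent to test_masked_store_intervening. After the
; intervening zero-store, the masked-off lanes would stay 0 rather than
; being rewritten with the values from the original %load.
define void @masked_store_intervening_wrong(<8 x i32> %x, ptr %ptr, <8 x i1> %cmp) {
  store <8 x i32> zeroinitializer, ptr %ptr, align 32
  %tmp = load <8 x i32>, ptr %ptr
  call void @use_vec(<8 x i32> %tmp)
  call void @llvm.masked.store.v8i32.p0(<8 x i32> %x, ptr %ptr, i32 32, <8 x i1> %cmp)
  ret void
}
```

This is presumably what the "Use reachesChainWithoutSideEffects" commit guards against: the store must reach the load along the chain without passing any other memory operation.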
```llvm
define void @test_masked_store_multiple(<8 x i32> %x, <8 x i32> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %cmp, <8 x i1> %cmp2) {
; AARCH64-LABEL: test_masked_store_multiple:
; AARCH64: // %bb.0:
; AARCH64-NEXT: zip1 v6.8b, v4.8b, v0.8b
; AARCH64-NEXT: zip2 v4.8b, v4.8b, v0.8b
; AARCH64-NEXT: zip1 v7.8b, v5.8b, v0.8b
; AARCH64-NEXT: zip2 v5.8b, v5.8b, v0.8b
; AARCH64-NEXT: ldp q16, q17, [x0]
; AARCH64-NEXT: ushll v6.4s, v6.4h, #0
; AARCH64-NEXT: ushll v4.4s, v4.4h, #0
; AARCH64-NEXT: ushll v7.4s, v7.4h, #0
; AARCH64-NEXT: ushll v5.4s, v5.4h, #0
; AARCH64-NEXT: shl v6.4s, v6.4s, #31
; AARCH64-NEXT: shl v4.4s, v4.4s, #31
; AARCH64-NEXT: shl v7.4s, v7.4s, #31
; AARCH64-NEXT: shl v5.4s, v5.4s, #31
; AARCH64-NEXT: cmlt v6.4s, v6.4s, #0
; AARCH64-NEXT: cmlt v4.4s, v4.4s, #0
; AARCH64-NEXT: cmlt v7.4s, v7.4s, #0
; AARCH64-NEXT: cmlt v5.4s, v5.4s, #0
; AARCH64-NEXT: bif v0.16b, v16.16b, v6.16b
; AARCH64-NEXT: ldp q6, q16, [x1]
; AARCH64-NEXT: bif v1.16b, v17.16b, v4.16b
; AARCH64-NEXT: bif v2.16b, v6.16b, v7.16b
; AARCH64-NEXT: bif v3.16b, v16.16b, v5.16b
; AARCH64-NEXT: stp q0, q1, [x0]
; AARCH64-NEXT: stp q2, q3, [x1]
; AARCH64-NEXT: ret
;
; SVE-LABEL: test_masked_store_multiple:
; SVE: // %bb.0:
; SVE-NEXT: // kill: def $q0 killed $q0 def $z0
; SVE-NEXT: zip2 v6.8b, v4.8b, v0.8b
; SVE-NEXT: zip1 v4.8b, v4.8b, v0.8b
; SVE-NEXT: mov x8, #4 // =0x4
; SVE-NEXT: zip2 v7.8b, v5.8b, v0.8b
; SVE-NEXT: zip1 v5.8b, v5.8b, v0.8b
; SVE-NEXT: // kill: def $q3 killed $q3 def $z3
; SVE-NEXT: // kill: def $q1 killed $q1 def $z1
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ushll v6.4s, v6.4h, #0
; SVE-NEXT: ushll v4.4s, v4.4h, #0
; SVE-NEXT: ushll v7.4s, v7.4h, #0
; SVE-NEXT: ushll v5.4s, v5.4h, #0
; SVE-NEXT: shl v6.4s, v6.4s, #31
; SVE-NEXT: shl v4.4s, v4.4s, #31
; SVE-NEXT: shl v7.4s, v7.4s, #31
; SVE-NEXT: shl v5.4s, v5.4s, #31
; SVE-NEXT: cmlt v6.4s, v6.4s, #0
; SVE-NEXT: cmlt v4.4s, v4.4s, #0
; SVE-NEXT: cmlt v7.4s, v7.4s, #0
; SVE-NEXT: cmlt v5.4s, v5.4s, #0
; SVE-NEXT: cmpne p1.s, p0/z, z6.s, #0
; SVE-NEXT: ldr q6, [x1]
; SVE-NEXT: cmpne p2.s, p0/z, z4.s, #0
; SVE-NEXT: cmpne p0.s, p0/z, z7.s, #0
; SVE-NEXT: bif v2.16b, v6.16b, v5.16b
; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2]
; SVE-NEXT: st1w { z0.s }, p2, [x0]
; SVE-NEXT: st1w { z3.s }, p0, [x1, x8, lsl #2]
; SVE-NEXT: str q2, [x1]
; SVE-NEXT: ret
  %load = load <8 x i32>, ptr %ptr1, align 32
  %load2 = load <8 x i32>, ptr %ptr2, align 32
  %sel = select <8 x i1> %cmp, <8 x i32> %x, <8 x i32> %load
  %sel2 = select <8 x i1> %cmp2, <8 x i32> %y, <8 x i32> %load2
  store <8 x i32> %sel, ptr %ptr1, align 32
  store <8 x i32> %sel2, ptr %ptr2, align 32
  ret void
}
```
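test_masked_store_multiple checks that independent select/store pairs fold separately: in the SVE output, three of the four 128-bit halves become predicated st1w stores, while the remaining half keeps a blend plus a plain str. If both stores folded completely, the function would reduce to two masked stores; a hypothetical fully-folded sketch:

```llvm
declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32, <8 x i1>)

; Hypothetical fully-folded form of test_masked_store_multiple.
define void @masked_store_multiple_equivalent(<8 x i32> %x, <8 x i32> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %cmp, <8 x i1> %cmp2) {
  call void @llvm.masked.store.v8i32.p0(<8 x i32> %x, ptr %ptr1, i32 32, <8 x i1> %cmp)
  call void @llvm.masked.store.v8i32.p0(<8 x i32> %y, ptr %ptr2, i32 32, <8 x i1> %cmp2)
  ret void
}
```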