|
7 | 7 | //===----------------------------------------------------------------------===// |
8 | 8 | #include "llvm/CodeGen/GlobalISel/CombinerHelper.h" |
9 | 9 | #include "llvm/ADT/APFloat.h" |
| 10 | +#include "llvm/ADT/STLExtras.h" |
10 | 11 | #include "llvm/ADT/SetVector.h" |
11 | 12 | #include "llvm/ADT/SmallBitVector.h" |
12 | 13 | #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" |
@@ -944,159 +945,97 @@ void CombinerHelper::applySextInRegOfLoad( |
944 | 945 | MI.eraseFromParent(); |
945 | 946 | } |
946 | 947 |
|
947 | | -bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, |
| 948 | +bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr, |
948 | 949 | Register &Base, Register &Offset) { |
949 | | - auto &MF = *MI.getParent()->getParent(); |
| 950 | + auto &MF = *LdSt.getParent()->getParent(); |
950 | 951 | const auto &TLI = *MF.getSubtarget().getTargetLowering(); |
951 | 952 |
|
952 | | -#ifndef NDEBUG |
953 | | - unsigned Opcode = MI.getOpcode(); |
954 | | - assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || |
955 | | - Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); |
956 | | -#endif |
| 953 | + Base = LdSt.getPointerReg(); |
957 | 954 |
|
958 | | - Base = MI.getOperand(1).getReg(); |
959 | | - MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base); |
960 | | - if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) |
| 955 | + if (getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Base, MRI)) |
961 | 956 | return false; |
962 | 957 |
|
963 | | - LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI); |
964 | 958 | // FIXME: The following use traversal needs a bail out for pathological cases.
965 | 959 | for (auto &Use : MRI.use_nodbg_instructions(Base)) { |
966 | | - if (Use.getOpcode() != TargetOpcode::G_PTR_ADD) |
| 960 | + auto *PtrAdd = dyn_cast<GPtrAdd>(&Use); |
| 961 | + if (!PtrAdd) |
967 | 962 | continue; |
968 | 963 |
|
969 | | - Offset = Use.getOperand(2).getReg(); |
| 964 | + Offset = PtrAdd->getOffsetReg(); |
970 | 965 | if (!ForceLegalIndexing && |
971 | | - !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) { |
972 | | - LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: " |
973 | | - << Use); |
| 966 | + !TLI.isIndexingLegal(LdSt, Base, Offset, /*IsPre*/ false, MRI)) |
974 | 967 | continue; |
975 | | - } |
976 | 968 |
|
977 | 969 | // Make sure the offset calculation is before the potentially indexed op. |
978 | | - // FIXME: we really care about dependency here. The offset calculation might |
979 | | - // be movable. |
980 | | - MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset); |
981 | | - if (!OffsetDef || !dominates(*OffsetDef, MI)) { |
982 | | - LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: " |
983 | | - << Use); |
| 970 | + MachineInstr *OffsetDef = MRI.getVRegDef(Offset); |
| 971 | + if (!dominates(*OffsetDef, LdSt)) |
984 | 972 | continue; |
985 | | - } |
986 | 973 |
|
987 | 974 | // FIXME: check whether all uses of Base are load/store with foldable |
988 | 975 | // addressing modes. If so, using the normal addr-modes is better than |
989 | 976 | // forming an indexed one. |
990 | | - |
991 | | - bool MemOpDominatesAddrUses = true; |
992 | | - for (auto &PtrAddUse : |
993 | | - MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) { |
994 | | - if (!dominates(MI, PtrAddUse)) { |
995 | | - MemOpDominatesAddrUses = false; |
996 | | - break; |
997 | | - } |
998 | | - } |
999 | | - |
1000 | | - if (!MemOpDominatesAddrUses) { |
1001 | | - LLVM_DEBUG( |
1002 | | - dbgs() << " Ignoring candidate as memop does not dominate uses: " |
1003 | | - << Use); |
| 977 | + if (any_of(MRI.use_nodbg_instructions(PtrAdd->getReg(0)), |
| 978 | + [&](MachineInstr &PtrAddUse) { |
| 979 | + return !dominates(LdSt, PtrAddUse); |
| 980 | + })) |
1004 | 981 | continue; |
1005 | | - } |
1006 | 982 |
|
1007 | | - LLVM_DEBUG(dbgs() << " Found match: " << Use); |
1008 | | - Addr = Use.getOperand(0).getReg(); |
| 983 | + Addr = PtrAdd->getReg(0); |
1009 | 984 | return true; |
1010 | 985 | } |
1011 | 986 |
|
1012 | 987 | return false; |
1013 | 988 | } |
1014 | 989 |
|
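The post-index hunk above replaces raw opcode checks and operand indices with the typed GISel wrappers (`GLoadStore::getPointerReg()`, `GPtrAdd::getOffsetReg()`) and folds the hand-rolled flag-and-break dominance loop into `llvm::any_of`. A minimal sketch of that range idiom, assuming the in-tree `GenericMachineInstrs.h` and `STLExtras.h` APIs; the free function and its name are illustrative, not part of the patch:

```cpp
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Illustrative helper: true if every non-debug user of the G_PTR_ADD's
// result is dominated by MemOp, using a caller-supplied dominance predicate.
static bool memOpDominatesPtrAddUses(
    GPtrAdd &PtrAdd, MachineInstr &MemOp, MachineRegisterInfo &MRI,
    function_ref<bool(MachineInstr &, MachineInstr &)> Dominates) {
  // any_of stops at the first user that violates the property, matching the
  // early 'break' of the removed loop.
  return !any_of(MRI.use_nodbg_instructions(PtrAdd.getReg(0)),
                 [&](MachineInstr &Use) { return !Dominates(MemOp, Use); });
}
```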
1015 | | -bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, |
| 990 | +bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr, |
1016 | 991 | Register &Base, Register &Offset) { |
1017 | | - auto &MF = *MI.getParent()->getParent(); |
| 992 | + auto &MF = *LdSt.getParent()->getParent(); |
1018 | 993 | const auto &TLI = *MF.getSubtarget().getTargetLowering(); |
1019 | 994 |
|
1020 | | -#ifndef NDEBUG |
1021 | | - unsigned Opcode = MI.getOpcode(); |
1022 | | - assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || |
1023 | | - Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); |
1024 | | -#endif |
1025 | | - |
1026 | | - Addr = MI.getOperand(1).getReg(); |
1027 | | - MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI); |
1028 | | - if (!AddrDef || MRI.hasOneNonDBGUse(Addr)) |
| 995 | + Addr = LdSt.getPointerReg(); |
| 996 | + if (!mi_match(Addr, MRI, m_GPtrAdd(m_Reg(Base), m_Reg(Offset))) || |
| 997 | + MRI.hasOneNonDBGUse(Addr)) |
1029 | 998 | return false; |
1030 | 999 |
|
1031 | | - Base = AddrDef->getOperand(1).getReg(); |
1032 | | - Offset = AddrDef->getOperand(2).getReg(); |
1033 | | - |
1034 | | - LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI); |
1035 | | - |
1036 | 1000 | if (!ForceLegalIndexing && |
1037 | | - !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) { |
1038 | | - LLVM_DEBUG(dbgs() << " Skipping, not legal for target"); |
| 1001 | + !TLI.isIndexingLegal(LdSt, Base, Offset, /*IsPre*/ true, MRI)) |
1039 | 1002 | return false; |
1040 | | - } |
1041 | 1003 |
|
1042 | 1004 | MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); |
1043 | | - if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { |
1044 | | - LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway."); |
| 1005 | + if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) |
1045 | 1006 | return false; |
1046 | | - } |
1047 | 1007 |
|
1048 | | - if (MI.getOpcode() == TargetOpcode::G_STORE) { |
| 1008 | + if (auto *St = dyn_cast<GStore>(&LdSt)) { |
1049 | 1009 | // Would require a copy. |
1050 | | - if (Base == MI.getOperand(0).getReg()) { |
1051 | | - LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway."); |
| 1010 | + if (Base == St->getValueReg()) |
1052 | 1011 | return false; |
1053 | | - } |
1054 | 1012 |
|
1055 | 1013 | // We're expecting one use of Addr in MI, but it could also be the |
1056 | 1014 | // value stored, which isn't actually dominated by the instruction. |
1057 | | - if (MI.getOperand(0).getReg() == Addr) { |
1058 | | - LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses"); |
| 1015 | + if (St->getValueReg() == Addr) |
1059 | 1016 | return false; |
1060 | | - } |
1061 | 1017 | } |
1062 | 1018 |
|
1063 | 1019 | // FIXME: check whether all uses of the base pointer are constant PtrAdds. |
1064 | 1020 | // That might allow us to end base's liveness here by adjusting the constant. |
1065 | 1021 |
|
1066 | | - for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) { |
1067 | | - if (!dominates(MI, UseMI)) { |
1068 | | - LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses."); |
1069 | | - return false; |
1070 | | - } |
1071 | | - } |
1072 | | - |
1073 | | - return true; |
| 1022 | + return all_of(MRI.use_nodbg_instructions(Addr), |
| 1023 | + [&](MachineInstr &UseMI) { return dominates(LdSt, UseMI); }); |
1074 | 1024 | } |
1075 | 1025 |
|
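The pre-index hunk leans on `MIPatternMatch` instead of `getOpcodeDef` plus manual `getOperand(1)`/`getOperand(2)` extraction, and closes with `llvm::all_of` over the address users. A minimal sketch of the matcher half, assuming the in-tree `MIPatternMatch.h` helpers; the wrapper function is hypothetical:

```cpp
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace llvm::MIPatternMatch;

// Illustrative helper: succeed only when Addr is defined by a G_PTR_ADD,
// binding its base and offset registers without modifying any instruction.
static bool decomposePtrAdd(Register Addr, const MachineRegisterInfo &MRI,
                            Register &Base, Register &Offset) {
  return mi_match(Addr, MRI, m_GPtrAdd(m_Reg(Base), m_Reg(Offset)));
}
```

One pattern call does the work of the removed `getOpcodeDef(TargetOpcode::G_PTR_ADD, ...)` lookup and both operand reads, so the only extra guard the caller keeps is the `MRI.hasOneNonDBGUse(Addr)` check.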
1076 | | -bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) { |
1077 | | - IndexedLoadStoreMatchInfo MatchInfo; |
1078 | | - if (matchCombineIndexedLoadStore(MI, MatchInfo)) { |
1079 | | - applyCombineIndexedLoadStore(MI, MatchInfo); |
1080 | | - return true; |
1081 | | - } |
1082 | | - return false; |
1083 | | -} |
1084 | | - |
1085 | | -bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { |
1086 | | - unsigned Opcode = MI.getOpcode(); |
1087 | | - if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD && |
1088 | | - Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE) |
1089 | | - return false; |
| 1026 | +bool CombinerHelper::matchCombineIndexedLoadStore( |
| 1027 | + MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { |
| 1028 | + auto &LdSt = cast<GLoadStore>(MI); |
1090 | 1029 |
|
1091 | 1030 | // For now, no targets actually support these opcodes so don't waste time |
1092 | 1031 | // running these unless we're forced to for testing. |
1093 | 1032 | if (!ForceLegalIndexing) |
1094 | 1033 | return false; |
1095 | 1034 |
|
1096 | | - MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, |
| 1035 | + MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base, |
1097 | 1036 | MatchInfo.Offset); |
1098 | 1037 | if (!MatchInfo.IsPre && |
1099 | | - !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, |
| 1038 | + !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base, |
1100 | 1039 | MatchInfo.Offset)) |
1101 | 1040 | return false; |
1102 | 1041 |
|
|
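`matchCombineIndexedLoadStore` now narrows its `MachineInstr` with `cast<GLoadStore>` rather than an explicit opcode whitelist, which only holds if the rule is wired to fire solely on load/store roots. A minimal sketch of the defensive variant, assuming the same wrapper classes; the rule name is hypothetical and not part of this change:

```cpp
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"

using namespace llvm;

// Illustrative match entry point: dyn_cast<> is the checked form to prefer
// when the caller cannot guarantee a G_LOAD/G_SEXTLOAD/G_ZEXTLOAD/G_STORE
// root opcode.
static bool matchSomeIndexedRule(MachineInstr &MI) {
  auto *LdSt = dyn_cast<GLoadStore>(&MI);
  if (!LdSt)
    return false; // not a generic load or store
  // Named accessor replaces MI.getOperand(1).getReg().
  return LdSt->getPointerReg().isValid();
}
```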