diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index cd0dc26a1f257..d7bf8f5dce435 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -1946,6 +1946,11 @@ def atomic_load_azext_16 : PatFrags<(ops node:$op),
                                     [(atomic_load_aext_16 node:$op),
                                      (atomic_load_zext_16 node:$op)]>;
 
+// Atomic load which zeroes or anyextends the high bits.
+def atomic_load_azext_32 : PatFrags<(ops node:$op),
+                                    [(atomic_load_aext_32 node:$op),
+                                     (atomic_load_zext_32 node:$op)]>;
+
 // Atomic load which sign extends or anyextends the high bits.
 def atomic_load_asext_8 : PatFrags<(ops node:$op),
                                    [(atomic_load_aext_8 node:$op),
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index a5f7b384b3e5d..28d45fe25d30c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -51,9 +51,9 @@ class seq_cst_load<PatFrags base>
 let Predicates = [HasRCPC] in {
   // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
   // 8-bit loads
-  def : Pat<(acquiring_load<atomic_load_8> GPR64sp:$ptr), (LDAPRB GPR64sp:$ptr)>;
+  def : Pat<(acquiring_load<atomic_load_azext_8> GPR64sp:$ptr), (LDAPRB GPR64sp:$ptr)>;
   // 16-bit loads
-  def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDAPRH GPR64sp:$ptr)>;
+  def : Pat<(acquiring_load<atomic_load_azext_16> GPR64sp:$ptr), (LDAPRH GPR64sp:$ptr)>;
   // 32-bit loads
   def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDAPRW GPR64sp:$ptr)>;
   // 64-bit loads
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 6a5065cd4a0e8..6cc76b44f1e14 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -502,10 +502,6 @@ def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextloadi16 node:$ptr)> {
   let IsLoad = 1;
 }
 
-def atomic_load_8_#as : PatFrag<(ops node:$ptr), (atomic_load_8 node:$ptr)> {
-  let IsAtomic = 1;
-}
-
 def atomic_load_16_#as : PatFrag<(ops node:$ptr), (atomic_load_16 node:$ptr)> {
   let IsAtomic = 1;
 }
@@ -526,6 +522,10 @@ def atomic_load_sext_8_#as : PatFrag<(ops node:$ptr), (atomic_load_sext_8 node:$
   let IsAtomic = 1;
 }
 
+def atomic_load_aext_8_#as : PatFrag<(ops node:$ptr), (atomic_load_aext_8 node:$ptr)> {
+  let IsAtomic = 1;
+}
+
 def atomic_load_zext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_zext_16 node:$ptr)> {
   let IsAtomic = 1;
 }
@@ -534,6 +534,10 @@ def atomic_load_sext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_sext_16 node
   let IsAtomic = 1;
 }
 
+def atomic_load_aext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_aext_16 node:$ptr)> {
+  let IsAtomic = 1;
+}
+
 } // End let AddressSpaces
 } // End foreach as
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index f4edfe1387731..7d64a3dd240c8 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -953,11 +953,12 @@ defm BUFFER_LOAD_DWORDX4 : MUBUF_Pseudo_Loads_Lds <
   "buffer_load_dwordx4", v4i32, /*LDSPred=*/HasGFX950Insts
 >;
 
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_aext_8_global>;
 defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_zext_8_global>;
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_16_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_aext_16_global>;
 defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_zext_16_global>;
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_aext_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_zext_8_global>;
 defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i16, atomic_load_16_global>;
 defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, extloadi8_global>;
 defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, zextloadi8_global>;
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index bc1db52eeeb2f..74884a2207079 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -853,14 +853,14 @@ foreach vt = Reg32Types.types in {
 defm : DSReadPat_mc <DS_READ_B32, vt, "load_local">;
 }
 
-defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_8_local">;
-defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_8_local">;
+defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_aext_8_local">;
+defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_aext_8_local">;
 defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_zext_8_local">;
 defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_zext_8_local">;
 defm : DSReadPat_t16 <DS_READ_I8, i16, "atomic_load_sext_8_local">;
 defm : DSReadPat_mc <DS_READ_I8, i32, "atomic_load_sext_8_local">;
 defm : DSReadPat_t16 <DS_READ_U16, i16, "atomic_load_16_local">;
-defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_16_local">;
+defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_aext_16_local">;
 defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_zext_16_local">;
 defm : DSReadPat_mc <DS_READ_I16, i32, "atomic_load_sext_16_local">;
 defm : DSReadPat_mc <DS_READ_B32, i32, "atomic_load_32_local">;
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 02a5d50ff3ae6..d8bb6e4378924 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1536,14 +1536,13 @@ multiclass ScratchFLATLoadPats_D16_t16
 
 let OtherPredicates = [HasFlatAddressSpace] in {
 
-def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_8_flat, i32>;
-def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_zext_8_flat, i16>;
+def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_aext_8_flat, i32>;
+def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_zext_8_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_UBYTE, extloadi8_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_UBYTE, zextloadi8_flat, i32>;
-def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_16_flat, i32>;
+def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_aext_16_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_SBYTE, sextloadi8_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_USHORT, extloadi16_flat, i32>;
-def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_zext_16_flat, i16>;
 def : FlatLoadPat <FLAT_LOAD_USHORT, zextloadi16_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_SSHORT, sextloadi16_flat, i32>;
 def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_zext_16_flat, i32>;
@@ -1678,11 +1677,11 @@ def : FlatLoadPat_D16
 
 let OtherPredicates = [HasFlatGlobalInsts] in {
 
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_8_global, i32>;
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_zext_8_global, i16>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_aext_8_global, i32>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_zext_8_global, i32>;
 defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, extloadi8_global, i32>;
 defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, zextloadi8_global, i32>;
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_16_global, i32>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_aext_16_global, i32>;
 defm : GlobalFLATLoadPats <GLOBAL_LOAD_SBYTE, sextloadi8_global, i32>;
 defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, extloadi16_global, i32>;
 defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, zextloadi16_global, i32>;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 51433020eeae7..ec1fd6fb60d57 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -373,10 +373,10 @@ def atomic_load_sext_glue :
   let IsSignExtLoad = true;
 }
 
-def atomic_load_8_glue : PatFrag<(ops node:$ptr),
-                                 (AMDGPUatomic_ld_glue node:$ptr)> {
-  let IsAtomic = 1;
-  let MemoryVT = i8;
+def atomic_load_aext_glue :
+  PatFrag<(ops node:$ptr), (AMDGPUatomic_ld_glue node:$ptr)> {
+  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+  let IsAnyExtLoad = true;
 }
 
 def atomic_load_16_glue : PatFrag<(ops node:$ptr),
@@ -409,6 +409,12 @@ def atomic_load_sext_8_glue : PatFrag<(ops node:$ptr),
   let MemoryVT = i8;
 }
 
+def atomic_load_aext_8_glue : PatFrag<(ops node:$ptr),
+                                      (atomic_load_aext_glue node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i8;
+}
+
 def atomic_load_zext_16_glue : PatFrag<(ops node:$ptr),
                                        (atomic_load_zext_glue node:$ptr)> {
   let IsAtomic = 1;
@@ -421,6 +427,12 @@ def atomic_load_sext_16_glue : PatFrag<(ops node:$ptr),
   let MemoryVT = i16;
 }
 
+def atomic_load_aext_16_glue : PatFrag<(ops node:$ptr),
+                                       (atomic_load_aext_glue node:$ptr)> {
+  let IsAtomic = 1;
+  let MemoryVT = i16;
+}
+
 def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
   let IsLoad = 1;
   let IsAnyExtLoad = 1;
@@ -494,8 +506,6 @@ def load_align16_local_m0 : PatFrag<(ops node:$ptr),
 }
 
 let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
-def atomic_load_8_local_m0 : PatFrag<(ops node:$ptr),
-                                     (atomic_load_8_glue node:$ptr)>;
 def atomic_load_16_local_m0 : PatFrag<(ops node:$ptr),
                                       (atomic_load_16_glue node:$ptr)>;
 def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr),
@@ -507,10 +517,14 @@ def atomic_load_zext_8_local_m0 : PatFrag<(ops node:$ptr),
                                           (atomic_load_zext_8_glue node:$ptr)>;
 def atomic_load_sext_8_local_m0 : PatFrag<(ops node:$ptr),
                                           (atomic_load_sext_8_glue node:$ptr)>;
+def atomic_load_aext_8_local_m0 : PatFrag<(ops node:$ptr),
+                                          (atomic_load_aext_8_glue node:$ptr)>;
 def atomic_load_zext_16_local_m0 : PatFrag<(ops node:$ptr),
                                            (atomic_load_zext_16_glue node:$ptr)>;
 def atomic_load_sext_16_local_m0 : PatFrag<(ops node:$ptr),
                                            (atomic_load_sext_16_glue node:$ptr)>;
+def atomic_load_aext_16_local_m0 : PatFrag<(ops node:$ptr),
+                                           (atomic_load_aext_16_glue node:$ptr)>;
 
 } // End let AddressSpaces = LoadAddress_local.AddrSpaces
 
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index d6387ff848593..1ce9190a68f3c 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -5376,14 +5376,14 @@ def : ARMPat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
 def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
              (STLEXH GPR:$Rt, addr_offset_none:$addr)>;
 
-class acquiring_load<PatFrag base>
+class acquiring_load<PatFrags base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
   return isAcquireOrStronger(Ordering);
 }]>;
 
-def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
-def atomic_load_acquire_16 : acquiring_load<atomic_load_16>;
+def atomic_load_azext_acquire_8 : acquiring_load<atomic_load_azext_8>;
+def atomic_load_azext_acquire_16 : acquiring_load<atomic_load_azext_16>;
 def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
 
 class releasing_store<PatFrag base>
@@ -5397,8 +5397,8 @@ def atomic_store_release_16 : releasing_store<atomic_store_16>;
 def atomic_store_release_32 : releasing_store<atomic_store_32>;
 
 let AddedComplexity = 8 in {
-  def : ARMPat<(atomic_load_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
-  def : ARMPat<(atomic_load_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
+  def : ARMPat<(atomic_load_azext_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
+  def : ARMPat<(atomic_load_azext_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
   def : ARMPat<(atomic_load_acquire_32 addr_offset_none:$addr), (LDA addr_offset_none:$addr)>;
   def : ARMPat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (STLB GPR:$val, addr_offset_none:$addr)>;
   def : ARMPat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (STLH GPR:$val, addr_offset_none:$addr)>;
@@ -6214,11 +6214,11 @@ def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPRnopc:$Rm, i16)),
               (SXTAH GPR:$Rn, GPRnopc:$Rm, 0)>;
 
 // Atomic load/store patterns
-def : ARMPat<(atomic_load_8 ldst_so_reg:$src),
+def : ARMPat<(atomic_load_azext_8 ldst_so_reg:$src),
              (LDRBrs ldst_so_reg:$src)>;
-def : ARMPat<(atomic_load_8 addrmode_imm12:$src),
+def : ARMPat<(atomic_load_azext_8 addrmode_imm12:$src),
              (LDRBi12 addrmode_imm12:$src)>;
-def : ARMPat<(atomic_load_16 addrmode3:$src),
+def : ARMPat<(atomic_load_azext_16 addrmode3:$src),
              (LDRH addrmode3:$src)>;
 def : ARMPat<(atomic_load_32 ldst_so_reg:$src),
              (LDRrs ldst_so_reg:$src)>;
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td
index b69bc601a0cdc..feda22c89e925 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1697,13 +1697,13 @@ def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
 def : T1Pat<(sextloadi16 t_addrmode_rr:$addr),
             (tASRri (tLSLri (tLDRHr t_addrmode_rr:$addr), 16), 16)>;
 
-def : T1Pat<(atomic_load_8 t_addrmode_is1:$src),
+def : T1Pat<(atomic_load_azext_8 t_addrmode_is1:$src),
             (tLDRBi t_addrmode_is1:$src)>;
-def : T1Pat<(atomic_load_8 t_addrmode_rr:$src),
+def : T1Pat<(atomic_load_azext_8 t_addrmode_rr:$src),
             (tLDRBr t_addrmode_rr:$src)>;
-def : T1Pat<(atomic_load_16 t_addrmode_is2:$src),
+def : T1Pat<(atomic_load_azext_16 t_addrmode_is2:$src),
             (tLDRHi t_addrmode_is2:$src)>;
-def : T1Pat<(atomic_load_16 t_addrmode_rr:$src),
+def : T1Pat<(atomic_load_azext_16 t_addrmode_rr:$src),
             (tLDRHr t_addrmode_rr:$src)>;
 def : T1Pat<(atomic_load_32 t_addrmode_is4:$src),
             (tLDRi t_addrmode_is4:$src)>;
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 9f80af07df0fc..f9a873a9483de 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -4899,17 +4899,17 @@ def : T2Pat<(add rGPR:$Rn, (sext_inreg rGPR:$Rm, i16)),
             Requires<[HasDSP, IsThumb2]>;
 
 // Atomic load/store patterns
-def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_imm12:$addr),
             (t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_negimm8:$addr),
            (t2LDRBi8 t2addrmode_negimm8:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_so_reg:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_so_reg:$addr),
            (t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_imm12:$addr),
            (t2LDRHi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_negimm8:$addr),
            (t2LDRHi8 t2addrmode_negimm8:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_so_reg:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_so_reg:$addr),
            (t2LDRHs t2addrmode_so_reg:$addr)>;
 def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
            (t2LDRi12 t2addrmode_imm12:$addr)>;
@@ -4937,8 +4937,8 @@ def : T2Pat<(atomic_store_32 GPR:$val, t2addrmode_so_reg:$addr),
             (t2STRs GPR:$val, t2addrmode_so_reg:$addr)>;
 
 let AddedComplexity = 8, Predicates = [IsThumb, HasAcquireRelease, HasV7Clrex] in {
-  def : Pat<(atomic_load_acquire_8 addr_offset_none:$addr), (t2LDAB addr_offset_none:$addr)>;
-  def : Pat<(atomic_load_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
+  def : Pat<(atomic_load_azext_acquire_8 addr_offset_none:$addr), (t2LDAB addr_offset_none:$addr)>;
+  def : Pat<(atomic_load_azext_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
   def : Pat<(atomic_load_acquire_32 addr_offset_none:$addr), (t2LDA addr_offset_none:$addr)>;
   def : Pat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (t2STLB GPR:$val, addr_offset_none:$addr)>;
   def : Pat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (t2STLH GPR:$val, addr_offset_none:$addr)>;
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index 244f204539c89..1be16c1739512 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -2130,7 +2130,7 @@ def sextloadv4i8: PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
 
 // Patterns to select load-indexed: Rs + Off.
 // - frameindex [+ imm],
-multiclass Loadxfi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxfi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
                        InstHexagon MI> {
   def: Pat<(VT (Load (add (i32 AddrFI:$fi), ImmPred:$Off))),
            (VT (MI AddrFI:$fi, imm:$Off))>;
@@ -2141,7 +2141,7 @@ multiclass Loadxfi_pat
 
 // Patterns to select load-indexed: Rs + Off.
 // - base reg [+ imm]
-multiclass Loadxgi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxgi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
                        InstHexagon MI> {
   def: Pat<(VT (Load (add I32:$Rs, ImmPred:$Off))),
            (VT (MI IntRegs:$Rs, imm:$Off))>;
@@ -2151,7 +2151,7 @@ multiclass Loadxgi_pat
 
 // Patterns to select load-indexed: Rs + Off.
 // - combination of Loadxfi_pat and Loadxgi_pat.
-multiclass Loadxi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
                       InstHexagon MI> {
   defm: Loadxfi_pat<Load, VT, ImmPred, MI>;
   defm: Loadxgi_pat<Load, VT, ImmPred, MI>;
@@ -2221,7 +2221,7 @@ class Loadxum_pat
 
 // Pattern to select load absolute.
-class Loada_pat<PatFrag Load, ValueType VT, ComplexPattern Addr, InstHexagon MI>
+class Loada_pat<PatFrags Load, ValueType VT, ComplexPattern Addr, InstHexagon MI>
   : Pat<(VT (Load Addr:$addr)), (MI Addr:$addr)>;
 
 // Pattern to select load absolute with value modifier.
@@ -2256,8 +2256,8 @@ let AddedComplexity = 20 in {
   defm: Loadxi_pat<extloadi1, i32, anyimm0, L2_loadrub_io>;
   // No sextloadi1.
-  defm: Loadxi_pat<atomic_load_8,  i32, anyimm0, L2_loadrub_io>;
-  defm: Loadxi_pat<atomic_load_16, i32, anyimm1, L2_loadruh_io>;
+  defm: Loadxi_pat<atomic_load_azext_8,  i32, anyimm0, L2_loadrub_io>;
+  defm: Loadxi_pat<atomic_load_azext_16, i32, anyimm1, L2_loadruh_io>;
   defm: Loadxi_pat<atomic_load_32, i32, anyimm2, L2_loadri_io>;
   defm: Loadxi_pat<atomic_load_64, i64, anyimm3, L2_loadrd_io>;
 }
@@ -2418,8 +2418,8 @@ let AddedComplexity = 60 in {
   def: Loada_pat;
   def: Loada_pat;
-  def: Loada_pat<atomic_load_8,  i32, addrgp, L2_loadrubgp>;
-  def: Loada_pat<atomic_load_16, i32, addrgp, L2_loadruhgp>;
+  def: Loada_pat<atomic_load_azext_8,  i32, addrgp, L2_loadrubgp>;
+  def: Loada_pat<atomic_load_azext_16, i32, addrgp, L2_loadruhgp>;
   def: Loada_pat<atomic_load_32, i32, addrgp, L2_loadrigp>;
   def: Loada_pat<atomic_load_64, i64, addrgp, L2_loadrdgp>;
 }
@@ -2463,8 +2463,8 @@ let AddedComplexity = 100 in {
   def: Loada_pat;
   def: Loada_pat;
-  def: Loada_pat<atomic_load_8,  i32, anyimm0, PS_loadrubabs>;
-  def: Loada_pat<atomic_load_16, i32, anyimm1, PS_loadruhabs>;
+  def: Loada_pat<atomic_load_azext_8,  i32, anyimm0, PS_loadrubabs>;
+  def: Loada_pat<atomic_load_azext_16, i32, anyimm1, PS_loadruhabs>;
   def: Loada_pat<atomic_load_32, i32, anyimm2, PS_loadriabs>;
   def: Loada_pat<atomic_load_64, i64, anyimm3, PS_loadrdabs>;
 }
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.td b/llvm/lib/Target/Lanai/LanaiInstrInfo.td
index 6feed27b7047b..1d968fa391c2a 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.td
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.td
@@ -845,7 +845,7 @@ def : Pat<(extloadi16 ADDRspls:$src), (i32 (LDHz_RI ADDRspls:$src))>;
 // Loads up to 32-bits are already atomic.
 // TODO: This is a workaround for a particular failing case and should be
 // handled more generally.
-def : Pat<(atomic_load_8 ADDRspls:$src), (i32 (LDBz_RI ADDRspls:$src))>;
+def : Pat<(atomic_load_azext_8 ADDRspls:$src), (i32 (LDBz_RI ADDRspls:$src))>;
 
 // GlobalAddress, ExternalSymbol, Jumptable, ConstantPool
 def : Pat<(LanaiHi tglobaladdr:$dst), (MOVHI tglobaladdr:$dst)>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 775d9289af7c4..b607dcb04149b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1779,7 +1779,7 @@ def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_8B GPR:$rj)>;
 
 /// Loads
 
-multiclass LdPat<PatFrag LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
+multiclass LdPat<PatFrags LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
   def : Pat<(vt (LoadOp BaseAddr:$rj)), (Inst BaseAddr:$rj, 0)>;
   def : Pat<(vt (LoadOp (AddrConstant GPR:$rj, simm12:$imm12))),
             (Inst GPR:$rj, simm12:$imm12)>;
@@ -1890,9 +1890,10 @@ def : Pat<(atomic_fence 5, timm), (DBAR 0b10010)>; // release
 def : Pat<(atomic_fence 6, timm), (DBAR 0b10000)>; // acqrel
 def : Pat<(atomic_fence 7, timm), (DBAR 0b10000)>; // seqcst
 
-defm : LdPat<atomic_load_8, LD_B>;
-defm : LdPat<atomic_load_16, LD_H>;
-defm : LdPat<atomic_load_32, LD_W>;
+defm : LdPat<atomic_load_asext_8, LD_B>;
+defm : LdPat<atomic_load_asext_16, LD_H>;
+defm : LdPat<atomic_load_32, LD_W>, Requires<[IsLA32]>;
+defm : LdPat<atomic_load_asext_32, LD_W>, Requires<[IsLA64]>;
 
 class release_seqcst_store<PatFrag base>
   : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
diff --git a/llvm/lib/Target/Mips/MicroMipsInstrInfo.td b/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
index 43b8eb7faf0ec..661c18e8c3952 100644
--- a/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1190,8 +1190,8 @@ def : WrapperPat, ISA_MICROMIPS;
 def : WrapperPat, ISA_MICROMIPS;
 def : WrapperPat, ISA_MICROMIPS;
 
-def : MipsPat<(atomic_load_8 addr:$a), (LB_MM addr:$a)>, ISA_MICROMIPS;
-def : MipsPat<(atomic_load_16 addr:$a), (LH_MM addr:$a)>, ISA_MICROMIPS;
+def : MipsPat<(atomic_load_asext_8 addr:$a), (LB_MM addr:$a)>, ISA_MICROMIPS;
+def : MipsPat<(atomic_load_asext_16 addr:$a), (LH_MM addr:$a)>, ISA_MICROMIPS;
 def : MipsPat<(atomic_load_32 addr:$a), (LW_MM addr:$a)>, ISA_MICROMIPS;
 
 def : MipsPat<(i32 immLi16:$imm),
diff --git a/llvm/lib/Target/Mips/Mips64InstrInfo.td b/llvm/lib/Target/Mips/Mips64InstrInfo.td
index f19eaf7a67f73..d028c95287a70 100644
--- a/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -894,9 +894,9 @@ def : MipsPat<(brcond (i32 (setne (and i32:$lhs, PowerOf2LO_i32:$mask), 0)), bb:
               ASE_MIPS64_CNMIPS;
 
 // Atomic load patterns.
-def : MipsPat<(atomic_load_8 addr:$a), (LB64 addr:$a)>, ISA_MIPS3, GPR_64;
-def : MipsPat<(atomic_load_16 addr:$a), (LH64 addr:$a)>, ISA_MIPS3, GPR_64;
-def : MipsPat<(atomic_load_32 addr:$a), (LW64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_8 addr:$a), (LB64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_16 addr:$a), (LH64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_32 addr:$a), (LW64 addr:$a)>, ISA_MIPS3, GPR_64;
 def : MipsPat<(atomic_load_64 addr:$a), (LD addr:$a)>, ISA_MIPS3, GPR_64;
 
 // Atomic store patterns.
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td
index 557e6a2c72e27..f17781dcab726 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -3358,8 +3358,8 @@ let AdditionalPredicates = [NotInMicroMips] in {
   }
 
   // Atomic load patterns.
-  def : MipsPat<(atomic_load_8 addr:$a), (LB addr:$a)>, ISA_MIPS1;
-  def : MipsPat<(atomic_load_16 addr:$a), (LH addr:$a)>, ISA_MIPS1;
+  def : MipsPat<(atomic_load_asext_8 addr:$a), (LB addr:$a)>, ISA_MIPS1;
+  def : MipsPat<(atomic_load_asext_16 addr:$a), (LH addr:$a)>, ISA_MIPS1;
   def : MipsPat<(atomic_load_32 addr:$a), (LW addr:$a)>, ISA_MIPS1;
 
   // Atomic store patterns.
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index e2864c2405967..cbf5d0188b79e 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -5084,11 +5084,11 @@ defm : TrapExtendedMnemonic<"lng", 6>;
 defm : TrapExtendedMnemonic<"u", 31>;
 
 // Atomic loads
-def : Pat<(i32 (atomic_load_8 DForm:$src)), (LBZ memri:$src)>;
-def : Pat<(i32 (atomic_load_16 DForm:$src)), (LHZ memri:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 DForm:$src)), (LBZ memri:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 DForm:$src)), (LHZ memri:$src)>;
 def : Pat<(i32 (atomic_load_32 DForm:$src)), (LWZ memri:$src)>;
-def : Pat<(i32 (atomic_load_8 XForm:$src)), (LBZX memrr:$src)>;
-def : Pat<(i32 (atomic_load_16 XForm:$src)), (LHZX memrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 XForm:$src)), (LBZX memrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 XForm:$src)), (LHZX memrr:$src)>;
 def : Pat<(i32 (atomic_load_32 XForm:$src)), (LWZX memrr:$src)>;
 
 // Atomic stores
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index 39a1ab0d388a7..3f655d9738414 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -1275,9 +1275,9 @@ let Predicates = [PCRelativeMemops] in {
             (PSTDpc $RS, $ga, 0)>;
 
   // Atomic Load
-  def : Pat<(i32 (atomic_load_8 (PPCmatpcreladdr PCRelForm:$ga))),
+  def : Pat<(i32 (atomic_load_azext_8 (PPCmatpcreladdr PCRelForm:$ga))),
             (PLBZpc $ga, 0)>;
-  def : Pat<(i32 (atomic_load_16 (PPCmatpcreladdr PCRelForm:$ga))),
+  def : Pat<(i32 (atomic_load_azext_16 (PPCmatpcreladdr PCRelForm:$ga))),
             (PLHZpc $ga, 0)>;
   def : Pat<(i32 (atomic_load_32 (PPCmatpcreladdr PCRelForm:$ga))),
             (PLWZpc $ga, 0)>;
@@ -2360,8 +2360,8 @@ let Predicates = [PrefixInstrs] in {
   def : Pat<(store i64:$rS, PDForm:$dst), (PSTD g8rc:$rS, memri34:$dst)>;
 
   // Atomic Load
-  def : Pat<(i32 (atomic_load_8 PDForm:$src)), (PLBZ memri34:$src)>;
-  def : Pat<(i32 (atomic_load_16 PDForm:$src)), (PLHZ memri34:$src)>;
+  def : Pat<(i32 (atomic_load_azext_8 PDForm:$src)), (PLBZ memri34:$src)>;
+  def : Pat<(i32 (atomic_load_azext_16 PDForm:$src)), (PLHZ memri34:$src)>;
   def : Pat<(i32 (atomic_load_32 PDForm:$src)), (PLWZ memri34:$src)>;
   def : Pat<(i64 (atomic_load_64 PDForm:$src)), (PLD memri34:$src)>;
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 5045e5eaa9408..36f26620655da 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -109,7 +109,7 @@ def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
 def : StPat<truncstorei8, SB, GPR, i16>;
 
 let Predicates = [HasAtomicLdSt] in {
-  def : LdPat<atomic_load_8, LBU, i16>;
+  def : LdPat<atomic_load_azext_8, LBU, i16>;
   def : LdPat<atomic_load_16, LH, i16>;
 
   def : StPat<atomic_store_8, SB, GPR, i16>;
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index d5af2000d0481..b867a1dab7e24 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -1919,10 +1919,10 @@ let Predicates = [HasV9] in
   def : Pat<(atomic_fence timm, timm), (MEMBARi 0xf)>;
 
 // atomic_load addr -> load addr
-def : Pat<(i32 (atomic_load_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
-def : Pat<(i32 (atomic_load_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
-def : Pat<(i32 (atomic_load_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
-def : Pat<(i32 (atomic_load_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
 def : Pat<(i32 (atomic_load_32 ADDRrr:$src)), (LDrr ADDRrr:$src)>;
 def : Pat<(i32 (atomic_load_32 ADDRri:$src)), (LDri ADDRri:$src)>;
diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index b459fbcad909f..6a6d3e069d218 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -1794,8 +1794,8 @@ multiclass ATMLDm
   def : Pat<(iAny (from ADDRzii:$addr)), (tozii MEMzii:$addr)>;
 }
 
-defm : ATMLDm<atomic_load_8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
-defm : ATMLDm<atomic_load_16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : ATMLDm<atomic_load_azext_8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : ATMLDm<atomic_load_azext_16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
 defm : ATMLDm<atomic_load_32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
 defm : ATMLDm<atomic_load_64, LDrri, LDrii, LDzri, LDzii>;
 
@@ -1824,8 +1824,8 @@ multiclass SXATMLD32m
 }
 
-defm : SXATMLDm<atomic_load_8, i8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
-defm : SXATMLDm<atomic_load_16, i16, LD2BSXrri, LD2BSXrii, LD2BSXzri, LD2BSXzii>;
+defm : SXATMLDm<atomic_load_asext_8, i8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
+defm : SXATMLDm<atomic_load_asext_16, i16, LD2BSXrri, LD2BSXrii, LD2BSXzri, LD2BSXzii>;
 
 defm : SXATMLD32m<atomic_load_32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
 
@@ -1854,9 +1854,9 @@ multiclass ZXATMLD32m
 }
 
-defm : ZXATMLDm<atomic_load_8, i8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
-defm : ZXATMLDm<atomic_load_16, i16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : ZXATMLDm<atomic_load_azext_8, i8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : ZXATMLDm<atomic_load_azext_16, i16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
 
 defm : ZXATMLD32m<atomic_load_32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
index 46bd5e42a9d52..f7f8d63b1dd57 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -150,10 +150,10 @@ defm ATOMIC_LOAD32_U_I64 : AtomicLoad<i64, "i64.atomic.load32_u", 0x35>;
 
 // zero-extending.
 def zext_aload_8_64 : PatFrag<(ops node:$addr),
-                              (i64 (zext (i32 (atomic_load_8 node:$addr))))>;
+                              (i64 (zext (i32 (atomic_load_azext_8 node:$addr))))>;
 def zext_aload_16_64 : PatFrag<(ops node:$addr),
-                               (i64 (zext (i32 (atomic_load_16 node:$addr))))>;
+                               (i64 (zext (i32 (atomic_load_azext_16 node:$addr))))>;
 def zext_aload_32_64 : PatFrag<(ops node:$addr),
                                (i64 (zext (i32 (atomic_load_32 node:$addr))))>;
@@ -163,9 +163,9 @@ def zext_aload_32_64 :
 // results) and select a zext load; the next instruction will be sext_inreg
 // which is selected by itself.
 def sext_aload_8_64 :
-  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
+  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_azext_8 node:$addr)))>;
 def sext_aload_16_64 :
-  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
+  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_azext_16 node:$addr)))>;
 
 // Select zero-extending loads
 defm : LoadPat<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
@@ -173,8 +173,8 @@ defm : LoadPat<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
 defm : LoadPat<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
 defm : LoadPat<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
 // Select sign-extending loads
-defm : LoadPat<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
-defm : LoadPat<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
+defm : LoadPat<i32, atomic_load_azext_8, "ATOMIC_LOAD8_U_I32">;
+defm : LoadPat<i32, atomic_load_azext_16, "ATOMIC_LOAD16_U_I32">;
 defm : LoadPat<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
 defm : LoadPat<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
 // 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
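For downstream targets doing the same migration, the mechanical rule this patch applies is: every pattern that matched a bare extending atomic load fragment must now name the extension the selected instruction actually performs. Below is a minimal TableGen sketch of that rewrite; `MYLDBZ` (a zero-extending byte load), `MYLDB` (a sign-extending byte load), and the `addr` operand are hypothetical names for illustration, not part of this patch. Per the `PatFrags` definitions in `TargetSelectionDAG.td`, `atomic_load_azext_8` matches zero- or any-extending loads, and `atomic_load_asext_8` matches sign- or any-extending loads.

```tablegen
// Hypothetical migration sketch; MYLDBZ, MYLDB, and addr are made-up names.

// Before: a bare atomic_load_8 said nothing about the high bits, so either
// instruction could (incorrectly) be used to match it.
//   def : Pat<(i32 (atomic_load_8 addr:$src)), (MYLDBZ addr:$src)>;

// After: the fragment states the extension the instruction implements.
def : Pat<(i32 (atomic_load_azext_8 addr:$src)), (MYLDBZ addr:$src)>;
def : Pat<(i32 (atomic_load_asext_8 addr:$src)), (MYLDB addr:$src)>;
```

The same substitution applies at wider widths (for example `atomic_load_azext_16`, or `atomic_load_asext_32` on targets whose 32-bit loads sign-extend, as in the Mips64 and LoongArch hunks above).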