diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 4480ced637456..c8f31221c35c0 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -268,6 +268,7 @@ class LLVM_ABI TargetLoweringBase { CmpArithIntrinsic, // Use a target-specific intrinsic for special compare // operations; used by X86. Expand, // Generic expansion in terms of other atomic operations. + CustomExpand, // Custom target-specific expansion using TLI hooks. // Rewrite to a non-atomic form for use in a known non-preemptible // environment. @@ -2273,6 +2274,18 @@ class LLVM_ABI TargetLoweringBase { "Generic atomicrmw expansion unimplemented on this target"); } + /// Perform an atomic store using a target-specific way. + virtual void emitExpandAtomicStore(StoreInst *SI) const { + llvm_unreachable( + "Generic atomic store expansion unimplemented on this target"); + } + + /// Perform an atomic load using a target-specific way. + virtual void emitExpandAtomicLoad(LoadInst *LI) const { + llvm_unreachable( + "Generic atomic load expansion unimplemented on this target"); + } + + /// Perform a cmpxchg expansion using a target-specific method. virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const { llvm_unreachable("Generic cmpxchg expansion unimplemented on this target"); @@ -2377,8 +2390,8 @@ class LLVM_ABI TargetLoweringBase { } /// Returns how the given (atomic) store should be expanded by the IR-level - /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try - /// to use an atomicrmw xchg. + /// AtomicExpand pass into. For instance AtomicExpansionKind::CustomExpand + /// will call the target-specific emitExpandAtomicStore hook. 
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const { return AtomicExpansionKind::None; } diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp index 278dd6560e736..601185d0d3cb2 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -84,7 +84,7 @@ class AtomicExpandImpl { bool expandAtomicLoadToCmpXchg(LoadInst *LI); StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI); bool tryExpandAtomicStore(StoreInst *SI); - void expandAtomicStore(StoreInst *SI); + void expandAtomicStoreToXChg(StoreInst *SI); bool tryExpandAtomicRMW(AtomicRMWInst *AI); AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI); Value * @@ -537,6 +537,9 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) { case TargetLoweringBase::AtomicExpansionKind::NotAtomic: LI->setAtomic(AtomicOrdering::NotAtomic); return true; + case TargetLoweringBase::AtomicExpansionKind::CustomExpand: + TLI->emitExpandAtomicLoad(LI); + return true; default: llvm_unreachable("Unhandled case in tryExpandAtomicLoad"); } @@ -546,8 +549,11 @@ bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) { switch (TLI->shouldExpandAtomicStoreInIR(SI)) { case TargetLoweringBase::AtomicExpansionKind::None: return false; + case TargetLoweringBase::AtomicExpansionKind::CustomExpand: + TLI->emitExpandAtomicStore(SI); + return true; case TargetLoweringBase::AtomicExpansionKind::Expand: - expandAtomicStore(SI); + expandAtomicStoreToXChg(SI); return true; case TargetLoweringBase::AtomicExpansionKind::NotAtomic: SI->setAtomic(AtomicOrdering::NotAtomic); @@ -620,7 +626,7 @@ StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) { return NewSI; } -void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) { +void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) { // This function is only called on atomic stores that are too large to be // atomic if implemented as a native store. 
So we replace them by an // atomic swap, that can be implemented for example as a ldrex/strex on ARM @@ -741,7 +747,7 @@ bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) { } case TargetLoweringBase::AtomicExpansionKind::NotAtomic: return lowerAtomicRMWInst(AI); - case TargetLoweringBase::AtomicExpansionKind::Expand: + case TargetLoweringBase::AtomicExpansionKind::CustomExpand: TLI->emitExpandAtomicRMW(AI); return true; default: @@ -1695,7 +1701,7 @@ bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) { return true; case TargetLoweringBase::AtomicExpansionKind::NotAtomic: return lowerAtomicCmpXchgInst(CI); - case TargetLoweringBase::AtomicExpansionKind::Expand: { + case TargetLoweringBase::AtomicExpansionKind::CustomExpand: { TLI->emitExpandAtomicCmpXchg(CI); return true; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 66c1dfc71c2f5..080e30d428660 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -17823,7 +17823,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { if (AS == AMDGPUAS::FLAT_ADDRESS && DL.getTypeSizeInBits(RMW->getType()) == 64 && flatInstrMayAccessPrivate(RMW)) - return AtomicExpansionKind::Expand; + return AtomicExpansionKind::CustomExpand; auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) { OptimizationRemarkEmitter ORE(RMW->getFunction()); @@ -17898,7 +17898,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { // does. InstCombine transforms these with 0 to or, so undo that. if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand()); ConstVal && ConstVal->isNullValue()) - return AtomicExpansionKind::Expand; + return AtomicExpansionKind::CustomExpand; } // If the allocation could be in remote, fine-grained memory, the rmw @@ -18027,9 +18027,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { // fadd. 
if (Subtarget->hasLDSFPAtomicAddF32()) { if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts()) - return AtomicExpansionKind::Expand; + return AtomicExpansionKind::CustomExpand; if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts()) - return AtomicExpansionKind::Expand; + return AtomicExpansionKind::CustomExpand; } } } @@ -18109,7 +18109,7 @@ SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const { // If a 64-bit flat atomic may alias private, we need to avoid using the // atomic in the private case. - return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::Expand + return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::CustomExpand : AtomicExpansionKind::None; } diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 5b2d185594f44..e3929492f8c45 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -7893,7 +7893,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And || AI->getOperation() == AtomicRMWInst::Or || AI->getOperation() == AtomicRMWInst::Xor)) - return AtomicExpansionKind::Expand; + return AtomicExpansionKind::CustomExpand; if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32) return AtomicExpansionKind::CmpXChg; }