#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace CodeGen;
namespace {
class AtomicInfo {
  // ... (member state: CGF, AtomicTy, ValueTy, the size/alignment pairs,
  //      LVal, BFI, EvaluationKind, UseLibcall)
public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      // ...
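      // Illustrative note (editorial addition, not from the file): for a
      // padded atomic such as
      //   _Atomic struct { char c[3]; } x;   // value: 24 bits
      // typical targets round the atomic representation up to a power of
      // two, so AtomicSizeInBits becomes 32 while ValueSizeInBits stays 24;
      // hasPadding() below is derived from exactly this difference.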
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .alignTo(lvalue.getAlignment()));
      llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
      StoragePtr = CGF.Builder.CreateAddrSpaceCast(StoragePtr, CGF.UnqualPtrTy,
                                                   "atomic_bitfield_base");
      BFI = OrigBFI;
      BFI.Offset = Offset;
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      LVal = LValue::MakeBitfield(
          Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
          lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(/*numBits=*/32,
                         C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                          ArraySizeModifier::Normal,
                                          /*IndexTypeQuals=*/0);
      }
      // ...
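      // Worked example (editorial illustration, not from the file): for a
      // 3-bit field packed at bit offset 9 in a 4-byte-aligned word,
      // Offset = 9 and OrigBFI.Size = 3, so Offset + Size + CharWidth - 1 =
      // 19 bits -> 2 chars, which alignTo(4) rounds up to 4 chars; the
      // bit-field is therefore accessed atomically as a single 32-bit
      // integer covering its whole storage unit.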
    } else if (lvalue.isVectorElt()) {
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      // ...
    } else {
      assert(lvalue.isExtVectorElt());
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      // ...
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
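  // Note (editorial): hasBuiltinAtomic() is the target's verdict on whether
  // an atomic of this size and alignment can be lowered to native
  // instructions; when it returns false, every operation on this l-value is
  // routed through the __atomic_* runtime libcalls instead (see
  // shouldUseLibcall() below).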
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    // ... (return the raw pointer for a simple, bit-field, vector-element,
    //      or ext-vector-element l-value)
  }
  Address getAtomicAddress() const {
    // ... (pick ElTy to match the l-value kind)
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }

  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }

  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    // ...
  }

  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

  RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                SourceLocation Loc, bool AsValue,
                                bool CmpXchg = false) const;

  /// Copy an r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    Address addr = getAtomicAddress();
    // ...
    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }

  /// Emits an atomic load as an l-value or an r-value.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile);

  /// Emits an atomic compare-and-exchange sequence.
  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  /// Emits an atomic update of the l-value.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  /// Materialize an atomic r-value in atomic-layout memory.
  Address CreateTempAlloca() const;

private:
  bool requiresMemSetZero(llvm::Type *type) const;

  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, llvm::AtomicOrdering AO,
                             bool IsVolatile);
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                bool CmpXchg = false);

  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);

  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);

  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
} // end anonymous namespace
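// A minimal usage sketch of the helper class above (hypothetical caller
// code; LV, Loc, AO and IsVolatile are assumed to be already-formed values):
//
//   AtomicInfo Atomics(CGF, LV);
//   if (Atomics.shouldUseLibcall()) {
//     // ... emit a __atomic_* runtime call ...
//   } else {
//     RValue R = Atomics.EmitAtomicLoad(AggValueSlot::ignored(), Loc,
//                                       /*AsValue=*/true, AO, IsVolatile);
//   }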
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type. This is needed when emitting a bit-field.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  // ... (arrange the call info and function type for the runtime call)
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  // ...
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSizeInBits(type) == expectedSize);
}
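// Example (editorial illustration): on x86-64, long double is an x86_fp80
// whose store size is 80 bits inside a 128-bit _Atomic slot, so
// isFullSizeType() returns false there and the padding bytes must be zeroed
// before any bitwise comparison of the object can be meaningful.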
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the type
  // uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  // ...
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // ...
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);
  // ...
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block holds the store done when the operation failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  // This basic block is the exit point of the operation.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Branch straight to the exit point on success; otherwise write the
  // observed value back into 'expected' first.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // ...
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // ...
}
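// Shape of the IR this produces for a strong 32-bit exchange (a sketch under
// the assumption of seq_cst orderings; register names are illustrative):
//
//   %pair = cmpxchg ptr %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
//
// where the store_expected block writes %old back into the 'expected'
// temporary, matching the C11 atomic_compare_exchange semantics.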
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // A failure ordering of release or acq_rel is invalid in C11/C++11,
      // so map those down to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // ...
    return;
  }

  // The failure ordering is a runtime value: create one block per possible
  // ordering and switch over it.
  // ...
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // consume is implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
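// The atomicrmw min/max forms return the *old* value, but the
// __atomic_{min,max}_fetch builtins must return the *new* one. The helper
// below recomputes the chosen extremum in ordinary IR; e.g. (illustrative)
// for __atomic_max_fetch the result is select(old > rhs, old, rhs).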
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned, llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Weakness is a runtime value: emit both forms and switch on it.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    llvm::AtomicRMWInst *RMWI =
        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
                              CGF.Builder.getInt8(1), Order, Scope, E);
    RMWI->setVolatile(E->isVolatile());
    llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
    // ...
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    llvm::StoreInst *Store =
        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  // ...
}
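// Illustrative IR for __atomic_nand_fetch (a sketch; value names made up):
//
//   %old = atomicrmw nand ptr %p, i32 %v seq_cst
//   %and = and i32 %old, %v       ; the re-applied PostOp
//   %res = xor i32 %and, -1       ; the CreateNot above
//
// i.e. the builtin's result is ~(%old & %v), the freshly written value,
// while the plain __atomic_fetch_nand simply returns %old.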
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    llvm::SyncScope::ID SS;
    if (CGF.getLangOpts().OpenCL)
      // OpenCL atomics without a memory_scope argument behave as if the
      // scope were memory_scope_device.
      SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.getLangOpts(),
                                                   SyncScope::OpenCLDevice,
                                                   Order, CGF.getLLVMContext());
    else
      SS = llvm::SyncScope::System;
    // ...
    return;
  }

  // Handle a constant scope directly.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    // ... (map the constant to a sync scope ID and emit the op once)
    return;
  }

  // Handle a non-constant scope: switch over the runtime scope values.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported scope value is seen at run time, use the scope
  // model's fallback value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    // ... (emit the op with the mapped sync scope)
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
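// Note (editorial): a constant scope operand (e.g. the predefined
// __MEMORY_SCOPE_DEVICE value used by the __scoped_atomic builtins) maps
// straight to an LLVM sync scope ID, so the common case costs nothing; only
// a genuinely runtime scope value pays for the switch emitted above.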
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
  // ...
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned)
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  if (Oversized)
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  bool ShouldCastToIntPtrTy = true;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    // ... (for pointer operands, scale the addend by the pointee size: the
    //      C11 builtins do the multiply, the GNU ones leave it to the user)
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);
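  // Example of this rule (editorial illustration): Size = 8 or 16 stays
  // eligible for inline lowering, but a 24-byte _Atomic struct (neither a
  // power of two nor <= 16 bytes) always takes the libcall path, regardless
  // of what the target reported earlier.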
  // In OpenCL mode the libcalls take pointers in the generic address space.
  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
    if (!E->isOpenCL())
      return V;
    // ...
    auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
    return getTargetHooks().performAddrSpaceCast(*this, V, AS, DestType);
  };
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  std::string LibCallName;
  QualType RetTy;
  bool HaveRetTy = false;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  // There is only one libcall for compare and exchange, because there is no
  // optimisation benefit possible from a libcall version of a weak compare
  // and exchange.
  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
  //                                void *desired, int success, int failure)
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    LibCallName = "__atomic_compare_exchange";
    // ... (RetTy = bool; append the expected/desired pointers and both
    //      ordering arguments)
    break;

  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
  //                        int order)
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    LibCallName = "__atomic_exchange";
    // ...
    break;

  // void __atomic_store(size_t size, void *mem, void *val, int order)
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    LibCallName = "__atomic_store";
    // ...
    break;

  // void __atomic_load(size_t size, void *mem, void *return, int order)
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
    LibCallName = "__atomic_load";
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    llvm_unreachable("Integral atomic operations always become atomicrmw!");
  }

  if (E->isOpenCL()) {
    LibCallName =
        std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__atomic_clear;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
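  // For reference (a sketch, not emitted verbatim here): the generic form of
  // the load libcall selected above is
  //   void __atomic_load(size_t size, void *mem, void *return, int order);
  // and for OpenCL atomics (E->isOpenCL()) the drop_front(1) splice above
  // turns "__atomic_load" into "__opencl_atomic_load".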
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    // ... (convert the result slot and return)
  }
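  // Summary of the constant-order mapping above (grounded in the switch):
  //   relaxed            -> monotonic
  //   consume, acquire   -> acquire          (skipped for stores: UB guard)
  //   release            -> release          (skipped for loads: UB guard)
  //   acq_rel            -> acquire-release  (RMW operations only)
  //   seq_cst            -> sequentially consistent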
  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split. MonotonicBB is arbitrarily chosen as
  // the default case; in practice this doesn't matter unless someone uses an
  // ordering that doesn't fold to a constant.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}

Address AtomicInfo::castToAtomicIntPointer(Address Addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return Addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  // ...
}

static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
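// Put differently (editorial notes on the predicate above): ordinary IEEE
// floats ride through the FP atomic instructions untouched, x86_fp80 is
// always funneled through an iN of the full atomic width, and cmpxchg forces
// even IEEE floats to integers because LLVM's cmpxchg only accepts integer
// or pointer operands.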
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try not to in some easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar /* ... */) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    // ...
    assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
           "Different integer types.");
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  // ... (reuse the result slot for aggregates, otherwise allocate)
  Temp = CreateTempAlloca();

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  // ... (size, object pointer, result pointer, and the CABI ordering)
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic
/// if we are operating under /volatile:ms *and* the LValue itself is
/// volatile and performing such an operation can be performed without a
/// libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // ... (MSVC caps this at pointer width)
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
  // ...
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type. Note that the r-value we
/// produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into atomic-layout memory, creating a bit-pattern
/// suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // ...
  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();
  // ...
}

Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // ...
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }

  // Otherwise, we need to go through memory: put the r-value there, then
  // cast the temporary to the atomic int type and pull a value out.
  Address Addr = materializeRValue(RVal);
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  // ... (size and the three pointers)
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue OldRVal,
                                  const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                  Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  if (AtomicLVal.isSimple()) {
    // ...
  } else {
    // Build a new l-value for the temporary address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    // ...
  }
  // ...
  RValue NewRVal = UpdateOp(UpRVal);
  // ...
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple l-values perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  // ... (apply UpdateOp and reload the desired value)
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
}
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  // ...
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple l-values perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  // ... (apply the update and reload the desired value)
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      Address srcAddr = atomics.materializeRValue(rvalue);
      // ... (emit the __atomic_store libcall)
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    // ...
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
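// Rationale (editorial note): acquire and acquire-release make no sense for
// a plain store, so the code above quietly downgrades them; e.g. an atomic
// store requested with an acquire ordering is emitted as a monotonic store
// rather than rejected at the IR level.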
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  // ...
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID,
                                   const AtomicExpr *AE) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
  return RMW;
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    // ...
  }
  case TEK_Complex: {
    // ...
  }
  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
    // ...
  }
  }
  llvm_unreachable("bad evaluation kind");
}