#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace CodeGen;

    return llvm::CallingConv::C;
    return llvm::CallingConv::X86_StdCall;
    return llvm::CallingConv::X86_FastCall;
    return llvm::CallingConv::X86_RegCall;
    return llvm::CallingConv::X86_ThisCall;
    return llvm::CallingConv::Win64;
    return llvm::CallingConv::X86_64_SysV;
    return llvm::CallingConv::ARM_AAPCS;
    return llvm::CallingConv::ARM_AAPCS_VFP;
    return llvm::CallingConv::Intel_OCL_BI;
    return llvm::CallingConv::C;
    return llvm::CallingConv::X86_VectorCall;
    return llvm::CallingConv::AArch64_VectorCall;
    return llvm::CallingConv::AArch64_SVE_VectorCall;
    return llvm::CallingConv::SPIR_FUNC;
    return llvm::CallingConv::PreserveMost;
    return llvm::CallingConv::PreserveAll;
    return llvm::CallingConv::Swift;
    return llvm::CallingConv::SwiftTail;
    return llvm::CallingConv::M68k_RTD;
    return llvm::CallingConv::PreserveNone;

#define CC_VLS_CASE(ABI_VLEN)                                                  \
  case CC_RISCVVLSCall_##ABI_VLEN:                                             \
    return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
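// Illustrative sketch (the case labels are assumed; they are not shown in
// this excerpt): the returns above are the bodies of the switch in
// ClangCallConvToLLVMCallConv, which maps a Clang AST calling convention to
// the matching LLVM IR convention, e.g.
//   case CC_X86StdCall:  return llvm::CallingConv::X86_StdCall;
//   case CC_Swift:       return llvm::CallingConv::Swift;
// The CC_VLS_CASE macro expands one such case per supported RISC-V VLS
// ABI_VLEN value.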
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  paramInfos.resize(prefixArgs);

    paramInfos.push_back(ParamInfo);

    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  paramInfos.resize(totalArgs);
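  // Each pass_object_size parameter introduces one extra, implicit argument,
  // so an empty ExtParameterInfo slot is appended for it; any remaining slots
  // up to totalArgs are then default-filled by the final resize.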
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());

  unsigned PrefixSize = prefix.size();

  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())

                                 FTP->getExtInfo(), paramInfos, Required);

  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,

                                          bool IsTargetDefaultMSABI) {

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())

  if (D->hasAttr<AArch64VectorPcsAttr>())

  if (D->hasAttr<AArch64SVEPcsAttr>())

  if (D->hasAttr<RISCVVectorCCAttr>())

  if (RISCVVLSCCAttr *PCS = D->getAttr<RISCVVLSCCAttr>()) {
    switch (PCS->getVectorWidth()) {
      llvm_unreachable("Invalid RISC-V VLS ABI VLEN");
#define CC_VLS_CASE(ABI_VLEN)                                                  \
    return CC_RISCVVLSCall_##ABI_VLEN;

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,

  if (FD->hasAttr<CUDAGlobalAttr>()) {

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

      !Target.getCXXABI().hasConstructorVariants();

  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    if (auto Inherited = CD->getInheritedConstructor())

  if (!paramInfos.empty()) {
    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
    paramInfos.append(AddedArgs.Suffix,

      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())

                          argTypes, extInfo, paramInfos, required);
  for (auto &arg : args)

  for (auto &arg : args)

                          unsigned totalArgs) {

    unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs) {

  for (const auto &Arg : args)

  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

          FPT, TotalPrefixArgs + ExtraSuffixArgs)

  if (PassProtoArgs && FPT->hasExtParameterInfos()) {

                          ArgTypes, Info, ParamInfos, Required);

  if (MD->isImplicitObjectMemberFunction())

  assert(isa<FunctionType>(FTy));

  if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()) &&

                          {}, noProto->getExtInfo(), {},

                                     I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);

  bool IsTargetDefaultMSABI =

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))

  assert(MD->isVirtual() && "only methods have thunks");

  ArgTys.push_back(*FTP->param_type_begin());

    ArgTys.push_back(Context.IntTy);

                                      unsigned numExtraRequiredArgs,
                                      bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

    if (proto->isVariadic())

    if (proto->hasExtParameterInfos())

                 args, cast<FunctionNoProtoType>(fnType))) {

  for (const auto &arg : args)

                          paramInfos, required);

                          chainCall ? 1 : 0, chainCall);

  for (const auto &Arg : args)

  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");

                          paramInfos, required);

  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())

  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());

  assert(llvm::all_of(argTypes,

  llvm::FoldingSetNodeID ID;

  bool isDelegateCall =

                          info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);

                          info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  assert(inserted && "Recursively being processed?");

  if (CC == llvm::CallingConv::SPIR_KERNEL) {

    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)

  bool erased = FunctionsBeingProcessed.erase(FI);
  assert(erased && "Not in set?");

                                       bool chainCall, bool delegateCall,

  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer = operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
      argTypes.size() + 1, paramInfos.size()));

  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;

  FI->Required = required;

  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
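  // Sketch of the layout implied by the indexing above: CGFunctionInfo keeps
  // its data as trailing objects, with slot 0 of the ArgInfo buffer holding
  // the return type, slots 1..NumArgs holding the argument types, and the
  // ExtParameterInfos living in a second trailing buffer.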
struct TypeExpansion {
  enum TypeExpansionKind {

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {

      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {

  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_None; }

static std::unique_ptr<TypeExpansion>

    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),

    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField())

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");

        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;

        Fields.push_back(LargestFD);

      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField())

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);

    return std::make_unique<RecordExpansion>(std::move(Bases),

    return std::make_unique<ComplexExpansion>(CT->getElementType());

  return std::make_unique<NoExpansion>();
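// The expansion kinds above drive argument "expansion": constant arrays
// expand to one value per element, records expand to their bases and fields
// (unions contribute only their largest field), _Complex expands to its real
// and imaginary parts, and everything else is TEK_None (passed as-is).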
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  if (isa<ComplexExpansion>(Exp.get()))

  assert(isa<NoExpansion>(Exp.get()));

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {

    assert(isa<NoExpansion>(Exp.get()));

                                   ConstantArrayExpansion *CAE,

                                   llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {

                                        llvm::Function::arg_iterator &AI) {

         "Unexpected non-simple lvalue during struct expansion.");

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeFromArgs(BS->getType(), SubLV, AI);

    for (auto FD : RExp->Fields) {

      ExpandTypeFromArgs(FD->getType(), SubLV, AI);

  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;

  assert(isa<NoExpansion>(Exp.get()));
  llvm::Value *Arg = &*AI++;

  if (Arg->getType()->isPointerTy()) {

    Arg = Builder.CreateBitCast(Arg, Addr.getElementType());

void CodeGenFunction::ExpandTypeToArgs(

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,

    for (auto FD : RExp->Fields) {

      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,

  } else if (isa<ComplexExpansion>(Exp.get())) {

    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;

    assert(isa<NoExpansion>(Exp.get()));

    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
                                  const Twine &Name = "tmp") {

                                      llvm::StructType *SrcSTy,

  if (SrcSTy->getNumElements() == 0)

  uint64_t FirstEltSize = CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))

  if (Val->getType() == Ty)

  if (isa<llvm::PointerType>(Val->getType())) {

    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))

  if (Val->getType() != DestIntTy) {

    if (DL.isBigEndian()) {

      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");

        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");

      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
                                  DstSize.getFixedValue(), CGF);

  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {

  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {

  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {

      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
                ScalableDstTy->getElementCount().getKnownMinValue(), 8));

      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {

        auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
            ScalableDstTy, PoisonVec, Load, uint64_t(0), "cast.scalable");
        ScalableDstTy = cast<llvm::ScalableVectorType>(
            llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
        if (Result->getType() != ScalableDstTy)

        if (Result->getType() != Ty)

      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));

                               llvm::TypeSize DstSize,
                               bool DstIsVolatile) {

  llvm::Type *SrcTy = Src->getType();

  if (llvm::StructType *DstSTy =

    assert(!SrcSize.isScalable());
                          SrcSize.getFixedValue(), *this);

  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&

  } else if (llvm::StructType *STy =
                 dyn_cast<llvm::StructType>(Src->getType())) {

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {

      llvm::Value *Elt = Builder.CreateExtractValue(Src, i);

  } else if (SrcTy->isIntegerTy()) {

    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);

static std::pair<llvm::Value *, bool>
                       llvm::ScalableVectorType *FromTy, llvm::Value *V,
                       StringRef Name = "") {

  if (FromTy->getElementType()->isIntegerTy(1) &&
      ToTy->getElementType() == CGF.Builder.getInt8Ty()) {
    if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
      FromTy = llvm::ScalableVectorType::get(
          FromTy->getElementType(),
          llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
      llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
      V = CGF.Builder.CreateInsertVector(FromTy, ZeroVec, V, uint64_t(0));

    FromTy = llvm::ScalableVectorType::get(
        ToTy->getElementType(),
        FromTy->getElementCount().getKnownMinValue() / 8);
    V = CGF.Builder.CreateBitCast(V, FromTy);

  if (FromTy->getElementType() == ToTy->getElementType()) {
    V->setName(Name + ".coerce");
    V = CGF.Builder.CreateExtractVector(ToTy, V, uint64_t(0), "cast.fixed");
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;

  unsigned TotalIRArgs;

    unsigned PaddingArgIndex;

    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),

                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;

  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;

    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;

    auto &IRArgs = ArgInfo[ArgNo];

      IRArgs.PaddingArgIndex = IRArgNo++;

      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());

        IRArgs.NumberOfArgs = STy->getNumElements();

        IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 0;

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;

    if (IRArgNo == 1 && SwapThisWithSRet)

  assert(ArgNo == ArgInfo.size());

    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
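// Illustrative mapping (hypothetical signature, not from this file): for a
// function whose return is passed indirectly and whose first parameter is
// coerced to a two-element struct, the IR argument list looks like
//   IR arg 0:    sret pointer       (getSRetArgNo() == 0)
//   IR args 1-2: first parameter    (getIRArgs(0) == {1, 2})
// with a trailing inalloca pointer appended last when one is required.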
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());

    switch (BT->getKind()) {

    case BuiltinType::Float:

    case BuiltinType::Double:

    case BuiltinType::LongDouble:

    if (BT->getKind() == BuiltinType::LongDouble)

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;

    llvm_unreachable("Invalid ABI kind for return argument");

    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);

  if (IRFunctionArgs.hasSRetArg()) {
    ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(

  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =

  for (; it != ie; ++it, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);

        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);

        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

        *ArgTypesIter++ = EltTy;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

  bool Erased = FunctionsBeingProcessed.erase(&FI);
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
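// Example of the resulting IR type (sketch, assuming an indirectly returned
// record): the LLVM return type becomes void, the sret pointer occupies its
// assigned slot in ArgTypes, and each Clang parameter fills exactly the IR
// slots reserved for it by ClangToLLVMArgMapping above.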
                                      llvm::AttrBuilder &FuncAttrs,

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");

    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");

    FuncAttrs.addAttribute("aarch64_za_state_agnostic");

    FuncAttrs.addAttribute("aarch64_preserves_za");

    FuncAttrs.addAttribute("aarch64_in_za");

    FuncAttrs.addAttribute("aarch64_out_za");

    FuncAttrs.addAttribute("aarch64_inout_za");

    FuncAttrs.addAttribute("aarch64_preserves_zt0");

    FuncAttrs.addAttribute("aarch64_in_zt0");

    FuncAttrs.addAttribute("aarch64_out_zt0");

    FuncAttrs.addAttribute("aarch64_inout_zt0");
                                  const Decl *Callee) {

  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");

  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));

    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
      return ClassDecl->hasTrivialDestructor();

                                   const Decl *TargetDecl) {

  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))

  if (!Module.getLangOpts().CPlusPlus)

  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())

  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {

    if (VDecl->isExternC())

  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);

                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());

                                     llvm::AttrBuilder &FuncAttrs) {

    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    llvm::AttrBuilder &FuncAttrs) {

    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {

      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);

      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);

    switch (CodeGenOpts.getFramePointer()) {

      FuncAttrs.addAttribute("frame-pointer",
                                 CodeGenOpts.getFramePointer()));

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

      FuncAttrs.addAttribute("no-trapping-math", "true");

    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
         LangOpts.getDefaultFPContractMode() ==
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates", llvm::join(Recips, ","));

      FuncAttrs.addAttribute("prefer-vector-width",

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");

    StringRef Var, Value;

    FuncAttrs.addAttribute(Var, Value);

                                      const llvm::Function &F,

  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;

  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {

      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;

        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
                                             bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());

  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

                                        CodeGenOpts, LangOpts,

  if (!WillInternalize && F.isInterposable()) {

    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =

  if (DenormModeToMergeF32.isValid()) {

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {
    FuncAttrs.addAttribute("denormal-fp-math",

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {
    FuncAttrs.addAttribute("denormal-fp-math-f32",

  F.removeFnAttrs(AttrsToRemove);

  F.addFnAttrs(FuncAttrs);

void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,

  if (!AttrOnCallSite)

  if (!AttrOnCallSite)

                                                     llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,

  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);

                               const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {

    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {

    FuncAttrs.addAttribute("no-builtins");

  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");

  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);

                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
                              bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);

  if (!DL.typeSizeEqualsStoreSize(Ty))

    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))

  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))

  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))

                               unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);

  if (ArgNo >= NumRequiredArgs)

  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())

  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))

  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
           llvm::all_of(ST->elements(),
                        llvm::AttributeFuncs::isNoFPClassCompatibleType);

  llvm::FPClassTest Mask = llvm::fcNone;

  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;

                                                 llvm::AttributeList &Attrs) {
  if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
    Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
    llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(

                                           llvm::AttributeList &AttrList,
                                           bool AttrOnCallSite, bool IsThunk) {

    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;

  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);

      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {

      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {

        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);

      const bool IsVirtualCall = MD && MD->isVirtual();

      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {

      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

        FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

        FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    if (const auto *RA = TargetDecl->getAttr<RestrictAttr>();
        RA && RA->getDeallocator() == nullptr)
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
    if (TargetDecl->hasAttr<BPFFastCallAttr>())
      FuncAttrs.addAttribute("bpf_fastcall");

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (DeviceKernelAttr::isOpenCLSpelling(
            TargetDecl->getAttr<DeviceKernelAttr>()) &&

        FuncAttrs.addAttribute("uniform-work-group-size", "true");

        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&

      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");

  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {

          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));

    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

    if (TargetDecl->hasAttr<NoConvergentAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {

      if (CodeGenOpts.DisableTailCalls)

        if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
            TargetDecl->hasAttr<AnyX86InterruptAttr>())

        if (CodeGenOpts.NoEscapingBlockTailCalls) {
          if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
            if (!BD->doesNotEscape())

    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    static const llvm::StringSet<> ReturnsTwiceFn{
        "_setjmpex", "setjmp",      "_setjmp", "vfork",
        "sigsetjmp", "__sigsetjmp", "savectx", "getcontext"};
    if (ReturnsTwiceFn.contains(Name))
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
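    // Known setjmp-family entry points are treated as returns_twice even when
    // the declaration carries no attribute, since resuming through them
    // re-enters the caller a second time.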
  GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);

  if (!MSHotPatchFunctions.empty()) {
    bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);

      FuncAttrs.addAttribute("marked_for_windows_hot_patching");

    FuncAttrs.addAttribute("loader-replaceable");

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  if (CodeGenOpts.EnableNoundefAttrs &&

    RetAttrs.addAttribute(llvm::Attribute::NoUndef);

      RetAttrs.addAttribute(llvm::Attribute::SExt);

      RetAttrs.addAttribute(llvm::Attribute::ZExt);

      RetAttrs.addAttribute(llvm::Attribute::NoExt);

      RetAttrs.addAttribute(llvm::Attribute::InReg);

    AddPotentialArgAccess();

    llvm_unreachable("Invalid ABI kind for return argument");

      RetAttrs.addDereferenceableAttr(

      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);

      llvm::Align Alignment =

      RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {

    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);

      SRETAttrs.addAttribute(llvm::Attribute::InReg);

    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {

    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    if (!CodeGenOpts.NullPointerIsValid &&

      Attrs.addAttribute(llvm::Attribute::NonNull);

      Attrs.addDereferenceableOrNullAttr(

    llvm::Align Alignment =

    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);

       I != E; ++I, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {

      ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

              .addAttribute(llvm::Attribute::InReg));

    if (CodeGenOpts.EnableNoundefAttrs &&

      Attrs.addAttribute(llvm::Attribute::NoUndef);

        Attrs.addAttribute(llvm::Attribute::SExt);

        Attrs.addAttribute(llvm::Attribute::ZExt);

        Attrs.addAttribute(llvm::Attribute::NoExt);

        Attrs.addAttribute(llvm::Attribute::Nest);

        Attrs.addAttribute(llvm::Attribute::InReg);

      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

        Attrs.addAttribute(llvm::Attribute::InReg);

        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

          Attrs.addAttribute(llvm::Attribute::DeadOnReturn);

      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() ==

        Attrs.addAttribute(llvm::Attribute::NoAlias);

      AddPotentialArgAccess();

      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));

      AddPotentialArgAccess();

        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);

        llvm::Align Alignment =

        Attrs.addAlignmentAttr(Alignment);

        DeviceKernelAttr::isOpenCLSpelling(
            TargetDecl->getAttr<DeviceKernelAttr>()) &&

        llvm::Align Alignment =

        Attrs.addAlignmentAttr(Alignment);

      Attrs.addAttribute(llvm::Attribute::NoAlias);

      Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));

      Attrs.addAttribute(llvm::Attribute::NoAlias);

      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {

        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());

      Attrs.addAttribute(llvm::Attribute::SwiftError);

      Attrs.addAttribute(llvm::Attribute::SwiftSelf);

      Attrs.addAttribute(llvm::Attribute::SwiftAsync);

      Attrs.addCapturesAttr(llvm::CaptureInfo::none());

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(

  AttrList = llvm::AttributeList::get(
                                    llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  if (value->getType() == varType)

  assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
         "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");

                                             QualType ArgType, unsigned ArgNo) {

    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())

    if (NNAttr->isNonNull(ArgNo))

    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();

      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);

  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),

  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);

  ArgVals.reserve(Args.size());

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
       ++i, ++info_it, ++ArgNo) {

        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

            llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
        ParamAddr = AlignedTemp;

      auto AI = Fn->getArg(FirstIRArg);

      assert(NumIRArgs == 1);

      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {

                                   PVD->getFunctionScopeIndex()) &&
          AI->addAttr(llvm::Attribute::NonNull);

        QualType OTy = PVD->getOriginalType();
        if (const auto *ArrTy = getContext().getAsConstantArrayType(OTy)) {

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

                             .addAlignmentAttr(Alignment));
            uint64_t ArrSize = ArrTy->getZExtSize();

              Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity() *
              AI->addAttrs(Attrs);
            } else if (getContext().getTargetInfo().getNullPointerValue(

              AI->addAttr(llvm::Attribute::NonNull);

        } else if (const auto *ArrTy =

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

                             .addAlignmentAttr(Alignment));
            if (!getTypes().getTargetAddressSpace(ETy) &&

              AI->addAttr(llvm::Attribute::NonNull);

        const auto *AVAttr = PVD->getAttr<AlignValueAttr>();

            AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
        if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {

          llvm::ConstantInt *AlignmentCI =

          uint64_t AlignmentInt =
              AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
          if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
            AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
                               .addAlignmentAttr(llvm::Align(AlignmentInt)));

          AI->addAttr(llvm::Attribute::NoAlias);

        assert(NumIRArgs == 1);

        llvm::Value *V = AI;

              V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));

        if (V->getType() != LTy)

      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {

              *this, VecTyTo, VecTyFrom, ArgVal, Arg->getName());

            assert(NumIRArgs == 1);

      llvm::StructType *STy =

          STy->getNumElements() > 1) {

        llvm::TypeSize PtrElementSize =

        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "

          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

                Builder.CreateInsertValue(LoadedStructValue, AI, i);

          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          if (SrcSize <= DstSize) {

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

          if (SrcSize > DstSize) {

        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");

                llvm::TypeSize::getFixed(
                    getContext().getTypeSizeInChars(Ty).getQuantity() -
    auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

      unsigned argIndex = FirstIRArg;
      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);

        llvm::Value *elt = Fn->getArg(argIndex++);

        auto paramType = unpaddedStruct
                             ? unpaddedStruct->getElementType(unpaddedIndex++)
                             : unpaddedCoercionType;

        if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
          if (auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {

                *this, VecTyTo, VecTyFrom, elt, elt->getName());
            assert(Extracted && "Unexpected scalable to fixed vector coercion");

      assert(argIndex == FirstIRArg + NumIRArgs);

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));

      auto *AI = Fn->getArg(FirstIRArg);
      AI->setName(Arg->getName() + ".target_coerce");

      assert(NumIRArgs == 0);

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)

    for (unsigned I = 0, E = Args.size(); I != E; ++I)

  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);

    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();

                                                    llvm::Value *result) {

  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();

  if (&BB->back() != result)

  llvm::Type *resultType = result->getType();

  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {

    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    if (generator->getNextNode() != bitcast)

    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);

  bool doRetainAutorelease;

    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==

    doRetainAutorelease = false;

    llvm::Instruction *prev = call->getPrevNode();

    if (isa<llvm::BitCastInst>(prev)) {
      prev = prev->getPrevNode();

    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==

    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse())

    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();

  if (doRetainAutorelease)

  return CGF.Builder.CreateBitCast(result, resultType);

                                              llvm::Value *result) {

      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);

  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=

  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||

  llvm::Type *resultType = result->getType();

  assert(retainCall->use_empty());
  retainCall->eraseFromParent();

  return CGF.Builder.CreateBitCast(load, resultType);

                                    llvm::Value *result) {

  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||

    assert(!SI->isAtomic() &&

  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();

    const llvm::Instruction *LoadIntoFakeUse = nullptr;
    for (llvm::Instruction &I : llvm::reverse(*IP)) {

      if (LoadIntoFakeUse == &I)

      if (isa<llvm::BitCastInst>(&I))

      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)

        if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
          LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));

      return GetStoreIfValid(&I);

  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());

  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();

  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))

                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;

  while (BitWidth >= CharWidth) {

    BitWidth -= CharWidth;

  Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
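// Worked example for setBitRange (CharWidth = 8): BitOffset = 3 and
// BitWidth = 12 first set Bits[0] |= 0xF8 (bits 3..7), leaving 7 bits, and
// then set Bits[1] |= 0x7F (bits 0..6), i.e. bits 3..14 of the storage unit.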
                             int StorageSize, int BitOffset, int BitWidth,
                             int CharWidth, bool BigEndian) {

  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;

        auto Src = TmpBits.begin();
        auto Dst = Bits.begin() + Offset + I * Size;
        for (int J = 0; J < Size; ++J)

    std::fill_n(Bits.begin() + Offset, Size,

                                   int Pos, int Size, int CharWidth,

    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
      Mask = (Mask << CharWidth) | *P;

    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
      Mask = (Mask << CharWidth) | *--P;

                                                   llvm::IntegerType *ITy,

  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  int Size = DataLayout.getTypeStoreSize(ITy);

  return Builder.CreateAnd(Src, Mask, "cmse.clear");

                                                   llvm::ArrayType *ATy,

  int Size = DataLayout.getTypeStoreSize(ATy);

      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;

  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
                                  DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
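// CMSE clearing, as implemented above: a per-byte mask of the bits actually
// occupied by the returned record is built, and every element of the return
// value is AND-ed with it so that padding and unused bit-field storage is
// zeroed before the value crosses the secure/non-secure boundary.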
                                         uint64_t RetKeyInstructionsSourceAtom) {

    auto *I = Builder.CreateRetVoid();
    if (RetKeyInstructionsSourceAtom)

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;

    llvm::Function::arg_iterator EI = CurFn->arg_end();

    llvm::Value *ArgStruct = &*EI;

        cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();

      auto AI = CurFn->arg_begin();

        RetDbgLoc = SI->getDebugLoc();

        RV = SI->getValueOperand();
        SI->eraseFromParent();

        if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
          RT = FD->getReturnType();
        else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
          RT = MD->getReturnType();

          llvm_unreachable("Unexpected function/method type");

    auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

    unsigned unpaddedIndex = 0;
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {

          unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
                         : unpaddedCoercionType,
      results.push_back(elt);

    if (results.size() == 1) {

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);

    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;

      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());

    Ret = Builder.CreateRetVoid();

    Ret->setDebugLoc(std::move(RetDbgLoc));

  llvm::Value *Backup = RV ? Ret->getOperand(0) : nullptr;
  if (RetKeyInstructionsSourceAtom)

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())

    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;

    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())

          AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::SO_NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())

        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);

  llvm::Value *Cond = Builder.CreateIsNotNull(RV);

  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);

  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);

  if (type->isReferenceType()) {

      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {

        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));

      type->castAsRecordDecl()->isParamDestroyedInCallee() &&

        CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
    assert(cleanup.isValid() &&
           "cleanup for callee-destructed param not recorded");

    llvm::Instruction *isActive = Builder.CreateUnreachable();

  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);

         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  if (!provablyNonNull) {

    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);

                              "icr.writeback-cast");

  if (writeback.ToUse) {

  if (!provablyNonNull)

  for (const auto &I : llvm::reverse(Cleanups)) {

    I.IsActiveIP->eraseFromParent();

    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
4441 llvm::PointerType *destType =
4443 llvm::Type *destElemType =
4466 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4470  llvm::BasicBlock *contBB = nullptr;
4471  llvm::BasicBlock *originBB = nullptr;
4474  llvm::Value *finalArgument;
4478  if (provablyNonNull) {
4483    finalArgument = CGF.Builder.CreateSelect(
4484        isNull, llvm::ConstantPointerNull::get(destType),
4490    originBB = CGF.Builder.GetInsertBlock();
4493    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
4495    condEval.begin(CGF);
4499  llvm::Value *valueToUse = nullptr;
4507      src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
4524 if (shouldCopy && !provablyNonNull) {
4525    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
4530    llvm::PHINode *phiToUse =
4531        CGF.Builder.CreatePHI(valueToUse->getType(), 2, "icr.to-use");
4532    phiToUse->addIncoming(valueToUse, copyBB);
4533    phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4535    valueToUse = phiToUse;
4549    StackBase = CGF.Builder.CreateStackSave("inalloca.save");
4555    CGF.Builder.CreateStackRestore(StackBase);
4562  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4567  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
4568  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4571  const NonNullAttr *NNAttr = nullptr;
4572  if (SanOpts.has(SanitizerKind::NonnullAttribute))
4575  bool CanCheckNullability = false;
4576  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4577 !PVD->getType()->isRecordType()) {
4578 auto Nullability = PVD->getType()->getNullability();
4579 CanCheckNullability = Nullability &&
4581 PVD->getTypeSourceInfo();
4584 if (!NNAttr && !CanCheckNullability)
4591 AttrLoc = NNAttr->getLocation();
4592 CheckKind = SanitizerKind::SO_NonnullAttribute;
4593 Handler = SanitizerHandler::NonnullArg;
4595 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4596 CheckKind = SanitizerKind::SO_NullabilityArg;
4597 Handler = SanitizerHandler::NullabilityArg;
4602 llvm::Constant *StaticData[] = {
4605      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4607  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, {});
4613  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4633  return llvm::any_of(ArgTypes, [&](QualType Ty) {
4644    return classDecl->getTypeParamListAsWritten();
4648    return catDecl->getTypeParamList();
4658    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4662  assert((ParamsToSkip == 0 || Prototype.P) &&
4663         "Can't skip parameters if type info is not provided");
4673  bool IsVariadic = false;
4675  const auto *MD = dyn_cast<const ObjCMethodDecl *>(Prototype.P);
4677 IsVariadic = MD->isVariadic();
4680 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4681 MD->param_type_end());
4683    const auto *FPT = cast<const FunctionProtoType *>(Prototype.P);
4684 IsVariadic = FPT->isVariadic();
4685 ExplicitCC = FPT->getExtInfo().getCC();
4686 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4687 FPT->param_type_end());
4695    assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4702               getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4703           "type mismatch in call argument!");
4709  assert((Arg == ArgRange.end() || IsVariadic) &&
4710         "Extra arguments in non-variadic function!");
4715  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4716    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4717  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4729  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4731    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4733    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4740    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4741    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(
4742        Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
4747 std::swap(Args.back(), *(&Args.back() - 1));
4752    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4753           "inalloca only supported on x86");
4758  size_t CallArgsStart = Args.size();
4759  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4760    unsigned Idx = LeftToRight ? I : E - I - 1;
4762 unsigned InitialArgSize = Args.size();
4765 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4766 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4768 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4770 "Argument and parameter types don't match");
4774 assert(InitialArgSize + 1 == Args.size() &&
4775 "The code below depends on only adding one arg per EmitCallArg");
4776 (void)InitialArgSize;
4779 if (!Args.back().hasLValue()) {
4780 RValue RVArg = Args.back().getKnownRValue();
4782 ParamsToSkip + Idx);
4786 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4793 std::reverse(Args.begin() + CallArgsStart, Args.end());
4857 std::optional<DisableDebugLocationUpdates> Dis;
4858  if (isa<CXXDefaultArgExpr>(E))
4861      dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4875 "reference binding to unmaterialized r-value!");
4887  if (type->isRecordType() &&
4888      type->castAsRecordDecl()->isParamDestroyedInCallee()) {
4895    bool DestroyedInCallee = true, NeedsCleanup = true;
4896    if (const auto *RD = type->getAsCXXRecordDecl())
4897      DestroyedInCallee = RD->hasNonTrivialDestructor();
4899      NeedsCleanup = type.isDestructedType();
4901    if (DestroyedInCallee)
4908    if (DestroyedInCallee && NeedsCleanup) {
4915      llvm::Instruction *IsActive =
4922  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4923      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
4924      !type->isArrayParameterType() && !type.isNonTrivialToPrimitiveCopy()) {
4934 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4938  if (!getTarget().getTriple().isOSWindows())
4954 void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4957    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4964 const llvm::Twine &name) {
4972 const llvm::Twine &name) {
4974  for (auto arg : args)
4975    values.push_back(arg.emitRawPointer(*this));
4982                                          const llvm::Twine &name) {
4984  call->setDoesNotThrow();
4991                                          const llvm::Twine &name) {
5006  if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5007 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5008 auto IID = CalleeFn->getIntrinsicID();
5009 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5022 const llvm::Twine &name) {
5023  llvm::CallInst *call = Builder.CreateCall(
5028  return cast<llvm::CallInst>(addConvergenceControlToken(call));
5039    llvm::InvokeInst *invoke = Builder.CreateInvoke(
5041    invoke->setDoesNotReturn();
5044    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
5045 call->setDoesNotReturn();
5054 const Twine &name) {
5062 const Twine &name) {
5072 const Twine &Name) {
5077 llvm::CallBase *Inst;
5079    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
5082    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5090    AddObjCARCExceptionMetadata(Inst);
5095 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5097  DeferredReplacements.push_back(
5098      std::make_pair(llvm::WeakTrackingVH(Old), New));
5105[[nodiscard]] llvm::AttributeList
5106maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5107 const llvm::AttributeList &Attrs,
5108 llvm::Align NewAlign) {
5109 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5110 if (CurAlign >= NewAlign)
5112 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5113 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5114 .addRetAttribute(Ctx, AlignAttr);
5117 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
5122  const AlignedAttrTy *AA = nullptr;
5124  llvm::Value *Alignment = nullptr;
5125  llvm::ConstantInt *OffsetCI = nullptr;
5131    AA = FuncDecl->getAttr<AlignedAttrTy>();
5136  [[nodiscard]] llvm::AttributeList
5137  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
5138    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
5140 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5145 if (!AlignmentCI->getValue().isPowerOf2())
5147 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5150 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5162 AA->getLocation(), Alignment, OffsetCI);
5168class AssumeAlignedAttrEmitter final
5169    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5172      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5176    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
5177    if (Expr *Offset = AA->getOffset()) {
5179      if (OffsetCI->isNullValue())
5186 class AllocAlignAttrEmitter final
5187    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5191 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5195 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5204  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
5205    return VT->getPrimitiveSizeInBits().getKnownMinValue();
5206  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5209  unsigned MaxVectorWidth = 0;
5210  if (auto *ST = dyn_cast<llvm::StructType>(Ty))
5211    for (auto *I : ST->elements())
5213  return MaxVectorWidth;
5220                         llvm::CallBase **callOrInvoke, bool IsMustTail,
5222                         bool IsVirtualFunctionPointerThunk) {
5225  assert(Callee.isOrdinary() || Callee.isVirtual());
5234  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5235  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5242    if ((TargetDecl->hasAttr<AlwaysInlineAttr>() &&
5243         (TargetDecl->hasAttr<TargetAttr>() ||
5247          TargetDecl->hasAttr<TargetAttr>())))
5254 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5256 CalleeDecl, CallArgs, RetTy);
5263  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
5266    llvm::AllocaInst *AI;
5268      IP = IP->getNextNode();
5269      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",
5275    AI->setAlignment(Align.getAsAlign());
5276    AI->setUsedWithInAlloca(true);
5277    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5278    ArgMemory = RawAddress(AI, ArgStruct, Align);
5281  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
5287  bool NeedSRetLifetimeEnd = false;
5293  if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.isIndirect()) {
5295 IRFunctionArgs.getSRetArgNo(),
5304 if (IRFunctionArgs.hasSRetArg()) {
5319 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5337  assert(CallInfo.arg_size() == CallArgs.size() &&
5338         "Mismatch between function signature & arguments.");
5341  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5342       I != E; ++I, ++info_it, ++ArgNo) {
5346 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5347 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5350 unsigned FirstIRArg, NumIRArgs;
5351 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5353 bool ArgHasMaybeUndefAttr =
5358 assert(NumIRArgs == 0);
5359      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
5360 if (I->isAggregate()) {
5362 ? I->getKnownLValue().getAddress()
5363 : I->getKnownRValue().getAggregateAddress();
5364 llvm::Instruction *Placeholder =
5365          cast<llvm::Instruction>(Addr.getPointer());
5369      CGBuilderTy::InsertPoint IP = Builder.saveIP();
5370      Builder.SetInsertPoint(Placeholder);
5383      deferPlaceholderReplacement(Placeholder, Addr.getPointer());
5388          I->Ty, getContext().getTypeAlignInChars(I->Ty),
5389          "indirect-arg-temp");
5390      I->copyInto(*this, Addr);
5399      I->copyInto(*this, Addr);
5406 assert(NumIRArgs == 1);
5407 if (I->isAggregate()) {
5417 ? I->getKnownLValue().getAddress()
5418 : I->getKnownRValue().getAggregateAddress();
5422 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5423 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5424 TD->getAllocaAddrSpace()) &&
5425 "indirect argument must be in alloca address space");
5427      bool NeedCopy = false;
5428      if (Addr.getAlignment() < Align &&
5429          llvm::getOrEnforceKnownAlignment(Addr.emitRawPointer(*this),
5433      } else if (I->hasLValue()) {
5434 auto LV = I->getKnownLValue();
5439 if (!isByValOrRef ||
5444      if (isByValOrRef && Addr.getType()->getAddressSpace() !=
5461              *this, V, I->Ty.getAddressSpace(), T, true);
5462          if (ArgHasMaybeUndefAttr)
5463            Val = Builder.CreateFreeze(Val);
5464          IRCallArgs[FirstIRArg] = Val;
5467    } else if (I->getType()->isArrayParameterType()) {
5473 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5482 if (ArgHasMaybeUndefAttr)
5483        Val = Builder.CreateFreeze(Val);
5484      IRCallArgs[FirstIRArg] = Val;
5489      CallLifetimeEndAfterCall.emplace_back(AI);
5492      I->copyInto(*this, AI);
5497 assert(NumIRArgs == 0);
5505 assert(NumIRArgs == 1);
5507 if (!I->isAggregate())
5508 V = I->getKnownRValue().getScalarVal();
5511 I->hasLValue() ? I->getKnownLValue().getAddress()
5512 : I->getKnownRValue().getAggregateAddress());
5518        assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5522            V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
5527        cast<llvm::AllocaInst>(V)->setSwiftError(true);
5535          V->getType()->isIntegerTy())
5542      if (FirstIRArg < IRFuncTy->getNumParams() &&
5543          V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5544        assert(V->getType()->isPointerTy() && "Only pointers can mismatch!");
5545        auto ActualAS = I->Ty.getAddressSpace();
5547            *this, V, ActualAS, IRFuncTy->getParamType(FirstIRArg));
5550      if (ArgHasMaybeUndefAttr)
5552      IRCallArgs[FirstIRArg] = V;
5556 llvm::StructType *STy =
5561 if (!I->isAggregate()) {
5563        I->copyInto(*this, Src);
5565 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5566 : I->getKnownRValue().getAggregateAddress();
5576 llvm::TypeSize SrcTypeSize =
5579 if (SrcTypeSize.isScalable()) {
5580 assert(STy->containsHomogeneousScalableVectorTypes() &&
5581 "ABI only supports structure with homogeneous scalable vector "
5583 assert(SrcTypeSize == DstTypeSize &&
5584 "Only allow non-fractional movement of structure with "
5585 "homogeneous scalable vector type");
5586 assert(NumIRArgs == STy->getNumElements());
5588 llvm::Value *StoredStructValue =
5590        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5591          llvm::Value *Extract = Builder.CreateExtractValue(
5592              StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
5593 IRCallArgs[FirstIRArg + i] = Extract;
5596 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5597 uint64_t DstSize = DstTypeSize.getFixedValue();
5603 if (SrcSize < DstSize) {
5612 assert(NumIRArgs == STy->getNumElements());
5613        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5616          if (ArgHasMaybeUndefAttr)
5617            LI = Builder.CreateFreeze(LI);
5618 IRCallArgs[FirstIRArg + i] = LI;
5623 assert(NumIRArgs == 1);
5631 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5632      if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5636      if (ArgHasMaybeUndefAttr)
5637        Load = Builder.CreateFreeze(Load);
5638 IRCallArgs[FirstIRArg] = Load;
5648 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5652      bool NeedLifetimeEnd = false;
5653 if (I->isAggregate()) {
5654 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5655 : I->getKnownRValue().getAggregateAddress();
5658 RValue RV = I->getKnownRValue();
5667 layout->getAlignment(), scalarAlign)),
5669 nullptr, &AllocaAddr);
5677 unsigned IRArgPos = FirstIRArg;
5678 unsigned unpaddedIndex = 0;
5679      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5680        llvm::Type *eltType = coercionType->getElementType(i);
5687                              : unpaddedCoercionType,
5689        if (ArgHasMaybeUndefAttr)
5690          elt = Builder.CreateFreeze(elt);
5691 IRCallArgs[IRArgPos++] = elt;
5693 assert(IRArgPos == FirstIRArg + NumIRArgs);
5695 if (NeedLifetimeEnd)
5701 unsigned IRArgPos = FirstIRArg;
5702 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5703 assert(IRArgPos == FirstIRArg + NumIRArgs);
5709 if (!I->isAggregate()) {
5711        I->copyInto(*this, Src);
5713 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5714 : I->getKnownRValue().getAggregateAddress();
5721 IRCallArgs[FirstIRArg] = Load;
5727  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5733 assert(IRFunctionArgs.hasInallocaArg());
5734 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5745 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5746 llvm::Value *Ptr) -> llvm::Function * {
5747 if (!CalleeFT->isVarArg())
5751 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5752 if (CE->getOpcode() == llvm::Instruction::BitCast)
5753 Ptr = CE->getOperand(0);
5756 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5760 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5764 if (OrigFT->isVarArg() ||
5765 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5766 OrigFT->getReturnType() != CalleeFT->getReturnType())
5769    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5770 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5776 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5778 IRFuncTy = OrigFn->getFunctionType();
5789  for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5790 LargestVectorWidth = std::max(LargestVectorWidth,
5795 llvm::AttributeList Attrs;
5801  if (CallingConv == llvm::CallingConv::X86_VectorCall &&
5802      getTarget().getTriple().isWindowsArm64EC()) {
5803    CGM.Error(Loc, "__vectorcall calling convention is not currently "
5808    if (FD->hasAttr<StrictFPAttr>())
5810      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5815  if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
5821    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5825    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5831 CallerDecl, CalleeDecl))
5833            Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5838        Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Convergent);
5847             !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>()) &&
5849             CallerDecl, CalleeDecl)) {
5851          Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5856    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5863    CannotThrow = false;
5872 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5874    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5875      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5883  if (NeedSRetLifetimeEnd)
5886  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5892 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5899 if (FD->hasAttr<StrictFPAttr>())
5901      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5903  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5904  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5906  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5907 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5912    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5915    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5919  if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5920      CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5929  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5930    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5931 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
5932 !CI->getCalledFunction())
5938 CI->setAttributes(Attrs);
5939  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5943  if (!CI->getType()->isVoidTy())
5944    CI->setName("call");
5947  CI = addConvergenceControlToken(CI);
5950  LargestVectorWidth =
5956  if (!CI->getCalledFunction())
5957    PGO->valueProfile(Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
5962 AddObjCARCExceptionMetadata(CI);
5965  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5966    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5967      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5968    else if (IsMustTail) {
5975      else if (Call->isIndirectCall())
5977      else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
5978        if (!cast<FunctionDecl>(TargetDecl)->isDefined())
5983              {cast<FunctionDecl>(TargetDecl), Loc});
5987        if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
5988            llvm::GlobalValue::isDiscardableIfUnused(Linkage))
5995 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6004  if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
6005    llvm::ConstantInt *Line =
6007    llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
6009    CI->setMetadata("srcloc", MDT);
6017  if (CI->doesNotReturn()) {
6018    if (NeedSRetLifetimeEnd)
6022    if (SanOpts.has(SanitizerKind::Unreachable)) {
6025      if (auto *F = CI->getCalledFunction())
6026 F->removeFnAttr(llvm::Attribute::NoReturn);
6027 CI->removeFnAttr(llvm::Attribute::NoReturn);
6032 SanitizerKind::KernelAddress)) {
6034        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
6036        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, false);
6037        llvm::FunctionCallee Fn =
6044    Builder.ClearInsertionPoint();
6064    CGBuilderTy::InsertPointGuard IPG(Builder);
6067  } else if (!(Cleanup &&
6072 if (CI->getType()->isVoidTy())
6076 Builder.ClearInsertionPoint();
6082  if (swiftErrorTemp.isValid()) {
6101  if (IsVirtualFunctionPointerThunk) {
6112    bool requiresExtract = isa<llvm::StructType>(CI->getType());
6114    unsigned unpaddedIndex = 0;
6115    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6116      llvm::Type *eltType = coercionType->getElementType(i);
6120      llvm::Value *elt = CI;
6121      if (requiresExtract)
6122        elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
6124 assert(unpaddedIndex == 0);
6133 if (NeedSRetLifetimeEnd)
6150    llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
6151    llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
6159      llvm::Value *V = CI;
6160      if (V->getType() != RetIRTy)
6170      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6171        llvm::Value *V = CI;
6172        if (auto *ScalableSrcTy =
6173                dyn_cast<llvm::ScalableVectorType>(V->getType())) {
6174          if (FixedDstTy->getElementType() ==
6175              ScalableSrcTy->getElementType()) {
6176            V = Builder.CreateExtractVector(FixedDstTy, V, uint64_t(0),
6190      DestIsVolatile = false;
6215      DestIsVolatile = false;
6224      llvm_unreachable("Invalid ABI kind for return argument");
6227    llvm_unreachable("Unhandled ABIArgInfo::Kind");
6232  if (Ret.isScalar() && TargetDecl) {
6233    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6234    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6240  LifetimeEnd.Emit(*this, {});
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
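A minimal sketch of what this helper amounts to, pieced together from the fragment at listing lines 4410-4411 above; the IgnoreParens() stripping and the null fallback are assumptions, not taken verbatim from this file:
static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  // Return the operand of a unary '&' expression, otherwise null.
  // Looking through parentheses first is an assumption.
  if (const auto *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}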
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
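A rough sketch of how such a mask can be assembled from the fast-math language options; the exact option-to-flag mapping shown here is an assumption, not a quote of this file:
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
  llvm::FPClassTest Mask = llvm::fcNone;
  // Assumption: when NaNs or infinities are not honored, the corresponding
  // value classes can be excluded from parameters and returns.
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  return Mask;
}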
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
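A hedged sketch of the idea, assuming the usual CGCall.cpp includes; using the data layout's preferred alignment as the lower bound is an assumption:
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF,
                                              llvm::Type *Ty, CharUnits MinAlign,
                                              const llvm::Twine &Name = "tmp") {
  // Never use less than the preferred alignment of the coerced IR type, but
  // honor a larger minimum requested by the caller (assumption).
  CharUnits PrefAlign = CharUnits::fromQuantity(
      CGF.CGM.getDataLayout().getPrefTypeAlign(Ty).value());
  CharUnits Align = std::max(MinAlign, PrefAlign);
  llvm::AllocaInst *AI = CGF.CreateTempAlloca(Ty, Name + ".coerce");
  AI->setAlignment(Align.getAsAlign());
  return RawAddress(AI, Ty, Align);
}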
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
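As orientation, an int/pointer coercion of this shape typically routes pointers through ptrtoint/inttoptr and resizes integers in between; this is a hedged sketch, not the verbatim body, and the opaque-pointer shortcut is an assumption:
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;
  if (llvm::isa<llvm::PointerType>(Val->getType())) {
    if (llvm::isa<llvm::PointerType>(Ty))
      return Val; // assumption: opaque pointers, nothing further to do
    // Convert the pointer to an integer so the width adjustment below applies.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }
  llvm::Type *DestIntTy = llvm::isa<llvm::PointerType>(Ty) ? CGF.IntPtrTy : Ty;
  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, /*isSigned=*/false,
                                    "coerce.val.ii");
  if (llvm::isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}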
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
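Read together with the fragment at listing lines 5204-5213 above, this helper is a small recursion over IR types; the array and struct steps below are filled in by assumption where the listing has gaps:
static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
  if (const auto *VT = llvm::dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  // Assumed recursion: an array is as wide as its element type, a struct as
  // wide as its widest member.
  if (const auto *AT = llvm::dyn_cast<llvm::ArrayType>(Ty))
    return getMaxVectorWidth(AT->getElementType());
  unsigned MaxVectorWidth = 0;
  if (const auto *ST = llvm::dyn_cast<llvm::StructType>(Ty))
    for (const auto *I : ST->elements())
      MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
  return MaxVectorWidth;
}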
CodeGenFunction::ComplexPairTy ComplexPairTy
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
enum clang::sema::@1840::IndirectLocalPathEntry::EntryKind Kind
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
#define CC_VLS_CASE(ABI_VLEN)
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current context.
CanQualType getCanonicalSizeType() const
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
CanQualType getCanonicalTagType(const TagDecl *TD) const
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
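A purely illustrative sketch of the CharUnits API listed above; the helper name and the 8-byte alignment are made up for the example:
static llvm::Align exampleRoundedAlign(int64_t SizeInBytes) {
  CharUnits Size = CharUnits::fromQuantity(SizeInBytes);  // raw byte count
  CharUnits Align = CharUnits::fromQuantity(8);
  CharUnits Rounded = Size.alignTo(Align);  // assumption: alignTo is available
  (void)Rounded.getQuantity();              // back to a raw integer
  assert(!Align.isZero());
  return Align.getAsAlign();                // requires a power-of-two quantity
}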
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
bool isLoaderReplaceableFunctionName(StringRef FuncName) const
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual void createCoercedStore(llvm::Value *Val, Address DstAddr, const ABIArgInfo &AI, bool DestIsVolatile, CodeGenFunction &CGF) const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual llvm::Value * createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI, CodeGenFunction &CGF) const
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
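For orientation, the nonnull-return check in the epilog fragments above (listing lines 4233-4236) feeds EmitCheck roughly like this; the free-function wrapper and its parameters are invented for the example:
static void emitNonnullReturnCheckSketch(CodeGenFunction &CGF, llvm::Value *RV,
                                         llvm::Value *SLocPtr,
                                         llvm::ArrayRef<llvm::Constant *> StaticData) {
  // Pass iff the return value is non-null; the handler reports otherwise.
  llvm::Value *Cond = CGF.Builder.CreateIsNotNull(RV);
  llvm::Value *DynamicData[] = {SLocPtr};
  CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::SO_ReturnsNonnullAttribute),
                SanitizerHandler::NonnullReturn, StaticData, DynamicData);
}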
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
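A minimal sketch of how these two primitives pair up when copying one scalar between two addresses. It assumes a CodeGenFunction &CGF, two Address values Src and Dst, a QualType Ty, and a SourceLocation Loc are already in scope; the snippet is illustrative and not taken from this file.

  // Hedged sketch: load a scalar of type Ty from Src and store it to Dst,
  // letting the default AlignmentSource and non-temporal flags apply.
  llvm::Value *V = CGF.EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
  CGF.EmitStoreOfScalar(V, Dst, /*Volatile=*/false, Ty);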
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const ABIInfo & getABIInfo()
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust the Memory attribute to ensure that the backend gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
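A sketch of a typical consumer of this entry point, assuming a CodeGenModule &CGM, a computed CGFunctionInfo FI, an llvm::Function *Fn, and a GlobalDecl GD are in scope; constructing the CGCalleeInfo directly from the GlobalDecl is an assumption for illustration, not a quotation of Clang's own call sites.

  // Hedged sketch: compute attributes and calling convention for Fn, then
  // apply them to the declaration.
  llvm::AttributeList Attrs;
  unsigned CallingConv;
  CGM.ConstructAttributeList(Fn->getName(), FI, CGCalleeInfo(GD), Attrs,
                             CallingConv, /*AttrOnCallSite=*/false,
                             /*IsThunk=*/false);
  Fn->setAttributes(Attrs);
  Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));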
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into an llvm::Type.
const CGFunctionInfo & arrangeSYCLKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A SYCL kernel caller function is an offload device entry point function with a target device dependen...
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
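Taken together with arrangeGlobalDeclaration below, this is the usual "arrange, then lower" pattern; a minimal sketch assuming a CodeGenTypes &CGT and a GlobalDecl GD that refers to an ordinary function.

  // Hedged sketch: arrange the ABI-level signature, then lower it to an
  // llvm::FunctionType.
  const CGFunctionInfo &FI = CGT.arrangeGlobalDeclaration(GD);
  llvm::FunctionType *FnTy = CGT.GetFunctionType(FI);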
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the given type into the LLVM types it is split into, writing them through the given iterator.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual void Emit(CodeGenFunction &CGF, Flags flags)=0
Emit the cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
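A sketch of how the three evaluation kinds round-trip through RValue; Scalar, Real, Imag, and AggAddr are placeholder values introduced for illustration, not identifiers from this file.

  // Hedged sketch: wrap and unwrap each evaluation kind.
  RValue RS = RValue::get(Scalar);            // scalar result
  RValue RC = RValue::getComplex(Real, Imag); // complex real/imag pair
  RValue RA = RValue::getAggregate(AggAddr);  // aggregate held in memory
  llvm::Value *V = RS.getScalarVal();
  ComplexPairTy RI = RC.getComplexVal();
  Address A = RA.getAggregateAddress();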
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
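A sketch of computing the required-argument count for a C++ method call, where the single additional argument stands for the implicit 'this'; FPT is assumed to be the method's FunctionProtoType.

  // Hedged sketch: prototype arguments plus one implicit prefix argument.
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);
  bool IsVariadic = Required.allowsOptionalArgs();
  unsigned NumFixed = Required.getNumRequiredArgs(); // fixed arguments only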
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual unsigned getDeviceKernelCallingConv() const
Get LLVM calling convention for device kernels.
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual bool wouldInliningViolateFunctionCallABI(const FunctionDecl *Caller, const FunctionDecl *Callee) const
Returns true if inlining the function call would produce incorrect code for the current target and sh...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
virtual void setOCLKernelStubCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getOriginalDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable -- this should be a list of strings starting with...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
bool isBlockPointerType() const
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
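A sketch of how an external frontend might use this hook when it creates llvm::Function definitions itself. The option objects are assumed to be whatever the frontend already holds, and the qualified name assumes the declaration lives in the clang::CodeGen namespace as the listing suggests.

  // Hedged sketch: merge Clang's default definition attributes into a
  // function the frontend emitted on its own.
  clang::CodeGen::mergeDefaultFunctionDefinitionAttributes(
      F, CodeGenOpts, LangOpts, TargetOpts, /*WillInternalize=*/false);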
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
LangAS
Defines the address space values used by the address space qualifier of QualType.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
LangAS getLangASFromTargetAS(unsigned TargetAS)
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
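These four fields are enough to decode an unsigned bit-field read by hand. A small standalone sketch follows; it is not Clang's implementation, which also handles signedness, endianness, and volatile access.

  #include <cstdint>

  // Extract a bit-field whose storage unit has already been loaded from
  // StructBase + StorageOffset; Offset and Size are in bits, and the loaded
  // unit is StorageSize bits wide (at most 64 here).
  static uint64_t extractUnsignedBitField(uint64_t Storage, unsigned Offset,
                                          unsigned Size) {
    uint64_t Mask = (Size >= 64) ? ~0ULL : ((1ULL << Size) - 1);
    return (Storage >> Offset) & Mask;
  }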
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm used to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.