#include "TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
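
  // Coerce a pointer in address space FromAS to the same pointer type in ToAS.
  // Used below to rewrite HIP kernel arguments from the generic (Default)
  // address space to the device global address space.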
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
                                  unsigned &NumRegsLeft) const;
  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                             const LangOptions &Opt) const override {
    if (T->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(T) == 96)
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous aggregates may occupy at most MaxNumRegsForArgsRet registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
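
// Estimate the number of 32-bit registers a value of type Ty occupies when
// passed in registers: vectors are counted per element, records field by
// field, and everything else by rounding its size up to a multiple of 32 bits.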
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors are packed two elements per 32-bit register.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
  return (getContext().getTypeSize(Ty) + 31) / 32;
  unsigned ArgumentIndex = 0;

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;

                          getContext().getTypeInfoInChars(Ty),
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
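
  // Aggregate kernel arguments that are not coerced are given their type's
  // natural alignment and are placed in the constant address space.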
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;

        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &M) const;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;
        getABIInfo().getDataLayout().getAllocaAddrSpace());

                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;

                               llvm::Instruction &AtomicInst,

                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
          (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(

  const auto *ReqdWGS =

  const bool IsOpenCLKernel =

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {

  } else if (IsOpenCLKernel || IsHIPKernel) {
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize

    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {

      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {

                     ? Attr->getMaxNumWorkGroupsY()

                     ? Attr->getMaxNumWorkGroupsZ()

    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
void AMDGPUTargetCodeGenInfo::setTargetAttributes(

    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);

  if (GV->isDeclaration())

  llvm::Function *F = dyn_cast<llvm::Function>(GV);

    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
unsigned AMDGPUTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(

    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(

  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,

         "Address space agnostic languages only");

    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)

  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {

  return DefaultGlobalAS;
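
// Map a Clang SyncScope to the name of the LLVM sync scope the AMDGPU backend
// understands. Atomics at the OpenCL scopes that are not sequentially
// consistent additionally use the address-space-restricted "one-as" variant.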
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,

                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {

  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";

  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:

  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:

  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:

  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:

  if (Scope >= SyncScope::OpenCLWorkGroup &&
      Scope <= SyncScope::OpenCLSubGroup &&
      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
      Name = Twine(Twine(Name) + Twine("-")).str();

    Name = Twine(Twine(Name) + Twine("one-as")).str();

  return Ctx.getOrInsertSyncScopeID(Name);
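
// Attach AMDGPU-specific metadata to atomic instructions: a !noalias.addrspace
// range excluding the private address space for flat-pointer atomics, plus
// "amdgpu.no.fine.grained.memory", "amdgpu.no.remote.memory" and, for float
// fadd, "amdgpu.ignore.denormal.mode" hints on atomicrmw.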
void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(

  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||

        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&

    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);

    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);

    RMW->setMetadata("amdgpu.no.remote.memory", Empty);

      RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
      RMW->getType()->isFloatTy())
    RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(

  FT = getABIInfo().getContext().adjustFunctionType(
/// Return the IR struct type for the rtinfo struct in rocm-device-libs used
/// for device enqueue.
static llvm::StructType *
getAMDGPURuntimeHandleType(llvm::LLVMContext &C,
                           llvm::Type *KernelDescriptorPtrTy) {
  llvm::Type *Int32 = llvm::Type::getInt32Ty(C);
  return llvm::StructType::create(C, {KernelDescriptorPtrTy, Int32, Int32},
                                  "block.runtime.handle.t");
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto *InvokeFT = Invoke->getFunctionType();

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));

        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  const llvm::DataLayout &DL = Mod.getDataLayout();

  llvm::Twine Name = Invoke->getName() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,

  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);

  F->addFnAttrs(KernelAttrs);
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = DL.getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));

  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))

  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));

  F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
      C, llvm::PointerType::get(C, DL.getDefaultGlobalsAddressSpace()));
  llvm::Constant *RuntimeHandleInitializer =
      llvm::ConstantAggregateZero::get(HandleTy);

  llvm::Twine RuntimeHandleName = F->getName() + ".runtime.handle";
  auto *RuntimeHandle = new llvm::GlobalVariable(

      /*isConstant=*/true, llvm::GlobalValue::InternalLinkage,
      RuntimeHandleInitializer, RuntimeHandleName,
      /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
      DL.getDefaultGlobalsAddressSpace(),

  llvm::MDNode *HandleAsMD =
      llvm::MDNode::get(C, llvm::ValueAsMetadata::get(RuntimeHandle));
  F->setMetadata(llvm::LLVMContext::MD_associated, HandleAsMD);

  RuntimeHandle->setSection(".amdgpu.kernel.runtime.handle");

  return RuntimeHandle;
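
/// Emit the IR encoding to attach the AMD GPU flat-work-group-size attribute
/// to F, derived from the AMDGPUFlatWorkGroupSizeAttr and/or the
/// ReqdWorkGroupSizeAttr, optionally reporting the chosen bounds through
/// MinThreadsVal and MaxThreadsVal.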
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  auto Eval = [&](Expr *E) {

    Min = Eval(FlatWGS->getMin());
    Max = Eval(FlatWGS->getMax());

  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = Eval(ReqdWGS->getXDim()) * Eval(ReqdWGS->getYDim()) *
                Eval(ReqdWGS->getZDim());

    assert(Min <= Max && "Min must be less than or equal Max");

    *MinThreadsVal = Min;

    *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);

    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);

    assert(Max == 0 && "Max must be zero");
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {

      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();

          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()

  assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

  std::string AttrVal = llvm::utostr(Min);

    AttrVal = AttrVal + "," + llvm::utostr(Max);
  F->addFnAttr("amdgpu-waves-per-eu", AttrVal);

    assert(Max == 0 && "Max must be zero");
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}