11#include "TargetInfo.h"
30class SPIRVABIInfo :
public CommonSPIRABIInfo {
32 SPIRVABIInfo(
CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
46 CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo>
ABIInfo)
51 getABIInfo().getDataLayout().getAllocaAddrSpace());
59 llvm::Type *getSPIRVImageTypeFromHLSLResource(
65class SPIRVTargetCodeGenInfo :
public CommonSPIRTargetCodeGenInfo {
68 : CommonSPIRTargetCodeGenInfo(
std::make_unique<SPIRVABIInfo>(CGT)) {}
72 void setTargetAttributes(
const Decl *
D, llvm::GlobalValue *GV,
74 llvm::SyncScope::ID getLLVMSyncScopeID(
const LangOptions &LangOpts,
76 llvm::AtomicOrdering Ordering,
77 llvm::LLVMContext &Ctx)
const override;
78 bool supportsLibCall()
const override {
79 return getABIInfo().getTarget().getTriple().getVendor() !=
86 case SyncScope::HIPSingleThread:
87 case SyncScope::SingleScope:
88 return "singlethread";
89 case SyncScope::HIPWavefront:
90 case SyncScope::OpenCLSubGroup:
91 case SyncScope::WavefrontScope:
93 case SyncScope::HIPWorkgroup:
94 case SyncScope::OpenCLWorkGroup:
95 case SyncScope::WorkgroupScope:
97 case SyncScope::HIPAgent:
98 case SyncScope::OpenCLDevice:
99 case SyncScope::DeviceScope:
101 case SyncScope::SystemScope:
102 case SyncScope::HIPSystem:
103 case SyncScope::OpenCLAllSVMDevices:
110void CommonSPIRABIInfo::setCCs() {
111 assert(getRuntimeCC() == llvm::CallingConv::C);
112 RuntimeCC = llvm::CallingConv::SPIR_FUNC;
116 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
135 if (getContext().getLangOpts().CUDAIsDevice) {
139 llvm::Type *LTy = CGT.ConvertType(Ty);
140 auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
141 auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
142 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
143 if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
144 LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
149 if (getTarget().getTriple().getVendor() == llvm::Triple::AMD)
169 return getNaturalAlignIndirect(Ty, 0,
true);
176 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
184 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
203 if (CC == llvm::CallingConv::SPIR_KERNEL) {
204 I.info = classifyKernelArgumentType(I.type);
215 SPIRVABIInfo(CGM.
getTypes()).computeInfo(FI);
217 CommonSPIRABIInfo(CGM.
getTypes()).computeInfo(FI);
222unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv()
const {
223 return llvm::CallingConv::SPIR_KERNEL;
226void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
229 if (getABIInfo().getContext().getLangOpts().
HIP) {
230 FT = getABIInfo().getContext().adjustFunctionType(
236void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
238 FT = getABIInfo().getContext().adjustFunctionType(
243SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(
CodeGenModule &CGM,
247 "Address space agnostic languages only");
255 return DefaultGlobalAS;
257 LangAS AddrSpace =
D->getType().getAddressSpace();
258 if (AddrSpace != LangAS::Default)
261 return DefaultGlobalAS;
264void SPIRVTargetCodeGenInfo::setTargetAttributes(
269 if (GV->isDeclaration())
272 auto F = dyn_cast<llvm::Function>(GV);
276 auto FD = dyn_cast_or_null<FunctionDecl>(
D);
279 if (!FD->hasAttr<CUDAGlobalAttr>())
282 unsigned N = M.
getLangOpts().GPUMaxThreadsPerBlock;
283 if (
auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
284 N = FlatWGS->getMax()->EvaluateKnownConstInt(M.
getContext()).getExtValue();
290 llvm::Metadata *AttrMDArgs[] = {
291 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
292 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
293 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};
295 F->setMetadata(
"max_work_group_size",
301 llvm::AtomicOrdering,
302 llvm::LLVMContext &Ctx)
const {
303 return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(
Scope));
308 StringRef OpenCLName,
309 unsigned AccessQualifier) {
320 if (OpenCLName.starts_with(
"image2d"))
322 else if (OpenCLName.starts_with(
"image3d"))
324 else if (OpenCLName ==
"image1d_buffer")
327 assert(OpenCLName.starts_with(
"image1d") &&
"Unknown image type");
332 if (OpenCLName.contains(
"_depth"))
334 if (OpenCLName.contains(
"_array"))
336 if (OpenCLName.contains(
"_msaa"))
340 IntParams.push_back(AccessQualifier);
342 return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
346llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(
CodeGenModule &CGM,
347 const Type *Ty)
const {
349 if (
auto *PipeTy = dyn_cast<PipeType>(Ty))
350 return llvm::TargetExtType::get(Ctx,
"spirv.Pipe", {},
351 {!PipeTy->isReadOnly()});
352 if (
auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
353 enum AccessQualifier :
unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
354 switch (BuiltinTy->getKind()) {
355#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
356 case BuiltinType::Id: \
357 return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
358#include "clang/Basic/OpenCLImageTypes.def"
359 case BuiltinType::OCLSampler:
360 return llvm::TargetExtType::get(Ctx,
"spirv.Sampler");
361 case BuiltinType::OCLEvent:
362 return llvm::TargetExtType::get(Ctx,
"spirv.Event");
363 case BuiltinType::OCLClkEvent:
364 return llvm::TargetExtType::get(Ctx,
"spirv.DeviceEvent");
365 case BuiltinType::OCLQueue:
366 return llvm::TargetExtType::get(Ctx,
"spirv.Queue");
367 case BuiltinType::OCLReserveID:
368 return llvm::TargetExtType::get(Ctx,
"spirv.ReserveId");
369#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
370 case BuiltinType::OCLIntelSubgroupAVC##Id: \
371 return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
372#include "clang/Basic/OpenCLExtensionTypes.def"
384 llvm::Type *IntegralType,
391 while (
Value.ugt(0)) {
392 uint32_t Word =
Value.trunc(32).getZExtValue();
393 Value.lshrInPlace(32);
395 Words.push_back(Word);
397 if (Words.size() == 0)
401 return llvm::TargetExtType::get(Ctx,
"spirv.IntegralConstant",
402 {IntegralType}, Words);
403 return llvm::TargetExtType::get(Ctx,
"spirv.Literal", {}, Words);
412 for (
auto &Operand : SpirvType->getOperands()) {
415 llvm::Type *Result =
nullptr;
416 switch (Operand.getKind()) {
417 case SpirvOperandKind::ConstantId: {
418 llvm::Type *IntegralType =
424 case SpirvOperandKind::Literal: {
428 case SpirvOperandKind::TypeId: {
429 QualType TypeOperand = Operand.getResultType();
431 assert(RD->isCompleteDefinition() &&
432 "Type completion should have been required in Sema");
434 const FieldDecl *HandleField = RD->findFirstNamedDataMember();
438 TypeOperand = ResourceType;
446 llvm_unreachable(
"HLSLInlineSpirvType had invalid operand!");
451 Operands.push_back(Result);
454 return llvm::TargetExtType::get(Ctx,
"spirv.Type", Operands,
455 {SpirvType->getOpcode(), SpirvType->getSize(),
456 SpirvType->getAlignment()});
459llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
464 if (
auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
467 auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
473 case llvm::dxil::ResourceClass::UAV:
474 case llvm::dxil::ResourceClass::SRV: {
476 QualType ContainedTy = ResType->getContainedType();
480 assert(!ResAttrs.
IsROV &&
481 "Rasterizer order views not implemented for SPIR-V yet");
485 return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
489 llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
491 bool IsWritable = ResAttrs.
ResourceClass == llvm::dxil::ResourceClass::UAV;
492 return llvm::TargetExtType::get(Ctx,
"spirv.VulkanBuffer",
496 case llvm::dxil::ResourceClass::CBuffer: {
497 QualType ContainedTy = ResType->getContainedType();
501 llvm::Type *BufferLayoutTy =
506 return llvm::TargetExtType::get(Ctx,
"spirv.VulkanBuffer", {BufferLayoutTy},
510 case llvm::dxil::ResourceClass::Sampler:
511 return llvm::TargetExtType::get(Ctx,
"spirv.Sampler");
519 llvm::Type *SampledType,
QualType Ty,
unsigned NumChannels) {
524 if (LangOpts.HLSLSpvUseUnknownImageFormat ||
525 attributes.
ResourceClass != llvm::dxil::ResourceClass::UAV) {
529 if (SampledType->isIntegerTy(32)) {
531 if (NumChannels == 1)
533 if (NumChannels == 2)
535 if (NumChannels == 4)
538 if (NumChannels == 1)
540 if (NumChannels == 2)
542 if (NumChannels == 4)
545 }
else if (SampledType->isIntegerTy(64)) {
546 if (NumChannels == 1) {
552 }
else if (SampledType->isFloatTy()) {
553 if (NumChannels == 1)
555 if (NumChannels == 2)
557 if (NumChannels == 4)
564llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
569 unsigned NumChannels = 1;
571 if (
const VectorType *
V = dyn_cast<VectorType>(Ty)) {
572 NumChannels =
V->getNumElements();
573 Ty =
V->getElementType();
575 assert(!Ty->
isVectorType() &&
"We still have a vector type.");
579 assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
580 "The element type for a SPIR-V resource must be a scalar integer or "
581 "floating point type.");
607 attributes.
ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
613 llvm::TargetExtType *ImageType =
614 llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
618std::unique_ptr<TargetCodeGenInfo>
620 return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.
getTypes());
623std::unique_ptr<TargetCodeGenInfo>
625 return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.
getTypes());
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Type * getInlineSpirvType(CodeGenModule &CGM, const HLSLInlineSpirvType *SpirvType)
static llvm::Type * getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType, StringRef OpenCLName, unsigned AccessQualifier)
Construct a SPIR-V target extension type for the given OpenCL image type.
static unsigned getImageFormat(const LangOptions &LangOpts, const HLSLAttributedResourceType::Attributes &attributes, llvm::Type *SampledType, QualType Ty, unsigned NumChannels)
static llvm::Type * getInlineSpirvConstant(CodeGenModule &CGM, llvm::Type *IntegralType, llvm::APInt Value)
unsigned getTargetAddressSpace(LangAS AS) const
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to or returned from a function.
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into an LLVM calling convention.
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
DefaultABIInfo - The default implementation for ABI specific details.
ABIArgInfo classifyArgumentType(QualType RetTy) const
ABIArgInfo classifyReturnType(QualType RetTy) const
llvm::TargetExtType * createLayoutType(const RecordType *StructType, const llvm::SmallVector< int32_t > *Packoffsets=nullptr)
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
virtual unsigned getDeviceKernelCallingConv() const
Get LLVM calling convention for device kernels.
virtual llvm::Type * getOpenCLType(CodeGenModule &CGM, const Type *T) const
Return an LLVM type that corresponds to an OpenCL type.
virtual void setOCLKernelStubCallingConvention(const FunctionType *&FT) const
virtual llvm::Type * getHLSLType(CodeGenModule &CGM, const Type *T, const SmallVector< int32_t > *Packoffsets=nullptr) const
Return an LLVM type that corresponds to a HLSL type.
const T & getABIInfo() const
virtual LangAS getASTAllocaAddressSpace() const
Get the AST address space for alloca.
Decl - This represents one declaration (or definition), e.g.
Represents a member of a struct/union/class.
ExtInfo withCallingConv(CallingConv cc) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
Represents an arbitrary, user-specified SPIR-V type instruction.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that is accepted.
A (possibly-)qualified type.
bool isNull() const
Return true if this QualType doesn't point to a type yet.
bool hasFlexibleArrayMember() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Scope - A scope is a transient data structure that is used while parsing the program.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
The base class of the type hierarchy.
bool isStructureType() const
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
CanQualType getCanonicalTypeUnqualified() const
bool isVectorType() const
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
Represents a variable declaration or definition.
Represents a GCC generic vector type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createSPIRVTargetCodeGenInfo(CodeGenModule &CGM)
std::unique_ptr< TargetCodeGenInfo > createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM)
The JSON file list parser is used to communicate input to InstallAPI.
StorageClass
Storage classes.
LangAS
Defines the address space values used by the address space qualifier of QualType.
const FunctionProtoType * T
SyncScope
Defines sync scope values used internally by clang.
LangAS getLangASFromTargetAS(unsigned TargetAS)
llvm::dxil::ResourceClass ResourceClass