1//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Decl nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBlocks.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGDebugInfo.h"
17#include "CGOpenCLRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "ConstantEmitter.h"
23#include "EHScopeStack.h"
24#include "PatternInit.h"
25#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/DeclObjC.h"
36#include "clang/Sema/Sema.h"
37#include "llvm/Analysis/ConstantFolding.h"
38#include "llvm/Analysis/ValueTracking.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/GlobalVariable.h"
41#include "llvm/IR/Instructions.h"
42#include "llvm/IR/Intrinsics.h"
43#include "llvm/IR/Type.h"
44#include <optional>
45
46using namespace clang;
47using namespace CodeGen;
48
49static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
50 "Clang max alignment greater than what LLVM supports?");
51
52void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
53 switch (D.getKind()) {
54 case Decl::BuiltinTemplate:
55 case Decl::TranslationUnit:
56 case Decl::ExternCContext:
57 case Decl::Namespace:
58 case Decl::UnresolvedUsingTypename:
59 case Decl::ClassTemplateSpecialization:
60 case Decl::ClassTemplatePartialSpecialization:
61 case Decl::VarTemplateSpecialization:
62 case Decl::VarTemplatePartialSpecialization:
63 case Decl::TemplateTypeParm:
64 case Decl::UnresolvedUsingValue:
65 case Decl::NonTypeTemplateParm:
66 case Decl::CXXDeductionGuide:
67 case Decl::CXXMethod:
68 case Decl::CXXConstructor:
69 case Decl::CXXDestructor:
70 case Decl::CXXConversion:
71 case Decl::Field:
72 case Decl::MSProperty:
73 case Decl::IndirectField:
74 case Decl::ObjCIvar:
75 case Decl::ObjCAtDefsField:
76 case Decl::ParmVar:
77 case Decl::ImplicitParam:
78 case Decl::ClassTemplate:
79 case Decl::VarTemplate:
80 case Decl::FunctionTemplate:
81 case Decl::TypeAliasTemplate:
82 case Decl::TemplateTemplateParm:
83 case Decl::ObjCMethod:
84 case Decl::ObjCCategory:
85 case Decl::ObjCProtocol:
86 case Decl::ObjCInterface:
87 case Decl::ObjCCategoryImpl:
88 case Decl::ObjCImplementation:
89 case Decl::ObjCProperty:
90 case Decl::ObjCCompatibleAlias:
91 case Decl::PragmaComment:
92 case Decl::PragmaDetectMismatch:
93 case Decl::AccessSpec:
94 case Decl::LinkageSpec:
95 case Decl::Export:
96 case Decl::ObjCPropertyImpl:
97 case Decl::FileScopeAsm:
98 case Decl::TopLevelStmt:
99 case Decl::Friend:
100 case Decl::FriendTemplate:
101 case Decl::Block:
102 case Decl::OutlinedFunction:
103 case Decl::Captured:
104 case Decl::UsingShadow:
105 case Decl::ConstructorUsingShadow:
106 case Decl::ObjCTypeParam:
107 case Decl::Binding:
108 case Decl::UnresolvedUsingIfExists:
109 case Decl::HLSLBuffer:
110 case Decl::HLSLRootSignature:
111 llvm_unreachable("Declaration should not be in declstmts!");
112 case Decl::Record: // struct/union/class X;
113 case Decl::CXXRecord: // struct/union/class X; [C++]
114 if (CGDebugInfo *DI = getDebugInfo())
115 if (cast<RecordDecl>(D).getDefinition())
116 DI->EmitAndRetainType(
117 getContext().getCanonicalTagType(cast<RecordDecl>(&D)));
118 return;
119 case Decl::Enum: // enum X;
120 if (CGDebugInfo *DI = getDebugInfo())
121 if (cast<EnumDecl>(D).getDefinition())
122 DI->EmitAndRetainType(
123 getContext().getCanonicalTagType(cast<EnumDecl>(&D)));
124 return;
125 case Decl::Function: // void X();
126 case Decl::EnumConstant: // enum ? { X = ? }
127 case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
128 case Decl::Label: // __label__ x;
129 case Decl::Import:
130 case Decl::MSGuid: // __declspec(uuid("..."))
131 case Decl::UnnamedGlobalConstant:
132 case Decl::TemplateParamObject:
133 case Decl::OMPThreadPrivate:
134 case Decl::OMPAllocate:
135 case Decl::OMPCapturedExpr:
136 case Decl::OMPRequires:
137 case Decl::Empty:
138 case Decl::Concept:
139 case Decl::ImplicitConceptSpecialization:
140 case Decl::LifetimeExtendedTemporary:
141 case Decl::RequiresExprBody:
142 // None of these decls require codegen support.
143 return;
144
145 case Decl::NamespaceAlias:
146 if (CGDebugInfo *DI = getDebugInfo())
147 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
148 return;
149 case Decl::Using: // using X; [C++]
150 if (CGDebugInfo *DI = getDebugInfo())
151 DI->EmitUsingDecl(cast<UsingDecl>(D));
152 return;
153 case Decl::UsingEnum: // using enum X; [C++]
154 if (CGDebugInfo *DI = getDebugInfo())
155 DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
156 return;
157 case Decl::UsingPack:
158 for (auto *Using : cast<UsingPackDecl>(D).expansions())
159 EmitDecl(*Using, /*EvaluateConditionDecl=*/EvaluateConditionDecl);
160 return;
161 case Decl::UsingDirective: // using namespace X; [C++]
162 if (CGDebugInfo *DI = getDebugInfo())
163 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
164 return;
165 case Decl::Var:
166 case Decl::Decomposition: {
167 const VarDecl &VD = cast<VarDecl>(D);
168 assert(VD.isLocalVarDecl() &&
169 "Should not see file-scope variables inside a function!");
170 EmitVarDecl(VD);
171 if (EvaluateConditionDecl)
172 MaybeEmitDeferredVarDeclInit(&VD);
173
174 return;
175 }
176
177 case Decl::OMPDeclareReduction:
178 return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
179
180 case Decl::OMPDeclareMapper:
181 return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
182
183 case Decl::OpenACCDeclare:
184 return CGM.EmitOpenACCDeclare(cast<OpenACCDeclareDecl>(&D), this);
185 case Decl::OpenACCRoutine:
186 return CGM.EmitOpenACCRoutine(cast<OpenACCRoutineDecl>(&D), this);
187
188 case Decl::Typedef: // typedef int X;
189 case Decl::TypeAlias: { // using X = int; [C++0x]
190 QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
191 if (CGDebugInfo *DI = getDebugInfo())
192 DI->EmitAndRetainType(Ty);
193 if (Ty->isVariablyModifiedType())
194 EmitVariablyModifiedType(Ty);
195 return;
196 }
197 }
198}
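// NOTE (illustrative annotation, not part of CGDecl.cpp): EmitDecl is the entry
// point for declarations that appear in statement position. For example, a local
// typedef of a variably modified type only needs its array bound evaluated here:
//   void f(int n) {
//     typedef int VecN[n];   // Decl::Typedef case; EmitVariablyModifiedType(Ty)
//   }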
199
200/// EmitVarDecl - This method handles emission of any variable declaration
201/// inside a function, including static vars etc.
202void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
203 if (D.hasExternalStorage())
204 // Don't emit it now, allow it to be emitted lazily on its first use.
205 return;
206
207 // Some function-scope variable does not have static storage but still
208 // needs to be emitted like a static variable, e.g. a function-scope
209 // variable in constant address space in OpenCL.
210 if (D.getStorageDuration() != SD_Automatic) {
211 // Static sampler variables translated to function calls.
212 if (D.getType()->isSamplerT())
213 return;
214
215 llvm::GlobalValue::LinkageTypes Linkage =
216 CGM.getLLVMLinkageVarDefinition(&D);
217
218 // FIXME: We need to force the emission/use of a guard variable for
219 // some variables even if we can constant-evaluate them because
220 // we can't guarantee every translation unit will constant-evaluate them.
221
222 return EmitStaticVarDecl(D, Linkage);
223 }
224
225 if (D.getType().getAddressSpace() == LangAS::opencl_local)
226 return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
227
228 assert(D.hasLocalStorage());
229 return EmitAutoVarDecl(D);
230}
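// NOTE (illustrative annotation, not part of CGDecl.cpp): EmitVarDecl dispatches
// on storage, roughly as in:
//   void f() {
//     extern int e;      // external storage: emitted lazily on first use
//     static int s = 1;  // non-automatic duration: EmitStaticVarDecl
//     int a = 2;         // automatic, local storage: EmitAutoVarDecl
//   }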
231
232static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
233 if (CGM.getLangOpts().CPlusPlus)
234 return CGM.getMangledName(&D).str();
235
236 // If this isn't C++, we don't need a mangled name, just a pretty one.
237 assert(!D.isExternallyVisible() && "name shouldn't matter");
238 std::string ContextName;
239 const DeclContext *DC = D.getDeclContext();
240 if (auto *CD = dyn_cast<CapturedDecl>(DC))
241 DC = cast<DeclContext>(CD->getNonClosureContext());
242 if (const auto *FD = dyn_cast<FunctionDecl>(DC))
243 ContextName = std::string(CGM.getMangledName(FD));
244 else if (const auto *BD = dyn_cast<BlockDecl>(DC))
245 ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
246 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
247 ContextName = OMD->getSelector().getAsString();
248 else
249 llvm_unreachable("Unknown context for static var decl");
250
251 ContextName += "." + D.getNameAsString();
252 return ContextName;
253}
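// NOTE (illustrative annotation, not part of CGDecl.cpp): outside C++ the static
// local only needs a readable "<context>.<name>" label. For
//   void f(void) { static int counter; }
// the emitted global is typically named "f.counter"; C++ uses the mangled name.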
254
255llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
256 const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
257 // In general, we don't always emit static var decls once before we reference
258 // them. It is possible to reference them before emitting the function that
259 // contains them, and it is possible to emit the containing function multiple
260 // times.
261 if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
262 return ExistingGV;
263
264 QualType Ty = D.getType();
265 assert(Ty->isConstantSizeType() && "VLAs can't be static");
266
267 // Use the label if the variable is renamed with the asm-label extension.
268 std::string Name;
269 if (D.hasAttr<AsmLabelAttr>())
270 Name = std::string(getMangledName(&D));
271 else
272 Name = getStaticDeclName(*this, D);
273
274 llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
275 LangAS AS = GetGlobalVarAddressSpace(&D);
276 unsigned TargetAS = getContext().getTargetAddressSpace(AS);
277
278 // OpenCL variables in local address space and CUDA shared
279 // variables cannot have an initializer.
280 llvm::Constant *Init = nullptr;
281 if (Ty.getAddressSpace() == LangAS::opencl_local ||
282 D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
283 Init = llvm::UndefValue::get(LTy);
284 else
285 Init = EmitNullConstant(Ty);
286
287 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
288 getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
289 nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
290 GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
291
292 if (supportsCOMDAT() && GV->isWeakForLinker())
293 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
294
295 if (D.getTLSKind())
296 setTLSMode(GV, D);
297
298 setGVProperties(GV, &D);
299 getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);
300
301 // Make sure the result is of the correct type.
302 LangAS ExpectedAS = Ty.getAddressSpace();
303 llvm::Constant *Addr = GV;
304 if (AS != ExpectedAS) {
305 Addr = getTargetCodeGenInfo().performAddrSpaceCast(
306 *this, GV, AS,
307 llvm::PointerType::get(getLLVMContext(),
308 getContext().getTargetAddressSpace(ExpectedAS)));
309 }
310
311 setStaticLocalDeclAddress(&D, Addr);
312
313 // Ensure that the static local gets initialized by making sure the parent
314 // function gets emitted eventually.
315 const Decl *DC = cast<Decl>(D.getDeclContext());
316
317 // We can't name blocks or captured statements directly, so try to emit their
318 // parents.
319 if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
320 DC = DC->getNonClosureContext();
321 // FIXME: Ensure that global blocks get emitted.
322 if (!DC)
323 return Addr;
324 }
325
326 GlobalDecl GD;
327 if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
328 GD = GlobalDecl(CD, Ctor_Base);
329 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
330 GD = GlobalDecl(DD, Dtor_Base);
331 else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
332 GD = GlobalDecl(FD);
333 else {
334 // Don't do anything for Obj-C method decls or global closures. We should
335 // never defer them.
336 assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
337 }
338 if (GD.getDecl()) {
339 // Disable emission of the parent function for the OpenMP device codegen.
340 CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
341 (void)GetAddrOfGlobal(GD);
342 }
343
344 return Addr;
345}
346
347/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
348/// global variable that has already been created for it. If the initializer
349/// has a different type than GV does, this may free GV and return a different
350/// one. Otherwise it just returns GV.
351llvm::GlobalVariable *
352CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
353 llvm::GlobalVariable *GV) {
354 ConstantEmitter emitter(*this);
355 llvm::Constant *Init = emitter.tryEmitForInitializer(D);
356
357 // If constant emission failed, then this should be a C++ static
358 // initializer.
359 if (!Init) {
360 if (!getLangOpts().CPlusPlus)
361 CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
362 else if (D.hasFlexibleArrayInit(getContext()))
363 CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
364 else if (HaveInsertPoint()) {
365 // Since we have a static initializer, this global variable can't
366 // be constant.
367 GV->setConstant(false);
368
369 EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
370 }
371 return GV;
372 }
373
374 PGO->markStmtMaybeUsed(D.getInit()); // FIXME: Too lazy
375
376#ifndef NDEBUG
377 CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
378 D.getFlexibleArrayInitChars(getContext());
379 CharUnits CstSize = CharUnits::fromQuantity(
380 CGM.getDataLayout().getTypeAllocSize(Init->getType()));
381 assert(VarSize == CstSize && "Emitted constant has unexpected size");
382#endif
383
384 bool NeedsDtor =
385 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
386
387 GV->setConstant(
388 D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
389 GV->replaceInitializer(Init);
390
391 emitter.finalize(GV);
392
393 if (NeedsDtor && HaveInsertPoint()) {
394 // We have a constant initializer, but a nontrivial destructor. We still
395 // need to perform a guarded "initialization" in order to register the
396 // destructor.
397 EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
398 }
399
400 return GV;
401}
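// NOTE (illustrative annotation, not part of CGDecl.cpp): a static local can be
// constant-initialized and still take the NeedsDtor path above, which only
// registers the destructor behind a guard:
//   struct Logger { constexpr Logger() {} ~Logger(); };
//   void f() { static Logger log; }   // constant init, guarded dtor registration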
402
403void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
404 llvm::GlobalValue::LinkageTypes Linkage) {
405 // Check to see if we already have a global variable for this
406 // declaration. This can happen when double-emitting function
407 // bodies, e.g. with complete and base constructors.
408 llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
409 CharUnits alignment = getContext().getDeclAlign(&D);
410
411 // Store into LocalDeclMap before generating initializer to handle
412 // circular references.
413 llvm::Type *elemTy = ConvertTypeForMem(D.getType());
414 setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
415
416 // We can't have a VLA here, but we can have a pointer to a VLA,
417 // even though that doesn't really make any sense.
418 // Make sure to evaluate VLA bounds now so that we have them for later.
419 if (D.getType()->isVariablyModifiedType())
420 EmitVariablyModifiedType(D.getType());
421
422 // Save the type in case adding the initializer forces a type change.
423 llvm::Type *expectedType = addr->getType();
424
425 llvm::GlobalVariable *var =
426 cast<llvm::GlobalVariable>(addr->stripPointerCasts());
427
428 // CUDA's local and local static __shared__ variables should not
429 // have any non-empty initializers. This is ensured by Sema.
430 // Whatever initializer such variable may have when it gets here is
431 // a no-op and should not be emitted.
432 bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
433 D.hasAttr<CUDASharedAttr>();
434 // If this value has an initializer, emit it.
435 if (D.getInit() && !isCudaSharedVar) {
436 ApplyAtomGroup Grp(getDebugInfo());
437 var = AddInitializerToStaticVarDecl(D, var);
438 }
439
440 var->setAlignment(alignment.getAsAlign());
441
442 if (D.hasAttr<AnnotateAttr>())
443 CGM.AddGlobalAnnotations(&D, var);
444
445 if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
446 var->addAttribute("bss-section", SA->getName());
447 if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
448 var->addAttribute("data-section", SA->getName());
449 if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
450 var->addAttribute("rodata-section", SA->getName());
451 if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
452 var->addAttribute("relro-section", SA->getName());
453
454 if (const SectionAttr *SA = D.getAttr<SectionAttr>())
455 var->setSection(SA->getName());
456
457 if (D.hasAttr<RetainAttr>())
458 CGM.addUsedGlobal(var);
459 else if (D.hasAttr<UsedAttr>())
460 CGM.addUsedOrCompilerUsedGlobal(var);
461
462 if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
463 CGM.addUsedOrCompilerUsedGlobal(var);
464
465 // We may have to cast the constant because of the initializer
466 // mismatch above.
467 //
468 // FIXME: It is really dangerous to store this in the map; if anyone
469 // RAUW's the GV uses of this constant will be invalid.
470 llvm::Constant *castedAddr =
471 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
472 LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
473 CGM.setStaticLocalDeclAddress(&D, castedAddr);
474
475 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(var);
476
477 // Emit global variable debug descriptor for static vars.
478 CGDebugInfo *DI = getDebugInfo();
479 if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
480 DI->setLocation(D.getLocation());
481 DI->EmitGlobalVariable(var, &D);
482 }
483}
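// NOTE (illustrative annotation, not part of CGDecl.cpp): the attribute handling
// above is what honors placement requests on function-scope statics, e.g.
//   void f() {
//     __attribute__((section("mysec"))) static int table[4] = {1, 2, 3, 4};
//   }
// which ends up as a global with internal naming placed in section "mysec".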
484
485namespace {
486 struct DestroyObject final : EHScopeStack::Cleanup {
487 DestroyObject(Address addr, QualType type,
488 CodeGenFunction::Destroyer *destroyer,
489 bool useEHCleanupForArray)
490 : addr(addr), type(type), destroyer(destroyer),
491 useEHCleanupForArray(useEHCleanupForArray) {}
492
493 Address addr;
494 QualType type;
495 CodeGenFunction::Destroyer *destroyer;
496 bool useEHCleanupForArray;
497
498 void Emit(CodeGenFunction &CGF, Flags flags) override {
499 // Don't use an EH cleanup recursively from an EH cleanup.
500 bool useEHCleanupForArray =
501 flags.isForNormalCleanup() && this->useEHCleanupForArray;
502
503 CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
504 }
505 };
506
507 template <class Derived>
508 struct DestroyNRVOVariable : EHScopeStack::Cleanup {
509 DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
510 : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
511
512 llvm::Value *NRVOFlag;
513 Address Loc;
514 QualType Ty;
515
516 void Emit(CodeGenFunction &CGF, Flags flags) override {
517 // Along the exceptions path we always execute the dtor.
518 bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
519
520 llvm::BasicBlock *SkipDtorBB = nullptr;
521 if (NRVO) {
522 // If we exited via NRVO, we skip the destructor call.
523 llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
524 SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
525 llvm::Value *DidNRVO =
526 CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
527 CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
528 CGF.EmitBlock(RunDtorBB);
529 }
530
531 static_cast<Derived *>(this)->emitDestructorCall(CGF);
532
533 if (NRVO) CGF.EmitBlock(SkipDtorBB);
534 }
535
536 virtual ~DestroyNRVOVariable() = default;
537 };
538
539 struct DestroyNRVOVariableCXX final
540 : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
541 DestroyNRVOVariableCXX(Address addr, QualType type,
542 const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
543 : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
544 Dtor(Dtor) {}
545
546 const CXXDestructorDecl *Dtor;
547
548 void emitDestructorCall(CodeGenFunction &CGF) {
549 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
550 /*ForVirtualBase=*/false,
551 /*Delegating=*/false, Loc, Ty);
552 }
553 };
554
555 struct DestroyNRVOVariableC final
556 : DestroyNRVOVariable<DestroyNRVOVariableC> {
557 DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
558 : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
559
560 void emitDestructorCall(CodeGenFunction &CGF) {
561 CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
562 }
563 };
564
565 struct CallStackRestore final : EHScopeStack::Cleanup {
566 Address Stack;
567 CallStackRestore(Address Stack) : Stack(Stack) {}
568 bool isRedundantBeforeReturn() override { return true; }
569 void Emit(CodeGenFunction &CGF, Flags flags) override {
570 llvm::Value *V = CGF.Builder.CreateLoad(Stack);
571 CGF.Builder.CreateStackRestore(V);
572 }
573 };
574
575 struct KmpcAllocFree final : EHScopeStack::Cleanup {
576 std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
577 KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
578 : AddrSizePair(AddrSizePair) {}
579 void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
580 auto &RT = CGF.CGM.getOpenMPRuntime();
581 RT.getKmpcFreeShared(CGF, AddrSizePair);
582 }
583 };
584
585 struct ExtendGCLifetime final : EHScopeStack::Cleanup {
586 const VarDecl &Var;
587 ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
588
589 void Emit(CodeGenFunction &CGF, Flags flags) override {
590 // Compute the address of the local variable, in case it's a
591 // byref or something.
592 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
593 Var.getType(), VK_LValue, SourceLocation());
594 llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
595 SourceLocation());
596 CGF.EmitExtendGCLifetime(value);
597 }
598 };
599
600 struct CallCleanupFunction final : EHScopeStack::Cleanup {
601 llvm::Constant *CleanupFn;
602 const CGFunctionInfo &FnInfo;
603 const VarDecl &Var;
604 const CleanupAttr *Attribute;
605
606 CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
607 const VarDecl *Var, const CleanupAttr *Attr)
608 : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var), Attribute(Attr) {}
609
610 void Emit(CodeGenFunction &CGF, Flags flags) override {
611 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
612 Var.getType(), VK_LValue, SourceLocation());
613 // Compute the address of the local variable, in case it's a byref
614 // or something.
615 llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);
616
617 // In some cases, the type of the function argument will be different from
618 // the type of the pointer. An example of this is
619 // void f(void* arg);
620 // __attribute__((cleanup(f))) void *g;
621 //
622 // To fix this we insert a bitcast here.
623 QualType ArgTy = FnInfo.arg_begin()->type;
624 llvm::Value *Arg =
625 CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
626
627 CallArgList Args;
628 Args.add(RValue::get(Arg),
629 CGF.getContext().getPointerType(Var.getType()));
630 GlobalDecl GD = GlobalDecl(Attribute->getFunctionDecl());
631 auto Callee = CGCallee::forDirect(CleanupFn, CGCalleeInfo(GD));
632 CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
633 /*callOrInvoke*/ nullptr, /*IsMustTail*/ false,
634 Attribute->getLoc());
635 }
636 };
637} // end anonymous namespace
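// NOTE (illustrative annotation, not part of CGDecl.cpp): DestroyNRVOVariable is
// what keeps an NRVO candidate from being destroyed twice when only some return
// statements return it, e.g.
//   struct Big { ~Big(); };
//   Big make(bool cond) {
//     Big b;                  // built in the return slot; "nrvo" flag guards ~Big()
//     if (cond) return b;     // flag set: the local destructor is skipped on exit
//     return Big{};
//   }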
638
639/// EmitAutoVarWithLifetime - Does the setup required for an automatic
640/// variable with lifetime.
641static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
642 Address addr,
643 Qualifiers::ObjCLifetime lifetime) {
644 switch (lifetime) {
645 case Qualifiers::OCL_None:
646 llvm_unreachable("present but none");
647
648 case Qualifiers::OCL_ExplicitNone:
649 // nothing to do
650 break;
651
652 case Qualifiers::OCL_Strong: {
653 CodeGenFunction::Destroyer *destroyer =
654 (var.hasAttr<ObjCPreciseLifetimeAttr>()
655 ? CodeGenFunction::destroyARCStrongPrecise
656 : CodeGenFunction::destroyARCStrongImprecise);
657
658 CleanupKind cleanupKind = CGF.getARCCleanupKind();
659 CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
660 cleanupKind & EHCleanup);
661 break;
662 }
663 case Qualifiers::OCL_Autoreleasing:
664 // nothing to do
665 break;
666
667 case Qualifiers::OCL_Weak:
668 // __weak objects always get EH cleanups; otherwise, exceptions
669 // could cause really nasty crashes instead of mere leaks.
670 CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
671 CodeGenFunction::destroyARCWeak,
672 /*useEHCleanup*/ true);
673 break;
674 }
675}
676
677static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
678 if (const Expr *e = dyn_cast<Expr>(s)) {
679 // Skip the most common kinds of expressions that make
680 // hierarchy-walking expensive.
681 s = e = e->IgnoreParenCasts();
682
683 if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
684 return (ref->getDecl() == &var);
685 if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
686 const BlockDecl *block = be->getBlockDecl();
687 for (const auto &I : block->captures()) {
688 if (I.getVariable() == &var)
689 return true;
690 }
691 }
692 }
693
694 for (const Stmt *SubStmt : s->children())
695 // SubStmt might be null; as in missing decl or conditional of an if-stmt.
696 if (SubStmt && isAccessedBy(var, SubStmt))
697 return true;
698
699 return false;
700}
701
702static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
703 if (!decl) return false;
704 if (!isa<VarDecl>(decl)) return false;
705 const VarDecl *var = cast<VarDecl>(decl);
706 return isAccessedBy(*var, e);
707}
708
709static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
710 const LValue &destLV, const Expr *init) {
711 bool needsCast = false;
712
713 while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
714 switch (castExpr->getCastKind()) {
715 // Look through casts that don't require representation changes.
716 case CK_NoOp:
717 case CK_BitCast:
718 case CK_BlockPointerToObjCPointerCast:
719 needsCast = true;
720 break;
721
722 // If we find an l-value to r-value cast from a __weak variable,
723 // emit this operation as a copy or move.
724 case CK_LValueToRValue: {
725 const Expr *srcExpr = castExpr->getSubExpr();
726 if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
727 return false;
728
729 // Emit the source l-value.
730 LValue srcLV = CGF.EmitLValue(srcExpr);
731
732 // Handle a formal type change to avoid asserting.
733 auto srcAddr = srcLV.getAddress();
734 if (needsCast) {
735 srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
736 }
737
738 // If it was an l-value, use objc_copyWeak.
739 if (srcExpr->isLValue()) {
740 CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
741 } else {
742 assert(srcExpr->isXValue());
743 CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
744 }
745 return true;
746 }
747
748 // Stop at anything else.
749 default:
750 return false;
751 }
752
753 init = castExpr->getSubExpr();
754 }
755 return false;
756}
757
758static void drillIntoBlockVariable(CodeGenFunction &CGF,
759 LValue &lvalue,
760 const VarDecl *var) {
761 lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
762}
763
764void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
765 SourceLocation Loc) {
766 if (!SanOpts.has(SanitizerKind::NullabilityAssign))
767 return;
768
769 auto Nullability = LHS.getType()->getNullability();
770 if (!Nullability || *Nullability != NullabilityKind::NonNull)
771 return;
772
773 // Check if the right hand side of the assignment is nonnull, if the left
774 // hand side must be nonnull.
775 auto CheckOrdinal = SanitizerKind::SO_NullabilityAssign;
776 auto CheckHandler = SanitizerHandler::TypeMismatch;
777 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
778 llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
779 llvm::Constant *StaticData[] = {
780 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
781 llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
782 llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
783 EmitCheck({{IsNotNull, CheckOrdinal}}, CheckHandler, StaticData, RHS);
784}
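// NOTE (illustrative annotation, not part of CGDecl.cpp): under
// -fsanitize=nullability-assign, storing into a _Nonnull l-value, including the
// initialization of a local, emits the check built above:
//   void g(int *p) {
//     int *_Nonnull q = p;   // runtime diagnostic if p is null
//   }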
785
786void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
787 LValue lvalue, bool capturedByInit) {
788 Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
789 if (!lifetime) {
790 llvm::Value *Value;
791 if (PointerAuthQualifier PtrAuth = lvalue.getQuals().getPointerAuth()) {
792 Value = EmitPointerAuthQualify(PtrAuth, init, lvalue.getAddress());
793 lvalue.getQuals().removePointerAuth();
794 } else {
795 Value = EmitScalarExpr(init);
796 }
797 if (capturedByInit)
798 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
799 EmitNullabilityCheck(lvalue, Value, init->getExprLoc());
800 EmitStoreThroughLValue(RValue::get(Value), lvalue, true);
801 return;
802 }
803
804 if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
805 init = DIE->getExpr();
806
807 // If we're emitting a value with lifetime, we have to do the
808 // initialization *before* we leave the cleanup scopes.
809 if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
810 CodeGenFunction::RunCleanupsScope Scope(*this);
811 return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
812 }
813
814 // We have to maintain the illusion that the variable is
815 // zero-initialized. If the variable might be accessed in its
816 // initializer, zero-initialize before running the initializer, then
817 // actually perform the initialization with an assign.
818 bool accessedByInit = false;
819 if (lifetime != Qualifiers::OCL_ExplicitNone)
820 accessedByInit = (capturedByInit || isAccessedBy(D, init));
821 if (accessedByInit) {
822 LValue tempLV = lvalue;
823 // Drill down to the __block object if necessary.
824 if (capturedByInit) {
825 // We can use a simple GEP for this because it can't have been
826 // moved yet.
827 tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
828 cast<VarDecl>(D),
829 /*follow*/ false));
830 }
831
832 auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
833 llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
834
835 // If __weak, we want to use a barrier under certain conditions.
836 if (lifetime == Qualifiers::OCL_Weak)
837 EmitARCInitWeak(tempLV.getAddress(), zero);
838
839 // Otherwise just do a simple store.
840 else
841 EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
842 }
843
844 // Emit the initializer.
845 llvm::Value *value = nullptr;
846
847 switch (lifetime) {
848 case Qualifiers::OCL_None:
849 llvm_unreachable("present but none");
850
851 case Qualifiers::OCL_Strong: {
852 if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
853 value = EmitARCRetainScalarExpr(init);
854 break;
855 }
856 // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
857 // that we omit the retain, and causes non-autoreleased return values to be
858 // immediately released.
859 [[fallthrough]];
860 }
861
862 case Qualifiers::OCL_ExplicitNone:
863 value = EmitARCUnsafeUnretainedScalarExpr(init);
864 break;
865
866 case Qualifiers::OCL_Weak: {
867 // If it's not accessed by the initializer, try to emit the
868 // initialization with a copy or move.
869 if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
870 return;
871 }
872
873 // No way to optimize a producing initializer into this. It's not
874 // worth optimizing for, because the value will immediately
875 // disappear in the common case.
876 value = EmitScalarExpr(init);
877
878 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
879 if (accessedByInit)
880 EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
881 else
882 EmitARCInitWeak(lvalue.getAddress(), value);
883 return;
884 }
885
886 case Qualifiers::OCL_Autoreleasing:
887 value = EmitARCRetainAutoreleaseScalarExpr(init);
888 break;
889 }
890
891 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
892
893 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
894
895 // If the variable might have been accessed by its initializer, we
896 // might have to initialize with a barrier. We have to do this for
897 // both __weak and __strong, but __weak got filtered out above.
898 if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
899 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
900 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
901 EmitARCRelease(oldValue, ARCImpreciseLifetime);
902 return;
903 }
904
905 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
906}
907
908/// Decide whether we can emit the non-zero parts of the specified initializer
909/// with equal or fewer than NumStores scalar stores.
910static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
911 unsigned &NumStores) {
912 // Zero and Undef never requires any extra stores.
913 if (isa<llvm::ConstantAggregateZero>(Init) ||
914 isa<llvm::ConstantPointerNull>(Init) ||
915 isa<llvm::UndefValue>(Init))
916 return true;
917 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
918 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
919 isa<llvm::ConstantExpr>(Init))
920 return Init->isNullValue() || NumStores--;
921
922 // See if we can emit each element.
923 if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
924 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
925 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
926 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
927 return false;
928 }
929 return true;
930 }
931
932 if (llvm::ConstantDataSequential *CDS =
933 dyn_cast<llvm::ConstantDataSequential>(Init)) {
934 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
935 llvm::Constant *Elt = CDS->getElementAsConstant(i);
936 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
937 return false;
938 }
939 return true;
940 }
941
942 // Anything else is hard and scary.
943 return false;
944}
945
946/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
947/// the scalar stores that would be required.
948void CodeGenFunction::emitStoresForInitAfterBZero(llvm::Constant *Init,
949 Address Loc, bool isVolatile,
950 bool IsAutoInit) {
951 assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
952 "called emitStoresForInitAfterBZero for zero or undef value.");
953
954 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
955 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
956 isa<llvm::ConstantExpr>(Init)) {
957 auto *I = Builder.CreateStore(Init, Loc, isVolatile);
958 addInstToCurrentSourceAtom(I, nullptr);
959 if (IsAutoInit)
960 I->addAnnotationMetadata("auto-init");
961 return;
962 }
963
964 if (llvm::ConstantDataSequential *CDS =
965 dyn_cast<llvm::ConstantDataSequential>(Init)) {
966 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
967 llvm::Constant *Elt = CDS->getElementAsConstant(i);
968
969 // If necessary, get a pointer to the element and emit it.
970 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
971 emitStoresForInitAfterBZero(
972 Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
973 IsAutoInit);
974 }
975 return;
976 }
977
978 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
979 "Unknown value type!");
980
981 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
982 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
983
984 // If necessary, get a pointer to the element and emit it.
985 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
986 emitStoresForInitAfterBZero(Elt,
987 Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
988 isVolatile, IsAutoInit);
989 }
990}
991
992/// Decide whether we should use bzero plus some stores to initialize a local
993/// variable instead of using a memcpy from a constant global. It is beneficial
994/// to use bzero if the global is all zeros, or mostly zeros and large.
995static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
996 uint64_t GlobalSize) {
997 // If a global is all zeros, always use a bzero.
998 if (isa<llvm::ConstantAggregateZero>(Init)) return true;
999
1000 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
1001 // do it if it will require 6 or fewer scalar stores.
1002 // TODO: Should budget depends on the size? Avoiding a large global warrants
1003 // plopping in more stores.
1004 unsigned StoreBudget = 6;
1005 uint64_t SizeLimit = 32;
1006
1007 return GlobalSize > SizeLimit &&
1008 canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
1009}
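// NOTE (illustrative annotation, not part of CGDecl.cpp): a large, mostly zero
// local such as
//   void f() { int arr[1000] = {1, 2, 3}; }
// exceeds the 32-byte limit but needs only three scalar stores, so it is
// typically lowered as a memset(0) of the whole object followed by those stores.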
1010
1011/// Decide whether we should use memset to initialize a local variable instead
1012/// of using a memcpy from a constant global. Assumes we've already decided to
1013/// not user bzero.
1014/// FIXME We could be more clever, as we are for bzero above, and generate
1015/// memset followed by stores. It's unclear that's worth the effort.
1016static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
1017 uint64_t GlobalSize,
1018 const llvm::DataLayout &DL) {
1019 uint64_t SizeLimit = 32;
1020 if (GlobalSize <= SizeLimit)
1021 return nullptr;
1022 return llvm::isBytewiseValue(Init, DL);
1023}
1024
1025/// Decide whether we want to split a constant structure or array store into a
1026/// sequence of its fields' stores. This may cost us code size and compilation
1027/// speed, but plays better with store optimizations.
1028static bool shouldSplitConstantStore(CodeGenModule &CGM,
1029 uint64_t GlobalByteSize) {
1030 // Don't break things that occupy more than one cacheline.
1031 uint64_t ByteSizeLimit = 64;
1032 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1033 return false;
1034 if (GlobalByteSize <= ByteSizeLimit)
1035 return true;
1036 return false;
1037}
1038
1039enum class IsPattern { No, Yes };
1040
1041/// Generate a constant filled with either a pattern or zeroes.
1042static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
1043 llvm::Type *Ty) {
1044 if (isPattern == IsPattern::Yes)
1045 return initializationPatternFor(CGM, Ty);
1046 else
1047 return llvm::Constant::getNullValue(Ty);
1048}
1049
1050static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1051 llvm::Constant *constant);
1052
1053/// Helper function for constWithPadding() to deal with padding in structures.
1054static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1055 IsPattern isPattern,
1056 llvm::StructType *STy,
1057 llvm::Constant *constant) {
1058 const llvm::DataLayout &DL = CGM.getDataLayout();
1059 const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1060 llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1061 unsigned SizeSoFar = 0;
1062 SmallVector<llvm::Constant *, 8> Values;
1063 bool NestedIntact = true;
1064 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1065 unsigned CurOff = Layout->getElementOffset(i);
1066 if (SizeSoFar < CurOff) {
1067 assert(!STy->isPacked());
1068 auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1069 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1070 }
1071 llvm::Constant *CurOp;
1072 if (constant->isZeroValue())
1073 CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1074 else
1075 CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1076 auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1077 if (CurOp != NewOp)
1078 NestedIntact = false;
1079 Values.push_back(NewOp);
1080 SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1081 }
1082 unsigned TotalSize = Layout->getSizeInBytes();
1083 if (SizeSoFar < TotalSize) {
1084 auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1085 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1086 }
1087 if (NestedIntact && Values.size() == STy->getNumElements())
1088 return constant;
1089 return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1090}
1091
1092/// Replace all padding bytes in a given constant with either a pattern byte or
1093/// 0x00.
1094static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1095 llvm::Constant *constant) {
1096 llvm::Type *OrigTy = constant->getType();
1097 if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1098 return constStructWithPadding(CGM, isPattern, STy, constant);
1099 if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1100 SmallVector<llvm::Constant *, 8> Values;
1101 uint64_t Size = ArrayTy->getNumElements();
1102 if (!Size)
1103 return constant;
1104 llvm::Type *ElemTy = ArrayTy->getElementType();
1105 bool ZeroInitializer = constant->isNullValue();
1106 llvm::Constant *OpValue, *PaddedOp;
1107 if (ZeroInitializer) {
1108 OpValue = llvm::Constant::getNullValue(ElemTy);
1109 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1110 }
1111 for (unsigned Op = 0; Op != Size; ++Op) {
1112 if (!ZeroInitializer) {
1113 OpValue = constant->getAggregateElement(Op);
1114 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1115 }
1116 Values.push_back(PaddedOp);
1117 }
1118 auto *NewElemTy = Values[0]->getType();
1119 if (NewElemTy == ElemTy)
1120 return constant;
1121 auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1122 return llvm::ConstantArray::get(NewArrayTy, Values);
1123 }
1124 // FIXME: Add handling for tail padding in vectors. Vectors don't
1125 // have padding between or inside elements, but the total amount of
1126 // data can be less than the allocated size.
1127 return constant;
1128}
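// NOTE (illustrative annotation, not part of CGDecl.cpp): constWithPadding matters
// for -ftrivial-auto-var-init=pattern, where padding bytes must be filled too.
// For a local
//   struct S { char c; int i; } s;   // 3 padding bytes between c and i
// the initializer is rebuilt with an explicit [3 x i8] pattern member for the gap.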
1129
1130Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1131 llvm::Constant *Constant,
1132 CharUnits Align) {
1133 auto FunctionName = [&](const DeclContext *DC) -> std::string {
1134 if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1135 if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1136 return CC->getNameAsString();
1137 if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1138 return CD->getNameAsString();
1139 return std::string(getMangledName(FD));
1140 } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1141 return OM->getNameAsString();
1142 } else if (isa<BlockDecl>(DC)) {
1143 return "<block>";
1144 } else if (isa<CapturedDecl>(DC)) {
1145 return "<captured>";
1146 } else {
1147 llvm_unreachable("expected a function or method");
1148 }
1149 };
1150
1151 // Form a simple per-variable cache of these values in case we find we
1152 // want to reuse them.
1153 llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1154 if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1155 auto *Ty = Constant->getType();
1156 bool isConstant = true;
1157 llvm::GlobalVariable *InsertBefore = nullptr;
1158 unsigned AS =
1159 getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
1160 std::string Name;
1161 if (D.hasGlobalStorage())
1162 Name = getMangledName(&D).str() + ".const";
1163 else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1164 Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1165 else
1166 llvm_unreachable("local variable has no parent function or method");
1167 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1168 getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1169 Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1170 GV->setAlignment(Align.getAsAlign());
1171 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1172 CacheEntry = GV;
1173 } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1174 CacheEntry->setAlignment(Align.getAsAlign());
1175 }
1176
1177 return Address(CacheEntry, CacheEntry->getValueType(), Align);
1178}
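// NOTE (illustrative annotation, not part of CGDecl.cpp): the unnamed global
// created above backs memcpy-style initialization of local aggregates, e.g.
//   void f() { const int lut[8] = {1, 2, 3, 4, 5, 6, 7, 8}; }
// typically produces a private constant named "__const.f.lut" holding the bytes.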
1179
1180static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1181 const VarDecl &D,
1182 CGBuilderTy &Builder,
1183 llvm::Constant *Constant,
1184 CharUnits Align) {
1185 Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1186 return SrcPtr.withElementType(CGM.Int8Ty);
1187}
1188
1189void CodeGenFunction::emitStoresForConstant(const VarDecl &D, Address Loc,
1190 bool isVolatile,
1191 llvm::Constant *constant,
1192 bool IsAutoInit) {
1193 auto *Ty = constant->getType();
1194 uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1195 if (!ConstantSize)
1196 return;
1197
1198 bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1199 Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1200 if (canDoSingleStore) {
1201 auto *I = Builder.CreateStore(constant, Loc, isVolatile);
1202 addInstToCurrentSourceAtom(I, nullptr);
1203 if (IsAutoInit)
1204 I->addAnnotationMetadata("auto-init");
1205 return;
1206 }
1207
1208 auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1209
1210 // If the initializer is all or mostly the same, codegen with bzero / memset
1211 // then do a few stores afterward.
1212 if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1213 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
1214 SizeVal, isVolatile);
1215 addInstToCurrentSourceAtom(I, nullptr);
1216
1217 if (IsAutoInit)
1218 I->addAnnotationMetadata("auto-init");
1219
1220 bool valueAlreadyCorrect =
1221 constant->isNullValue() || isa<llvm::UndefValue>(constant);
1222 if (!valueAlreadyCorrect) {
1223 Loc = Loc.withElementType(Ty);
1224 emitStoresForInitAfterBZero(constant, Loc, isVolatile, IsAutoInit);
1225 }
1226 return;
1227 }
1228
1229 // If the initializer is a repeated byte pattern, use memset.
1230 llvm::Value *Pattern =
1231 shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1232 if (Pattern) {
1233 uint64_t Value = 0x00;
1234 if (!isa<llvm::UndefValue>(Pattern)) {
1235 const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1236 assert(AP.getBitWidth() <= 8);
1237 Value = AP.getLimitedValue();
1238 }
1239 auto *I = Builder.CreateMemSet(
1240 Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
1241 addInstToCurrentSourceAtom(I, nullptr);
1242 if (IsAutoInit)
1243 I->addAnnotationMetadata("auto-init");
1244 return;
1245 }
1246
1247 // If the initializer is small or trivialAutoVarInit is set, use a handful of
1248 // stores.
1249 bool IsTrivialAutoVarInitPattern =
1250 CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
1251 LangOptions::TrivialAutoVarInitKind::Pattern;
1252 if (shouldSplitConstantStore(CGM, ConstantSize)) {
1253 if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1254 if (STy == Loc.getElementType() ||
1255 (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1256 const llvm::StructLayout *Layout =
1257 CGM.getDataLayout().getStructLayout(STy);
1258 for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1259 CharUnits CurOff =
1260 CharUnits::fromQuantity(Layout->getElementOffset(i));
1261 Address EltPtr = Builder.CreateConstInBoundsByteGEP(
1262 Loc.withElementType(CGM.Int8Ty), CurOff);
1263 emitStoresForConstant(D, EltPtr, isVolatile,
1264 constant->getAggregateElement(i), IsAutoInit);
1265 }
1266 return;
1267 }
1268 } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1269 if (ATy == Loc.getElementType() ||
1270 (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1271 for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1272 Address EltPtr = Builder.CreateConstGEP(
1273 Loc.withElementType(ATy->getElementType()), i);
1274 emitStoresForConstant(D, EltPtr, isVolatile,
1275 constant->getAggregateElement(i), IsAutoInit);
1276 }
1277 return;
1278 }
1279 }
1280 }
1281
1282 // Copy from a global.
1283 auto *I =
1284 Builder.CreateMemCpy(Loc,
1285 createUnnamedGlobalForMemcpyFrom(
1286 CGM, D, Builder, constant, Loc.getAlignment()),
1287 SizeVal, isVolatile);
1288 addInstToCurrentSourceAtom(I, nullptr);
1289
1290 if (IsAutoInit)
1291 I->addAnnotationMetadata("auto-init");
1292}
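// NOTE (illustrative annotation, not part of CGDecl.cpp): small constant
// aggregates take the field-by-field path above when optimizing, e.g.
//   struct Point { int x, y; };
//   void f() { struct Point p = {1, 2}; }   // at -O1 and above: two scalar stores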
1293
1294void CodeGenFunction::emitStoresForZeroInit(const VarDecl &D, Address Loc,
1295 bool isVolatile) {
1296 llvm::Type *ElTy = Loc.getElementType();
1297 llvm::Constant *constant =
1298 constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1299 emitStoresForConstant(D, Loc, isVolatile, constant,
1300 /*IsAutoInit=*/true);
1301}
1302
1303void CodeGenFunction::emitStoresForPatternInit(const VarDecl &D, Address Loc,
1304 bool isVolatile) {
1305 llvm::Type *ElTy = Loc.getElementType();
1306 llvm::Constant *constant = constWithPadding(
1307 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1308 assert(!isa<llvm::UndefValue>(constant));
1309 emitStoresForConstant(D, Loc, isVolatile, constant,
1310 /*IsAutoInit=*/true);
1311}
1312
1313static bool containsUndef(llvm::Constant *constant) {
1314 auto *Ty = constant->getType();
1315 if (isa<llvm::UndefValue>(constant))
1316 return true;
1317 if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1318 for (llvm::Use &Op : constant->operands())
1319 if (containsUndef(cast<llvm::Constant>(Op)))
1320 return true;
1321 return false;
1322}
1323
1324static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1325 llvm::Constant *constant) {
1326 auto *Ty = constant->getType();
1327 if (isa<llvm::UndefValue>(constant))
1328 return patternOrZeroFor(CGM, isPattern, Ty);
1329 if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1330 return constant;
1331 if (!containsUndef(constant))
1332 return constant;
1333 llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1334 for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1335 auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1336 Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1337 }
1338 if (Ty->isStructTy())
1339 return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1340 if (Ty->isArrayTy())
1341 return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1342 assert(Ty->isVectorTy());
1343 return llvm::ConstantVector::get(Values);
1344}
1345
1346/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1347/// variable declaration with auto, register, or no storage class specifier.
1348/// These turn into simple stack objects, or GlobalValues depending on target.
1349void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1350 AutoVarEmission emission = EmitAutoVarAlloca(D);
1351 EmitAutoVarInit(emission);
1352 EmitAutoVarCleanups(emission);
1353}
1354
1355/// Emit a lifetime.begin marker if some criteria are satisfied.
1356/// \return whether the marker was emitted.
1357bool CodeGenFunction::EmitLifetimeStart(llvm::Value *Addr) {
1358 if (!ShouldEmitLifetimeMarkers)
1359 return false;
1360
1361 assert(Addr->getType()->getPointerAddressSpace() ==
1362 CGM.getDataLayout().getAllocaAddrSpace() &&
1363 "Pointer should be in alloca address space");
1364 llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {Addr});
1365 C->setDoesNotThrow();
1366 return true;
1367}
1368
1369void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Addr) {
1370 if (!ShouldEmitLifetimeMarkers)
1371 return;
1372
1373 assert(Addr->getType()->getPointerAddressSpace() ==
1374 CGM.getDataLayout().getAllocaAddrSpace() &&
1375 "Pointer should be in alloca address space");
1376 llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Addr});
1377 C->setDoesNotThrow();
1378}
1379
1380void CodeGenFunction::EmitFakeUse(Address Addr) {
1381 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1382 llvm::Value *V = Builder.CreateLoad(Addr, "fake.use");
1383 llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMFakeUseFn(), {V});
1384 C->setDoesNotThrow();
1385 C->setTailCallKind(llvm::CallInst::TCK_NoTail);
1386}
1387
1388void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1389 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1390 // For each dimension stores its QualType and corresponding
1391 // size-expression Value.
1392 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1393 SmallVector<const IdentifierInfo *, 4> VLAExprNames;
1394
1395 // Break down the array into individual dimensions.
1396 QualType Type1D = D.getType();
1397 while (getContext().getAsVariableArrayType(Type1D)) {
1398 auto VlaSize = getVLAElements1D(Type1D);
1399 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1400 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1401 else {
1402 // Generate a locally unique name for the size expression.
1403 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1404 SmallString<12> Buffer;
1405 StringRef NameRef = Name.toStringRef(Buffer);
1406 auto &Ident = getContext().Idents.getOwn(NameRef);
1407 VLAExprNames.push_back(&Ident);
1408 auto SizeExprAddr =
1409 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1410 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1411 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1412 Type1D.getUnqualifiedType());
1413 }
1414 Type1D = VlaSize.Type;
1415 }
1416
1417 if (!EmitDebugInfo)
1418 return;
1419
1420 // Register each dimension's size-expression with a DILocalVariable,
1421 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1422 // to describe this array.
1423 unsigned NameIdx = 0;
1424 for (auto &VlaSize : Dimensions) {
1425 llvm::Metadata *MD;
1426 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1427 MD = llvm::ConstantAsMetadata::get(C);
1428 else {
1429 // Create an artificial VarDecl to generate debug info for.
1430 const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1431 auto QT = getContext().getIntTypeForBitwidth(
1432 SizeTy->getScalarSizeInBits(), false);
1433 auto *ArtificialDecl = VarDecl::Create(
1434 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1435 D.getLocation(), D.getLocation(), NameIdent, QT,
1436 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1437 ArtificialDecl->setImplicit();
1438
1439 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1440 Builder);
1441 }
1442 assert(MD && "No Size expression debug node created");
1443 DI->registerVLASizeExpression(VlaSize.Type, MD);
1444 }
1445}
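// NOTE (illustrative annotation, not part of CGDecl.cpp): for a multidimensional
// VLA each non-constant bound is spilled to a temporary named __vla_expr0,
// __vla_expr1, ... so debug info can reference it from the array's DISubrange:
//   void f(int n, int m) { int grid[n][m]; }   // two __vla_exprN artificial decls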
1446
1447/// Return the maximum size of an aggregate for which we generate a fake use
1448/// intrinsic when -fextend-variable-liveness is in effect.
1449static uint64_t maxFakeUseAggregateSize(const ASTContext &C) {
1450 return 4 * C.getTypeSize(C.UnsignedIntTy);
1451}
1452
1453// Helper function to determine whether a variable's or parameter's lifetime
1454// should be extended.
1455static bool shouldExtendLifetime(const ASTContext &Context,
1456 const Decl *FuncDecl, const VarDecl &D,
1457 ImplicitParamDecl *CXXABIThisDecl) {
1458 // When we're not inside a valid function it is unlikely that any
1459 // lifetime extension is useful.
1460 if (!FuncDecl)
1461 return false;
1462 if (FuncDecl->isImplicit())
1463 return false;
1464 // Do not extend compiler-created variables except for the this pointer.
1465 if (D.isImplicit() && &D != CXXABIThisDecl)
1466 return false;
1467 QualType Ty = D.getType();
1468 // No need to extend volatiles, they have a memory location.
1469 if (Ty.isVolatileQualified())
1470 return false;
1471 // Don't extend variables that exceed a certain size.
1472 if (Context.getTypeSize(Ty) > maxFakeUseAggregateSize(Context))
1473 return false;
1474 // Do not extend variables in nodebug or optnone functions.
1475 if (FuncDecl->hasAttr<NoDebugAttr>() || FuncDecl->hasAttr<OptimizeNoneAttr>())
1476 return false;
1477 return true;
1478}
1479
1480/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1481/// local variable. Does not emit initialization or destruction.
1482CodeGenFunction::AutoVarEmission
1483CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1484 QualType Ty = D.getType();
1485 assert(
1486 Ty.getAddressSpace() == LangAS::Default ||
1487 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1488
1489 AutoVarEmission emission(D);
1490
1491 bool isEscapingByRef = D.isEscapingByref();
1492 emission.IsEscapingByRef = isEscapingByRef;
1493
1494 CharUnits alignment = getContext().getDeclAlign(&D);
1495
1496 // If the type is variably-modified, emit all the VLA sizes for it.
1497 if (Ty->isVariablyModifiedType())
1498 EmitVariablyModifiedType(Ty);
1499
1500 auto *DI = getDebugInfo();
1501 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1502
1503 Address address = Address::invalid();
1504 RawAddress AllocaAddr = RawAddress::invalid();
1505 Address OpenMPLocalAddr = Address::invalid();
1506 if (CGM.getLangOpts().OpenMPIRBuilder)
1507 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1508 else
1509 OpenMPLocalAddr =
1510 getLangOpts().OpenMP
1511 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1512 : Address::invalid();
1513
1514 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1515
1516 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1517 address = OpenMPLocalAddr;
1518 AllocaAddr = OpenMPLocalAddr;
1519 } else if (Ty->isConstantSizeType()) {
1520 // If this value is an array or struct with a statically determinable
1521 // constant initializer, there are optimizations we can do.
1522 //
1523 // TODO: We should constant-evaluate the initializer of any variable,
1524 // as long as it is initialized by a constant expression. Currently,
1525 // isConstantInitializer produces wrong answers for structs with
1526 // reference or bitfield members, and a few other cases, and checking
1527 // for POD-ness protects us from some of these.
1528 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1529 (D.isConstexpr() ||
1530 ((Ty.isPODType(getContext()) ||
1531 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1532 D.getInit()->isConstantInitializer(getContext(), false)))) {
1533
1534 // If the variable's a const type, and it's neither an NRVO
1535 // candidate nor a __block variable and has no mutable members,
1536 // emit it as a global instead.
1537 // Exception is if a variable is located in non-constant address space
1538 // in OpenCL.
1539 bool NeedsDtor =
1540 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
1541 if ((!getLangOpts().OpenCL ||
1542 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1543 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1544 !isEscapingByRef &&
1545 Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
1546 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1547
1548 // Signal this condition to later callbacks.
1549 emission.Addr = Address::invalid();
1550 assert(emission.wasEmittedAsGlobal());
1551 return emission;
1552 }
1553
1554 // Otherwise, tell the initialization code that we're in this case.
1555 emission.IsConstantAggregate = true;
1556 }
1557
1558 // A normal fixed sized variable becomes an alloca in the entry block,
1559 // unless:
1560 // - it's an NRVO variable.
1561 // - we are compiling OpenMP and it's an OpenMP local variable.
1562 if (NRVO) {
1563 // The named return value optimization: allocate this variable in the
1564 // return slot, so that we can elide the copy when returning this
1565 // variable (C++0x [class.copy]p34).
1566 AllocaAddr =
1569 address = MaybeCastStackAddressSpace(AllocaAddr, Ty.getAddressSpace());
1570
1571 if (const auto *RD = Ty->getAsRecordDecl()) {
1572 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1573 (CXXRD && !CXXRD->hasTrivialDestructor()) ||
1574 RD->isNonTrivialToPrimitiveDestroy()) {
1575 // Create a flag that is used to indicate when the NRVO was applied
1576 // to this variable. Set it to zero to indicate that NRVO was not
1577 // applied.
1578 llvm::Value *Zero = Builder.getFalse();
1579 RawAddress NRVOFlag =
1580 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1582 Builder.CreateStore(Zero, NRVOFlag);
1583
1584 // Record the NRVO flag for this variable.
1585 NRVOFlags[&D] = NRVOFlag.getPointer();
1586 emission.NRVOFlag = NRVOFlag.getPointer();
1587 }
1588 }
1589 } else {
1590 CharUnits allocaAlignment;
1591 llvm::Type *allocaTy;
1592 if (isEscapingByRef) {
1593 auto &byrefInfo = getBlockByrefInfo(&D);
1594 allocaTy = byrefInfo.Type;
1595 allocaAlignment = byrefInfo.ByrefAlignment;
1596 } else {
1597 allocaTy = ConvertTypeForMem(Ty);
1598 allocaAlignment = alignment;
1599 }
1600
1601 // Create the alloca. Note that we set the name separately from
1602 // building the instruction so that it's there even in no-asserts
1603 // builds.
1604 address = CreateTempAlloca(allocaTy, Ty.getAddressSpace(),
1605 allocaAlignment, D.getName(),
1606 /*ArraySize=*/nullptr, &AllocaAddr);
1607
1608 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1609 // the catch parameter starts in the catchpad instruction, and we can't
1610 // insert code in those basic blocks.
1611 bool IsMSCatchParam =
1612 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1613
1614 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1615 // if we don't have a valid insertion point (?).
1616 if (HaveInsertPoint() && !IsMSCatchParam) {
1617 // If there's a jump into the lifetime of this variable, its lifetime
1618 // gets broken up into several regions in IR, which requires more work
1619 // to handle correctly. For now, just omit the intrinsics; this is a
1620 // rare case, and it's better to just be conservatively correct.
1621 // PR28267.
1622 //
1623 // We have to do this in all language modes if there's a jump past the
1624 // declaration. We also have to do it in C if there's a jump to an
1625 // earlier point in the current block because non-VLA lifetimes begin as
1626 // soon as the containing block is entered, not when its variables
1627 // actually come into scope; suppressing the lifetime annotations
1628 // completely in this case is unnecessarily pessimistic, but again, this
1629 // is rare.
1630 if (!Bypasses.IsBypassed(&D) &&
1631 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1632 emission.UseLifetimeMarkers =
1633 EmitLifetimeStart(AllocaAddr.getPointer());
1634 }
1635 } else {
1636 assert(!emission.useLifetimeMarkers());
1637 }
1638 }
1639 } else {
1641
1642 // Delayed globalization for variable length declarations. This ensures that
1643 // the expression representing the length has been emitted and can be used
1644 // by the definition of the VLA. Since this is an escaped declaration, in
1645 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1646 // deallocation call to __kmpc_free_shared() is emitted later.
1647 bool VarAllocated = false;
1648 if (getLangOpts().OpenMPIsTargetDevice) {
1649 auto &RT = CGM.getOpenMPRuntime();
1650 if (RT.isDelayedVariableLengthDecl(*this, &D)) {
1651 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1652 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1653 RT.getKmpcAllocShared(*this, &D);
1654
1655 // Save the address of the allocation:
1656 LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1659 address = Base.getAddress();
1660
1661 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1662 // appropriate location at the end of the scope of the
1663 // __kmpc_alloc_shared functions:
1664 pushKmpcAllocFree(NormalCleanup, AddrSizePair);
1665
1666 // Mark variable as allocated:
1667 VarAllocated = true;
1668 }
1669 }
1670
1671 if (!VarAllocated) {
1672 if (!DidCallStackSave) {
1673 // Save the stack.
1674 Address Stack =
1676
1677 llvm::Value *V = Builder.CreateStackSave();
1678 assert(V->getType() == AllocaInt8PtrTy);
1679 Builder.CreateStore(V, Stack);
1680
1681 DidCallStackSave = true;
1682
1683 // Push a cleanup block and restore the stack there.
1684 // FIXME: in general circumstances, this should be an EH cleanup.
1686 }
1687
1688 auto VlaSize = getVLASize(Ty);
1689 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1690
1691 // Allocate memory for the array.
1692 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1693 &AllocaAddr);
1694 }
1695
1696 // If we have debug info enabled, properly describe the VLA dimensions for
1697 // this type by registering the vla size expression for each of the
1698 // dimensions.
1699 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1700 }
1701
1702 setAddrOfLocalVar(&D, address);
1703 emission.Addr = address;
1704 emission.AllocaAddr = AllocaAddr;
1705
1706 // Emit debug info for local var declaration.
1707 if (EmitDebugInfo && HaveInsertPoint()) {
1708 Address DebugAddr = address;
1709 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1710 DI->setLocation(D.getLocation());
1711
1712 // If NRVO, use a pointer to the return address.
1713 if (UsePointerValue) {
1714 DebugAddr = ReturnValuePointer;
1715 AllocaAddr = ReturnValuePointer;
1716 }
1717 (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1718 UsePointerValue);
1719 }
1720
1721 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1722 EmitVarAnnotations(&D, address.emitRawPointer(*this));
1723
1724 // Make sure we call @llvm.lifetime.end.
1725 if (emission.useLifetimeMarkers())
1726 EHStack.pushCleanup<CallLifetimeEnd>(
1728
1729 // Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
1730 // calls for local variables. We are exempting volatile variables and
1731 // non-scalars larger than 4 times the size of an unsigned int. Larger
1732 // non-scalars are often allocated in memory and may create unnecessary
1733 // overhead.
1734 if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
1736 if (shouldExtendLifetime(getContext(), CurCodeDecl, D, CXXABIThisDecl))
1737 EHStack.pushCleanup<FakeUse>(NormalFakeUse,
1738 emission.getAllocatedAddress());
1739 }
1740
1741 return emission;
1742}
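As an aside (not part of CGDecl.cpp): a small, self-contained C++ sketch, with invented names, of source code that drives the two allocation strategies handled above, the NRVO return-slot path and the VLA path with its stack save/restore.

#include <cstddef>
#include <string>

// NRVO path: 'result' has the same type as the return value, so it can live
// directly in the return slot and the copy/move on 'return result' is elided;
// a hidden "nrvo" flag then tells the cleanup whether to run the destructor.
std::string makeGreeting() {
  std::string result = "hello";
  result += ", world";
  return result;
}

// VLA path: 'tmp' has a runtime-dependent size, so the stack pointer is saved
// on entry to the scope, the array gets a dynamic alloca, and the stack
// pointer is restored when the scope ends. (VLAs are a C99 feature that Clang
// also accepts in C++ as an extension.)
double averageOfSquares(std::size_t n, const double *in) {
  double tmp[n];
  double sum = 0.0;
  for (std::size_t i = 0; i != n; ++i) {
    tmp[i] = in[i] * in[i];
    sum += tmp[i];
  }
  return n ? sum / static_cast<double>(n) : 0.0;
}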
1743
1744static bool isCapturedBy(const VarDecl &, const Expr *);
1745
1746/// Determines whether the given __block variable is potentially
1747/// captured by the given statement.
1748static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1749 if (const Expr *E = dyn_cast<Expr>(S))
1750 return isCapturedBy(Var, E);
1751 for (const Stmt *SubStmt : S->children())
1752 if (isCapturedBy(Var, SubStmt))
1753 return true;
1754 return false;
1755}
1756
1757/// Determines whether the given __block variable is potentially
1758/// captured by the given expression.
1759static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1760 // Skip the most common kinds of expressions that make
1761 // hierarchy-walking expensive.
1762 E = E->IgnoreParenCasts();
1763
1764 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1765 const BlockDecl *Block = BE->getBlockDecl();
1766 for (const auto &I : Block->captures()) {
1767 if (I.getVariable() == &Var)
1768 return true;
1769 }
1770
1771 // No need to walk into the subexpressions.
1772 return false;
1773 }
1774
1775 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1776 const CompoundStmt *CS = SE->getSubStmt();
1777 for (const auto *BI : CS->body())
1778 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1779 if (isCapturedBy(Var, BIE))
1780 return true;
1781 }
1782 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1783 // special case declarations
1784 for (const auto *I : DS->decls()) {
1785 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1786 const Expr *Init = VD->getInit();
1787 if (Init && isCapturedBy(Var, Init))
1788 return true;
1789 }
1790 }
1791 }
1792 else
1793 // FIXME: For now, make the safe assumption that arbitrary statements cause
1794 // capturing. Later, provide code to poke into statements for capture analysis.
1795 return true;
1796 return false;
1797 }
1798
1799 for (const Stmt *SubStmt : E->children())
1800 if (isCapturedBy(Var, SubStmt))
1801 return true;
1802
1803 return false;
1804}
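For reference (illustrative only, names invented; requires Clang's blocks extension, -fblocks): the pattern isCapturedBy() is looking for is a __block variable whose own initializer contains a block literal capturing that variable, in which case the initializer must be emitted before copying into the variable.

typedef int (^IntThunk)(void);

int selfCapture(void) {
  // The initializer of 'thunk' is a block literal that captures 'thunk'
  // itself, so the __block (byref) storage could be moved by its own
  // initializer; CodeGen emits the initializer first, then copies it in.
  __block IntThunk thunk = ^{ return thunk ? 1 : 0; };
  return thunk();
}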
1805
1806/// Determine whether the given initializer is trivial in the sense
1807/// that it requires no code to be generated.
1808 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1809 if (!Init)
1810 return true;
1811
1812 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1813 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1814 if (Constructor->isTrivial() &&
1815 Constructor->isDefaultConstructor() &&
1816 !Construct->requiresZeroInitialization())
1817 return true;
1818
1819 return false;
1820}
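A short illustrative C++ snippet (names invented) of what the predicate above treats as trivial: default-constructing a type with a trivial default constructor generates no initialization code, whereas value-initialization still needs a zeroing store.

struct Pod {
  int x;
};

void trivialVersusZeroInit() {
  Pod a;    // trivial default construction: no initialization code emitted
  Pod b{};  // value-initialization requires zeroing, so a store is emitted
  (void)a;
  (void)b;
}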
1821
1822void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1823 const VarDecl &D,
1824 Address Loc) {
1825 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1826 auto trivialAutoVarInitMaxSize =
1827 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1829 bool isVolatile = type.isVolatileQualified();
1830 if (!Size.isZero()) {
1831 // We skip auto-init variables by their alloc size. Take this as an example:
1832 // "struct Foo {int x; char buff[1024];}" Assume the max-size flag is 1023.
1833 // All Foo type variables will be skipped. Ideally, we only skip the buff
1834 // array and still auto-init x in this example.
1835 // TODO: Improve the size filtering to filter by member size.
1836 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
1837 switch (trivialAutoVarInit) {
1838 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1839 llvm_unreachable("Uninitialized handled by caller");
1840 case LangOptions::TrivialAutoVarInitKind::Zero:
1841 if (CGM.stopAutoInit())
1842 return;
1843 if (trivialAutoVarInitMaxSize > 0 &&
1844 allocSize > trivialAutoVarInitMaxSize)
1845 return;
1846 emitStoresForZeroInit(D, Loc, isVolatile);
1847 break;
1848 case LangOptions::TrivialAutoVarInitKind::Pattern:
1849 if (CGM.stopAutoInit())
1850 return;
1851 if (trivialAutoVarInitMaxSize > 0 &&
1852 allocSize > trivialAutoVarInitMaxSize)
1853 return;
1854 emitStoresForPatternInit(D, Loc, isVolatile);
1855 break;
1856 }
1857 return;
1858 }
1859
1860 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1861 // them, so emit a memcpy with the VLA size to initialize each element.
1862 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1863 // will catch that code, but there exists code which generates zero-sized
1864 // VLAs. Be nice and initialize whatever they requested.
1865 const auto *VlaType = getContext().getAsVariableArrayType(type);
1866 if (!VlaType)
1867 return;
1868 auto VlaSize = getVLASize(VlaType);
1869 auto SizeVal = VlaSize.NumElts;
1870 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1871 switch (trivialAutoVarInit) {
1872 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1873 llvm_unreachable("Uninitialized handled by caller");
1874
1875 case LangOptions::TrivialAutoVarInitKind::Zero: {
1876 if (CGM.stopAutoInit())
1877 return;
1878 if (!EltSize.isOne())
1879 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1880 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1881 SizeVal, isVolatile);
1882 I->addAnnotationMetadata("auto-init");
1883 break;
1884 }
1885
1886 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1887 if (CGM.stopAutoInit())
1888 return;
1889 llvm::Type *ElTy = Loc.getElementType();
1890 llvm::Constant *Constant = constWithPadding(
1891 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1892 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1893 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1894 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1895 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1896 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1897 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1898 "vla.iszerosized");
1899 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1900 EmitBlock(SetupBB);
1901 if (!EltSize.isOne())
1902 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1903 llvm::Value *BaseSizeInChars =
1904 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1905 Address Begin = Loc.withElementType(Int8Ty);
1906 llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
1907 Begin.emitRawPointer(*this),
1908 SizeVal, "vla.end");
1909 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1910 EmitBlock(LoopBB);
1911 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1912 Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
1913 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1914 auto *I =
1915 Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
1917 CGM, D, Builder, Constant, ConstantAlign),
1918 BaseSizeInChars, isVolatile);
1919 I->addAnnotationMetadata("auto-init");
1920 llvm::Value *Next =
1921 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1922 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1923 Builder.CreateCondBr(Done, ContBB, LoopBB);
1924 Cur->addIncoming(Next, LoopBB);
1925 EmitBlock(ContBB);
1926 } break;
1927 }
1928}
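For context, an illustrative C++ function (invented names) showing what this routine covers when compiling with -ftrivial-auto-var-init=zero or =pattern: fixed-size locals get constant zero/pattern stores, the VLA is filled by the memset/memcpy loop built above, and -ftrivial-auto-var-init-max-size can exempt large objects such as 'big'.

#include <cstddef>

int sumFirstBytes(std::size_t n) {
  char big[1024];   // large aggregate: may be exempted by the max-size limit
  int total;        // scalar local: also covered by trivial auto-var-init
  char vla[n + 1];  // VLA: handled by the memset/memcpy loop above
  big[0] = 1;
  vla[0] = 2;
  total = big[0] + vla[0];
  return total;
}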
1929
1930 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1931 assert(emission.Variable && "emission was not valid!");
1932
1933 // If this was emitted as a global constant, we're done.
1934 if (emission.wasEmittedAsGlobal()) return;
1935
1936 const VarDecl &D = *emission.Variable;
1939 QualType type = D.getType();
1940
1941 // If this local has an initializer, emit it now.
1942 const Expr *Init = D.getInit();
1943
1944 // If we are at an unreachable point, we don't need to emit the initializer
1945 // unless it contains a label.
1946 if (!HaveInsertPoint()) {
1947 if (!Init || !ContainsLabel(Init)) {
1948 PGO->markStmtMaybeUsed(Init);
1949 return;
1950 }
1952 }
1953
1954 // Initialize the structure of a __block variable.
1955 if (emission.IsEscapingByRef)
1956 emitByrefStructureInit(emission);
1957
1958 // Initialize the variable here if it doesn't have an initializer and it is a
1959 // C struct that is non-trivial to initialize or an array containing such a
1960 // struct.
1961 if (!Init &&
1962 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1964 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1965 if (emission.IsEscapingByRef)
1966 drillIntoBlockVariable(*this, Dst, &D);
1968 return;
1969 }
1970
1971 // Check whether this is a byref variable that's potentially
1972 // captured and moved by its own initializer. If so, we'll need to
1973 // emit the initializer first, then copy into the variable.
1974 bool capturedByInit =
1975 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1976
1977 bool locIsByrefHeader = !capturedByInit;
1978 const Address Loc =
1979 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1980
1981 auto hasNoTrivialAutoVarInitAttr = [&](const Decl *D) {
1982 return D && D->hasAttr<NoTrivialAutoVarInitAttr>();
1983 };
1984 // Note: constexpr already initializes everything correctly.
1985 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1986 ((D.isConstexpr() || D.getAttr<UninitializedAttr>() ||
1987 hasNoTrivialAutoVarInitAttr(type->getAsTagDecl()) ||
1988 hasNoTrivialAutoVarInitAttr(CurFuncDecl))
1990 : getContext().getLangOpts().getTrivialAutoVarInit());
1991
1992 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1993 if (trivialAutoVarInit ==
1995 return;
1996
1997 // Only initialize a __block's storage: we always initialize the header.
1998 if (emission.IsEscapingByRef && !locIsByrefHeader)
1999 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
2000
2001 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
2002 };
2003
2005 return initializeWhatIsTechnicallyUninitialized(Loc);
2006
2007 llvm::Constant *constant = nullptr;
2008 if (emission.IsConstantAggregate ||
2009 D.mightBeUsableInConstantExpressions(getContext())) {
2010 assert(!capturedByInit && "constant init contains a capturing block?");
2012 if (constant && !constant->isZeroValue() &&
2013 (trivialAutoVarInit !=
2015 IsPattern isPattern =
2016 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
2017 ? IsPattern::Yes
2018 : IsPattern::No;
2019 // C guarantees that brace-init with fewer initializers than members in
2020 // the aggregate will initialize the rest of the aggregate as-if it were
2021 // static initialization. In turn static initialization guarantees that
2022 // padding is initialized to zero bits. We could instead pattern-init if D
2023 // has any ImplicitValueInitExpr, but that seems to be unintuitive
2024 // behavior.
2025 constant = constWithPadding(CGM, IsPattern::No,
2026 replaceUndef(CGM, isPattern, constant));
2027 }
2028
2029 if (constant && type->isBitIntType() &&
2031 // Constants for long _BitInt types are split into individual bytes.
2032 // Try to fold these back into an integer constant so it can be stored
2033 // properly.
2034 llvm::Type *LoadType =
2035 CGM.getTypes().convertTypeForLoadStore(type, constant->getType());
2036 constant = llvm::ConstantFoldLoadFromConst(
2037 constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
2038 }
2039 }
2040
2041 if (!constant) {
2042 if (trivialAutoVarInit !=
2044 // At this point, we know D has an Init expression, but isn't a constant.
2045 // - If D is not a scalar, auto-var-init conservatively (members may be
2046 // left uninitialized by constructor Init expressions for example).
2047 // - If D is a scalar, we only need to auto-var-init if there is a
2048 // self-reference. Otherwise, the Init expression should be sufficient.
2049 // It may be that the Init expression uses other uninitialized memory,
2050 // but auto-var-init here would not help, as auto-init would get
2051 // overwritten by Init.
2052 if (!type->isScalarType() || capturedByInit || isAccessedBy(D, Init)) {
2053 initializeWhatIsTechnicallyUninitialized(Loc);
2054 }
2055 }
2057 lv.setNonGC(true);
2058 return EmitExprAsInit(Init, &D, lv, capturedByInit);
2059 }
2060
2061 PGO->markStmtMaybeUsed(Init);
2062
2063 if (!emission.IsConstantAggregate) {
2064 // For simple scalar/complex initialization, store the value directly.
2066 lv.setNonGC(true);
2067 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
2068 }
2069
2070 emitStoresForConstant(D, Loc.withElementType(CGM.Int8Ty),
2071 type.isVolatileQualified(), constant,
2072 /*IsAutoInit=*/false);
2073}
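A minimal C++ illustration (invented names) of the constant-aggregate path taken by EmitAutoVarInit: because the brace initializers fold to constants, the locals can be initialized with a memset, a memcpy from an unnamed constant global, or a few scalar stores instead of evaluating each field initializer separately.

struct Point {
  int x, y, z;
};

int manhattan() {
  Point p = {1, 2, 3};  // constant aggregate: memcpy/stores from a constant
  int zeros[16] = {};   // all-zero constant: a single memset suffices
  return p.x + p.y + p.z + zeros[0];
}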
2074
2076 if (auto *DD = dyn_cast_if_present<DecompositionDecl>(VD)) {
2077 for (auto *B : DD->flat_bindings())
2078 if (auto *HD = B->getHoldingVar())
2079 EmitVarDecl(*HD);
2080 }
2081}
2082
2083/// Emit an expression as an initializer for an object (variable, field, etc.)
2084/// at the given location. The expression is not necessarily the normal
2085/// initializer for the object, and the address is not necessarily
2086/// its normal location.
2087///
2088/// \param init the initializing expression
2089/// \param D the object to act as if we're initializing
2090/// \param lvalue the lvalue to initialize
2091/// \param capturedByInit true if \p D is a __block variable
2092/// whose address is potentially changed by the initializer
2093 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
2094 LValue lvalue, bool capturedByInit) {
2095 QualType type = D->getType();
2096
2097 if (type->isReferenceType()) {
2098 RValue rvalue = EmitReferenceBindingToExpr(init);
2099 if (capturedByInit)
2100 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2101 EmitStoreThroughLValue(rvalue, lvalue, true);
2102 return;
2103 }
2104 switch (getEvaluationKind(type)) {
2105 case TEK_Scalar:
2106 EmitScalarInit(init, D, lvalue, capturedByInit);
2107 return;
2108 case TEK_Complex: {
2109 ComplexPairTy complex = EmitComplexExpr(init);
2110 if (capturedByInit)
2111 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2112 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
2113 return;
2114 }
2115 case TEK_Aggregate:
2116 if (type->isAtomicType()) {
2117 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
2118 } else {
2120 if (isa<VarDecl>(D))
2122 else if (auto *FD = dyn_cast<FieldDecl>(D))
2123 Overlap = getOverlapForFieldInit(FD);
2124 // TODO: how can we delay here if D is captured by its initializer?
2125 EmitAggExpr(init,
2128 AggValueSlot::IsNotAliased, Overlap));
2129 }
2130 return;
2131 }
2132 llvm_unreachable("bad evaluation kind");
2133}
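For orientation, an illustrative snippet (invented names; _Complex is the C complex type, which Clang also accepts in C++ as an extension) touching the evaluation kinds the switch above dispatches on, plus a reference binding.

struct Pair {
  int a, b;
};

void evaluationKinds(const Pair &src) {
  int i = 42;               // TEK_Scalar: EmitScalarInit
  float _Complex c = 1.0f;  // TEK_Complex: EmitComplexExpr + store
  Pair p = {1, 2};          // TEK_Aggregate: EmitAggExpr into the slot
  const Pair &r = src;      // reference type: EmitReferenceBindingToExpr
  (void)i; (void)c; (void)p; (void)r;
}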
2134
2135/// Enter a destroy cleanup for the given local variable.
2136 void CodeGenFunction::emitAutoVarTypeCleanup(
2137 const CodeGenFunction::AutoVarEmission &emission,
2138 QualType::DestructionKind dtorKind) {
2139 assert(dtorKind != QualType::DK_none);
2140
2141 // Note that for __block variables, we want to destroy the
2142 // original stack object, not the possibly forwarded object.
2143 Address addr = emission.getObjectAddress(*this);
2144
2145 const VarDecl *var = emission.Variable;
2146 QualType type = var->getType();
2147
2148 CleanupKind cleanupKind = NormalAndEHCleanup;
2149 CodeGenFunction::Destroyer *destroyer = nullptr;
2150
2151 switch (dtorKind) {
2152 case QualType::DK_none:
2153 llvm_unreachable("no cleanup for trivially-destructible variable");
2154
2156 // If there's an NRVO flag on the emission, we need a different
2157 // cleanup.
2158 if (emission.NRVOFlag) {
2159 assert(!type->isArrayType());
2160 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2161 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2162 emission.NRVOFlag);
2163 return;
2164 }
2165 break;
2166
2168 // Suppress cleanups for pseudo-strong variables.
2169 if (var->isARCPseudoStrong()) return;
2170
2171 // Otherwise, consider whether to use an EH cleanup or not.
2172 cleanupKind = getARCCleanupKind();
2173
2174 // Use the imprecise destroyer by default.
2175 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2177 break;
2178
2180 break;
2181
2184 if (emission.NRVOFlag) {
2185 assert(!type->isArrayType());
2186 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2187 emission.NRVOFlag, type);
2188 return;
2189 }
2190 break;
2191 }
2192
2193 // If we haven't chosen a more specific destroyer, use the default.
2194 if (!destroyer) destroyer = getDestroyer(dtorKind);
2195
2196 // Use an EH cleanup in array destructors iff the destructor itself
2197 // is being pushed as an EH cleanup.
2198 bool useEHCleanup = (cleanupKind & EHCleanup);
2199 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2200 useEHCleanup);
2201}
2202
2203 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2204 assert(emission.Variable && "emission was not valid!");
2205
2206 // If this was emitted as a global constant, we're done.
2207 if (emission.wasEmittedAsGlobal()) return;
2208
2209 // If we don't have an insertion point, we're done. Sema prevents
2210 // us from jumping into any of these scopes anyway.
2211 if (!HaveInsertPoint()) return;
2212
2213 const VarDecl &D = *emission.Variable;
2214
2215 // Check the type for a cleanup.
2216 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2217 emitAutoVarTypeCleanup(emission, dtorKind);
2218
2219 // In GC mode, honor objc_precise_lifetime.
2220 if (getLangOpts().getGC() != LangOptions::NonGC &&
2221 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2222 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2223 }
2224
2225 // Handle the cleanup attribute.
2226 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2227 const FunctionDecl *FD = CA->getFunctionDecl();
2228
2229 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2230 assert(F && "Could not find function!");
2231
2233 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D,
2234 CA);
2235 }
2236
2237 // If this is a block variable, call _Block_object_destroy
2238 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2239 // mode.
2240 if (emission.IsEscapingByRef &&
2241 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2243 if (emission.Variable->getType().isObjCGCWeak())
2244 Flags |= BLOCK_FIELD_IS_WEAK;
2245 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2246 /*LoadBlockVarAddr*/ false,
2247 cxxDestructorCanThrow(emission.Variable->getType()));
2248 }
2249}
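As an example of the cleanup attribute handled above (a GNU extension; names invented): CodeGen pushes a CallCleanupFunction cleanup so that closeFile(&f) runs when 'f' leaves scope, on normal exits and, because the cleanup is NormalAndEHCleanup, on exception paths as well.

#include <cstdio>

static void closeFile(std::FILE **fp) {
  if (*fp)
    std::fclose(*fp);
}

bool readConfig(const char *path) {
  __attribute__((cleanup(closeFile))) std::FILE *f = std::fopen(path, "r");
  if (!f)
    return false;  // the cleanup still runs; closeFile checks for null
  // ... parse the file ...
  return true;     // closeFile(&f) is invoked here as well
}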
2250
2253 switch (kind) {
2254 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2256 return destroyCXXObject;
2260 return destroyARCWeak;
2263 }
2264 llvm_unreachable("Unknown DestructionKind");
2265}
2266
2267/// pushEHDestroy - Push the standard destructor for the given type as
2268/// an EH-only cleanup.
2270 Address addr, QualType type) {
2271 assert(dtorKind && "cannot push destructor for trivial type");
2272 assert(needsEHCleanup(dtorKind));
2273
2274 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2275}
2276
2277/// pushDestroy - Push the standard destructor for the given type as
2278/// at least a normal cleanup.
2280 Address addr, QualType type) {
2281 assert(dtorKind && "cannot push destructor for trivial type");
2282
2283 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2284 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2285 cleanupKind & EHCleanup);
2286}
2287
2290 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2291 pushLifetimeExtendedDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2292 cleanupKind & EHCleanup);
2293}
2294
2296 QualType type, Destroyer *destroyer,
2297 bool useEHCleanupForArray) {
2298 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2299 useEHCleanupForArray);
2300}
2301
2302// Pushes a destroy and defers its deactivation until its
2303// CleanupDeactivationScope is exited.
2306 assert(dtorKind && "cannot push destructor for trivial type");
2307
2308 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2310 cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
2311}
2312
2314 CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
2315 bool useEHCleanupForArray) {
2316 llvm::Instruction *DominatingIP =
2317 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
2318 pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2320 {EHStack.stable_begin(), DominatingIP});
2321}
2322
2324 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2325}
2326
2328 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2329 EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
2330}
2331
2333 Address addr, QualType type,
2334 Destroyer *destroyer,
2335 bool useEHCleanupForArray) {
2336 // If we're not in a conditional branch, we don't need to bother generating a
2337 // conditional cleanup.
2338 if (!isInConditionalBranch()) {
2339 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2340 // around in case a temporary's destructor throws an exception.
2341
2342 // Add the cleanup to the EHStack. After the full-expr, this would be
2343 // deactivated before being popped from the stack.
2344 pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
2345 useEHCleanupForArray);
2346
2347 // Since this is lifetime-extended, push it once again to the EHStack after
2348 // the full expression.
2349 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2350 cleanupKind, Address::invalid(), addr, type, destroyer,
2351 useEHCleanupForArray);
2352 }
2353
2354 // Otherwise, we should only destroy the object if it's been initialized.
2355
2356 using ConditionalCleanupType =
2358 Destroyer *, bool>;
2360
2361 // Remember to emit the cleanup if we branch out before the end of the
2362 // full-expression (e.g., through a stmt-expr or a coroutine suspension).
2363 AllocaTrackerRAII DeactivationAllocas(*this);
2364 Address ActiveFlagForDeactivation = createCleanupActiveFlag();
2365
2366 pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
2367 cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
2368 initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
2369 EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
2370 // Erase the active flag if the cleanup was not emitted.
2371 cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());
2372
2373 // Since this is lifetime-extended, push it once again to the EHStack after
2374 // the full expression.
2375 // The previous active flag would always be 'false' due to forced deferred
2376 // deactivation. Use a separate flag for lifetime-extension to correctly
2377 // remember if this branch was taken and the object was initialized.
2378 Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
2379 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2380 cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
2381 useEHCleanupForArray);
2382}
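A short C++ illustration (invented names) of when a lifetime-extended destroy is needed: binding a reference to a temporary extends the temporary's lifetime to the reference's scope, so its destructor has to be registered as a cleanup that survives past the end of the full-expression; the conditional-branch handling above additionally tracks whether the branch that creates the object was actually taken.

#include <string>

std::size_t extendedTemporary() {
  // The std::string temporary produced by the concatenation is lifetime-
  // extended to the scope of 'name', so its destructor is pushed as a
  // lifetime-extended cleanup rather than run at the end of this statement.
  const std::string &name = std::string("abc") + "def";
  return name.size();
}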
2383
2384/// emitDestroy - Immediately perform the destruction of the given
2385/// object.
2386///
2387/// \param addr - the address of the object; a type*
2388/// \param type - the type of the object; if an array type, all
2389/// objects are destroyed in reverse order
2390/// \param destroyer - the function to call to destroy individual
2391/// elements
2392/// \param useEHCleanupForArray - whether an EH cleanup should be
2393/// used when destroying array elements, in case one of the
2394/// destructions throws an exception
2396 Destroyer *destroyer,
2397 bool useEHCleanupForArray) {
2399 if (!arrayType)
2400 return destroyer(*this, addr, type);
2401
2402 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2403
2404 CharUnits elementAlign =
2405 addr.getAlignment()
2406 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2407
2408 // Normally we have to check whether the array is zero-length.
2409 bool checkZeroLength = true;
2410
2411 // But if the array length is constant, we can suppress that.
2412 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2413 // ...and if it's constant zero, we can just skip the entire thing.
2414 if (constLength->isZero()) return;
2415 checkZeroLength = false;
2416 }
2417
2418 llvm::Value *begin = addr.emitRawPointer(*this);
2419 llvm::Value *end =
2421 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2422 checkZeroLength, useEHCleanupForArray);
2423}
2424
2425/// emitArrayDestroy - Destroys all the elements of the given array,
2426/// beginning from last to first. The array cannot be zero-length.
2427///
2428/// \param begin - a type* denoting the first element of the array
2429/// \param end - a type* denoting one past the end of the array
2430/// \param elementType - the element type of the array
2431/// \param destroyer - the function to call to destroy elements
2432/// \param useEHCleanup - whether to push an EH cleanup to destroy
2433/// the remaining elements in case the destruction of a single
2434/// element throws
2436 llvm::Value *end,
2437 QualType elementType,
2438 CharUnits elementAlign,
2439 Destroyer *destroyer,
2440 bool checkZeroLength,
2441 bool useEHCleanup) {
2442 assert(!elementType->isArrayType());
2443
2444 // The basic structure here is a do-while loop, because we don't
2445 // need to check for the zero-element case.
2446 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2447 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2448
2449 if (checkZeroLength) {
2450 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2451 "arraydestroy.isempty");
2452 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2453 }
2454
2455 // Enter the loop body, making that address the current address.
2456 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2457 EmitBlock(bodyBB);
2458 llvm::PHINode *elementPast =
2459 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2460 elementPast->addIncoming(end, entryBB);
2461
2462 // Shift the address back by one element.
2463 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2464 llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
2465 llvm::Value *element = Builder.CreateInBoundsGEP(
2466 llvmElementType, elementPast, negativeOne, "arraydestroy.element");
2467
2468 if (useEHCleanup)
2469 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2470 destroyer);
2471
2472 // Perform the actual destruction there.
2473 destroyer(*this, Address(element, llvmElementType, elementAlign),
2474 elementType);
2475
2476 if (useEHCleanup)
2478
2479 // Check whether we've reached the end.
2480 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2481 Builder.CreateCondBr(done, doneBB, bodyBB);
2482 elementPast->addIncoming(element, Builder.GetInsertBlock());
2483
2484 // Done.
2485 EmitBlock(doneBB);
2486}
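To make the loop structure concrete, an illustrative C++ snippet (invented names): the array elements are destroyed back to front on scope exit, matching the reverse do-while walk emitArrayDestroy() builds.

#include <cstdio>

struct Widget {
  int id;
  ~Widget() { std::printf("destroying widget %d\n", id); }
};

void useWidgets() {
  Widget w[3] = {{0}, {1}, {2}};
  // On scope exit the destructor runs for w[2], then w[1], then w[0].
}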
2487
2488/// Perform partial array destruction as if in an EH cleanup. Unlike
2489/// emitArrayDestroy, the element type here may still be an array type.
2491 llvm::Value *begin, llvm::Value *end,
2492 QualType type, CharUnits elementAlign,
2493 CodeGenFunction::Destroyer *destroyer) {
2494 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2495
2496 // If the element type is itself an array, drill down.
2497 unsigned arrayDepth = 0;
2498 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2499 // VLAs don't require a GEP index to walk into.
2500 if (!isa<VariableArrayType>(arrayType))
2501 arrayDepth++;
2502 type = arrayType->getElementType();
2503 }
2504
2505 if (arrayDepth) {
2506 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2507
2508 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2509 begin = CGF.Builder.CreateInBoundsGEP(
2510 elemTy, begin, gepIndices, "pad.arraybegin");
2511 end = CGF.Builder.CreateInBoundsGEP(
2512 elemTy, end, gepIndices, "pad.arrayend");
2513 }
2514
2515 // Destroy the array. We don't ever need an EH cleanup because we
2516 // assume that we're in an EH cleanup ourselves, so a throwing
2517 // destructor causes an immediate terminate.
2518 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2519 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2520}
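An illustrative C++ case (invented names) for the partial-array cleanup: if constructing the third element throws, the two elements already constructed are destroyed, in reverse order, before the exception leaves the enclosing scope.

#include <stdexcept>

struct Guarded {
  Guarded(int id) {
    if (id == 2)
      throw std::runtime_error("construction failed");
  }
  ~Guarded() {}
};

void buildThree() {
  // Guarded(2) throws, so the partial-array cleanup destroys the elements
  // built from 1 and then 0 before the exception propagates out.
  Guarded items[3] = {0, 1, 2};
}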
2521
2522namespace {
2523 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2524 /// array destroy where the end pointer is regularly determined and
2525 /// does not need to be loaded from a local.
2526 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2527 llvm::Value *ArrayBegin;
2528 llvm::Value *ArrayEnd;
2529 QualType ElementType;
2530 CodeGenFunction::Destroyer *Destroyer;
2531 CharUnits ElementAlign;
2532 public:
2533 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2534 QualType elementType, CharUnits elementAlign,
2535 CodeGenFunction::Destroyer *destroyer)
2536 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2537 ElementType(elementType), Destroyer(destroyer),
2538 ElementAlign(elementAlign) {}
2539
2540 void Emit(CodeGenFunction &CGF, Flags flags) override {
2541 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2542 ElementType, ElementAlign, Destroyer);
2543 }
2544 };
2545
2546 /// IrregularPartialArrayDestroy - a cleanup which performs a
2547 /// partial array destroy where the end pointer is irregularly
2548 /// determined and must be loaded from a local.
2549 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2550 llvm::Value *ArrayBegin;
2551 Address ArrayEndPointer;
2552 QualType ElementType;
2553 CodeGenFunction::Destroyer *Destroyer;
2554 CharUnits ElementAlign;
2555 public:
2556 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2557 Address arrayEndPointer,
2558 QualType elementType,
2559 CharUnits elementAlign,
2560 CodeGenFunction::Destroyer *destroyer)
2561 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2562 ElementType(elementType), Destroyer(destroyer),
2563 ElementAlign(elementAlign) {}
2564
2565 void Emit(CodeGenFunction &CGF, Flags flags) override {
2566 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2567 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2568 ElementType, ElementAlign, Destroyer);
2569 }
2570 };
2571} // end anonymous namespace
2572
2573/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
2574/// destroy already-constructed elements of the given array. The cleanup may be
2575/// popped with DeactivateCleanupBlock or PopCleanupBlock.
2576///
2577/// \param elementType - the immediate element type of the array;
2578/// possibly still an array type
2580 Address arrayEndPointer,
2581 QualType elementType,
2582 CharUnits elementAlign,
2583 Destroyer *destroyer) {
2584 pushFullExprCleanup<IrregularPartialArrayDestroy>(
2585 NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
2586 elementAlign, destroyer);
2587}
2588
2589/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2590/// already-constructed elements of the given array. The cleanup
2591/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2592///
2593/// \param elementType - the immediate element type of the array;
2594/// possibly still an array type
2596 llvm::Value *arrayEnd,
2597 QualType elementType,
2598 CharUnits elementAlign,
2599 Destroyer *destroyer) {
2600 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2601 arrayBegin, arrayEnd,
2602 elementType, elementAlign,
2603 destroyer);
2604}
2605
2606/// Lazily declare the @llvm.lifetime.start intrinsic.
2608 if (LifetimeStartFn)
2609 return LifetimeStartFn;
2610 LifetimeStartFn = llvm::Intrinsic::getOrInsertDeclaration(
2611 &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2612 return LifetimeStartFn;
2613}
2614
2615/// Lazily declare the @llvm.lifetime.end intrinsic.
2617 if (LifetimeEndFn)
2618 return LifetimeEndFn;
2619 LifetimeEndFn = llvm::Intrinsic::getOrInsertDeclaration(
2620 &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2621 return LifetimeEndFn;
2622}
2623
2624/// Lazily declare the @llvm.fake.use intrinsic.
2626 if (FakeUseFn)
2627 return FakeUseFn;
2628 FakeUseFn = llvm::Intrinsic::getOrInsertDeclaration(
2629 &getModule(), llvm::Intrinsic::fake_use);
2630 return FakeUseFn;
2631}
2632
2633namespace {
2634 /// A cleanup to perform a release of an object at the end of a
2635 /// function. This is used to balance out the incoming +1 of a
2636 /// ns_consumed argument when we can't reasonably do that just by
2637 /// not doing the initial retain for a __block argument.
2638 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2639 ConsumeARCParameter(llvm::Value *param,
2640 ARCPreciseLifetime_t precise)
2641 : Param(param), Precise(precise) {}
2642
2643 llvm::Value *Param;
2644 ARCPreciseLifetime_t Precise;
2645
2646 void Emit(CodeGenFunction &CGF, Flags flags) override {
2647 CGF.EmitARCRelease(Param, Precise);
2648 }
2649 };
2650} // end anonymous namespace
2651
2652/// Emit an alloca (or GlobalValue depending on target)
2653/// for the specified parameter and set up LocalDeclMap.
2655 unsigned ArgNo) {
2656 bool NoDebugInfo = false;
2657 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2658 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2659 "Invalid argument to EmitParmDecl");
2660
2661 // Set the name of the parameter's initial value to make IR easier to
2662 // read. Don't modify the names of globals.
2663 if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
2664 Arg.getAnyValue()->setName(D.getName());
2665
2666 QualType Ty = D.getType();
2667
2668 // Use better IR generation for certain implicit parameters.
2669 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2670 // The only implicit argument a block has is its literal.
2671 // This may be passed as an inalloca'ed value on Windows x86.
2672 if (BlockInfo) {
2673 llvm::Value *V = Arg.isIndirect()
2675 : Arg.getDirectValue();
2676 setBlockContextParameter(IPD, ArgNo, V);
2677 return;
2678 }
2680 // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
2681 // the debug info of TLS variables.
2681 NoDebugInfo =
2682 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2683 }
2684
2685 Address DeclPtr = Address::invalid();
2686 RawAddress AllocaPtr = Address::invalid();
2687 bool DoStore = false;
2688 bool IsScalar = hasScalarEvaluationKind(Ty);
2689 bool UseIndirectDebugAddress = false;
2690
2691 // If we already have a pointer to the argument, reuse the input pointer.
2692 if (Arg.isIndirect()) {
2693 DeclPtr = Arg.getIndirectAddress();
2694 DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
2695 // Indirect argument is in alloca address space, which may be different
2696 // from the default address space.
2697 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2698 auto *V = DeclPtr.emitRawPointer(*this);
2699 AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
2700
2701 // For truly ABI indirect arguments -- those that are not `byval` -- store
2702 // the address of the argument on the stack to preserve debug information.
2703 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2704 if (ArgInfo.isIndirect())
2705 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2706 if (UseIndirectDebugAddress) {
2707 auto PtrTy = getContext().getPointerType(Ty);
2708 AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
2709 D.getName() + ".indirect_addr");
2710 EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
2711 }
2712
2713 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2714 auto DestLangAS =
2716 if (SrcLangAS != DestLangAS) {
2717 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2718 CGM.getDataLayout().getAllocaAddrSpace());
2719 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2720 auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
2721 DeclPtr = DeclPtr.withPointer(
2722 getTargetHooks().performAddrSpaceCast(*this, V, SrcLangAS, T, true),
2723 DeclPtr.isKnownNonNull());
2724 }
2725
2726 // Push a destructor cleanup for this parameter if the ABI requires it.
2727 // Don't push a cleanup in a thunk for a method that will also emit a
2728 // cleanup.
2729 if (Ty->isRecordType() && !CurFuncIsThunk &&
2731 if (QualType::DestructionKind DtorKind =
2732 D.needsDestruction(getContext())) {
2733 assert((DtorKind == QualType::DK_cxx_destructor ||
2734 DtorKind == QualType::DK_nontrivial_c_struct) &&
2735 "unexpected destructor type");
2736 pushDestroy(DtorKind, DeclPtr, Ty);
2737 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2739 }
2740 }
2741 } else {
2742 // Check if the parameter address is controlled by OpenMP runtime.
2743 Address OpenMPLocalAddr =
2744 getLangOpts().OpenMP
2746 : Address::invalid();
2747 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2748 DeclPtr = OpenMPLocalAddr;
2749 AllocaPtr = DeclPtr;
2750 } else {
2751 // Otherwise, create a temporary to hold the value.
2752 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2753 D.getName() + ".addr", &AllocaPtr);
2754 }
2755 DoStore = true;
2756 }
2757
2758 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2759
2760 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2761 if (IsScalar) {
2762 Qualifiers qs = Ty.getQualifiers();
2764 // We honor __attribute__((ns_consumed)) for types with lifetime.
2765 // For __strong, it's handled by just skipping the initial retain;
2766 // otherwise we have to balance out the initial +1 with an extra
2767 // cleanup to do the release at the end of the function.
2768 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2769
2770 // If a parameter is pseudo-strong then we can omit the implicit retain.
2771 if (D.isARCPseudoStrong()) {
2772 assert(lt == Qualifiers::OCL_Strong &&
2773 "pseudo-strong variable isn't strong?");
2774 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2776 }
2777
2778 // Load objects passed indirectly.
2779 if (Arg.isIndirect() && !ArgVal)
2780 ArgVal = Builder.CreateLoad(DeclPtr);
2781
2782 if (lt == Qualifiers::OCL_Strong) {
2783 if (!isConsumed) {
2784 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2785 // use objc_storeStrong(&dest, value) for retaining the
2786 // object. But first, store a null into 'dest' because
2787 // objc_storeStrong attempts to release its old value.
2788 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2789 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2790 EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
2791 DoStore = false;
2792 }
2793 else
2794 // Don't use objc_retainBlock for block pointers, because we
2795 // don't want to Block_copy something just because we got it
2796 // as a parameter.
2797 ArgVal = EmitARCRetainNonBlock(ArgVal);
2798 }
2799 } else {
2800 // Push the cleanup for a consumed parameter.
2801 if (isConsumed) {
2802 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2804 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2805 precise);
2806 }
2807
2808 if (lt == Qualifiers::OCL_Weak) {
2809 EmitARCInitWeak(DeclPtr, ArgVal);
2810 DoStore = false; // The weak init is a store, no need to do two.
2811 }
2812 }
2813
2814 // Enter the cleanup scope.
2815 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2816 }
2817 }
2818
2819 // Store the initial value into the alloca.
2820 if (DoStore)
2821 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2822
2823 setAddrOfLocalVar(&D, DeclPtr);
2824
2825 // Push a FakeUse 'cleanup' object onto the EHStack for the parameter,
2826 // which may be the 'this' pointer. This causes the emission of a fake.use
2827 // call with the parameter as argument at the end of the function.
2828 if (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
2830 (CGM.getCodeGenOpts().getExtendVariableLiveness() ==
2832 &D == CXXABIThisDecl)) {
2833 if (shouldExtendLifetime(getContext(), CurCodeDecl, D, CXXABIThisDecl))
2834 EHStack.pushCleanup<FakeUse>(NormalFakeUse, DeclPtr);
2835 }
2836
2837 // Emit debug info for param declarations in non-thunk functions.
2838 if (CGDebugInfo *DI = getDebugInfo()) {
2840 !NoDebugInfo) {
2841 llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2842 &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
2843 if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
2844 DI->getParamDbgMappings().insert({Var, DILocalVar});
2845 }
2846 }
2847
2848 if (D.hasAttr<AnnotateAttr>())
2849 EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));
2850
2851 // We can only check return value nullability if all arguments to the
2852 // function satisfy their nullability preconditions. This makes it necessary
2853 // to emit null checks for args in the function body itself.
2854 if (requiresReturnValueNullabilityCheck()) {
2855 auto Nullability = Ty->getNullability();
2856 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2857 SanitizerScope SanScope(this);
2858 RetValNullabilityPrecondition =
2859 Builder.CreateAnd(RetValNullabilityPrecondition,
2860 Builder.CreateIsNotNull(Arg.getAnyValue()));
2861 }
2862 }
2863}
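An illustrative C++ signature (invented names) for the indirect-argument handling above: a by-value parameter of non-trivially-destructible class type is typically passed as a pointer to caller-allocated memory, and on ABIs where arguments are destroyed in the callee (for example the Microsoft C++ ABI) this is also where the destructor cleanup gets pushed.

#include <cstddef>
#include <string>

// 's' is of non-trivial class type, so most C++ ABIs pass it indirectly;
// within the body the parameter is accessed through that incoming pointer.
std::size_t byValueLength(std::string s) {
  return s.size();
}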
2864
2866 CodeGenFunction *CGF) {
2867 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2868 return;
2870}
2871
2873 CodeGenFunction *CGF) {
2874 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2875 (!LangOpts.EmitAllDecls && !D->isUsed()))
2876 return;
2878}
2879
2881 CodeGenFunction *CGF) {
2882 // This is a no-op; we can just ignore these declarations.
2883}
2884
2886 CodeGenFunction *CGF) {
2887 // This is a no-op; we can just ignore these declarations.
2888}
2889
2892}
2893
2895 for (const Expr *E : D->varlist()) {
2896 const auto *DE = cast<DeclRefExpr>(E);
2897 const auto *VD = cast<VarDecl>(DE->getDecl());
2898
2899 // Skip all but globals.
2900 if (!VD->hasGlobalStorage())
2901 continue;
2902
2903 // Check if the global has been materialized yet or not. If not, we are done
2904 // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2905 // we already emitted the global, we might have done so before the
2906 // OMPAllocateDeclAttr was attached, potentially leaving it in the wrong
2907 // address space. While not pretty, common practice is to remove the old IR
2908 // global and generate a new one, so we do that here too. Uses are replaced
2909 // properly.
2910 StringRef MangledName = getMangledName(VD);
2911 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
2912 if (!Entry)
2913 continue;
2914
2915 // We can also keep the existing global if the address space is what we
2916 // expect it to be, if not, it is replaced.
2918 auto TargetAS = getContext().getTargetAddressSpace(GVAS);
2919 if (Entry->getType()->getAddressSpace() == TargetAS)
2920 continue;
2921
2922 llvm::PointerType *PTy = llvm::PointerType::get(getLLVMContext(), TargetAS);
2923
2924 // Replace all uses of the old global with a cast. Since we mutate the type
2925 // in place, we need an intermediate that takes the spot of the old entry
2926 // until we can create the cast.
2927 llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2928 getModule(), Entry->getValueType(), false,
2929 llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2930 llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2931 Entry->replaceAllUsesWith(DummyGV);
2932
2933 Entry->mutateType(PTy);
2934 llvm::Constant *NewPtrForOldDecl =
2935 llvm::ConstantExpr::getAddrSpaceCast(Entry, DummyGV->getType());
2936
2937 // Now we have a casted version of the changed global, the dummy can be
2938 // replaced and deleted.
2939 DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
2940 DummyGV->eraseFromParent();
2941 }
2942}
2943
2944std::optional<CharUnits>
2946 if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
2947 if (Expr *Alignment = AA->getAlignment()) {
2948 unsigned UserAlign =
2949 Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
2950 CharUnits NaturalAlign =
2952
2953 // OpenMP 5.1, page 185, lines 7-10:
2954 // Each item in the align modifier list must be aligned to the maximum
2955 // of the specified alignment and the type's natural alignment.
2957 std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
2958 }
2959 }
2960 return std::nullopt;
2961}
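A small OpenMP C++ illustration (requires -fopenmp; names invented; assumes Clang's support for the OpenMP 5.1 align clause on the allocate directive): with align(64) on a double array, getOMPAllocateAlignment() would return max(64, alignof(double)), i.e. 64 bytes.

#include <omp.h>

// Request 64-byte alignment for this global via the allocate directive; the
// effective alignment is the maximum of the align modifier and the type's
// natural alignment.
static double accumulator[256];
#pragma omp allocate(accumulator) align(64) allocator(omp_default_mem_alloc)

double sumAccumulator() {
  double s = 0.0;
  for (int i = 0; i < 256; ++i)
    s += accumulator[i];
  return s;
}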
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3597
static bool isCapturedBy(const VarDecl &, const Expr *)
Determines whether the given __block variable is potentially captured by the given expression.
Definition: CGDecl.cpp:1759
static void emitPartialArrayDestroy(CodeGenFunction &CGF, llvm::Value *begin, llvm::Value *end, QualType type, CharUnits elementAlign, CodeGenFunction::Destroyer *destroyer)
Perform partial array destruction as if in an EH cleanup.
Definition: CGDecl.cpp:2490
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init, unsigned &NumStores)
Decide whether we can emit the non-zero parts of the specified initializer with equal or fewer than N...
Definition: CGDecl.cpp:910
static llvm::Constant * patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern, llvm::Type *Ty)
Generate a constant filled with either a pattern or zeroes.
Definition: CGDecl.cpp:1042
static llvm::Constant * constWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Replace all padding bytes in a given constant with either a pattern byte or 0x00.
Definition: CGDecl.cpp:1094
static llvm::Value * shouldUseMemSetToInitialize(llvm::Constant *Init, uint64_t GlobalSize, const llvm::DataLayout &DL)
Decide whether we should use memset to initialize a local variable instead of using a memcpy from a c...
Definition: CGDecl.cpp:1016
IsPattern
Definition: CGDecl.cpp:1039
static bool shouldSplitConstantStore(CodeGenModule &CGM, uint64_t GlobalByteSize)
Decide whether we want to split a constant structure or array store into a sequence of its fields' st...
Definition: CGDecl.cpp:1028
static llvm::Constant * replaceUndef(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Definition: CGDecl.cpp:1324
static bool shouldExtendLifetime(const ASTContext &Context, const Decl *FuncDecl, const VarDecl &D, ImplicitParamDecl *CXXABIThisDecl)
Definition: CGDecl.cpp:1455
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, const LValue &destLV, const Expr *init)
Definition: CGDecl.cpp:709
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init, uint64_t GlobalSize)
Decide whether we should use bzero plus some stores to initialize a local variable instead of using a...
Definition: CGDecl.cpp:995
static llvm::Constant * constStructWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::StructType *STy, llvm::Constant *constant)
Helper function for constWithPadding() to deal with padding in structures.
Definition: CGDecl.cpp:1054
static bool containsUndef(llvm::Constant *constant)
Definition: CGDecl.cpp:1313
static uint64_t maxFakeUseAggregateSize(const ASTContext &C)
Return the maximum size of an aggregate for which we generate a fake use intrinsic when -fextend-vari...
Definition: CGDecl.cpp:1449
static bool isAccessedBy(const VarDecl &var, const Stmt *s)
Definition: CGDecl.cpp:677
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var, Address addr, Qualifiers::ObjCLifetime lifetime)
EmitAutoVarWithLifetime - Does the setup required for an automatic variable with lifetime.
Definition: CGDecl.cpp:641
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM, const VarDecl &D, CGBuilderTy &Builder, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1180
static void drillIntoBlockVariable(CodeGenFunction &CGF, LValue &lvalue, const VarDecl *var)
Definition: CGDecl.cpp:758
static std::string getStaticDeclName(CIRGenModule &cgm, const VarDecl &d)
Definition: CIRGenDecl.cpp:245
const Decl * D
Expr * E
This file defines OpenACC nodes for declarative directives.
This file defines OpenMP nodes for declarative directives.
static const NamedDecl * getDefinition(const Decl *D)
Definition: SemaDecl.cpp:2958
SourceLocation Loc
Definition: SemaObjC.cpp:754
SourceLocation Begin
__device__ __2f16 float __ockl_bool s
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
IdentifierTable & Idents
Definition: ASTContext.h:740
const LangOptions & getLangOpts() const
Definition: ASTContext.h:894
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2625
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:3059
unsigned getTargetAddressSpace(LangAS AS) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: TypeBase.h:3738
Attr - This represents one attribute.
Definition: Attr.h:44
Represents a block literal declaration, which is like an unnamed FunctionDecl.
Definition: Decl.h:4634
ArrayRef< Capture > captures() const
Definition: Decl.h:4761
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6560
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2604
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1378
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2869
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:253
CharUnits getAlignment() const
Definition: Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:276
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:233
bool isValid() const
Definition: Address.h:177
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:953
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:963
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:140
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:309
Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:329
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:402
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:112
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:162
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:369
Address CreateConstGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:282
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:350
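A sketch combining the CGBuilderTy helpers above for a simple byte-wise copy; the function name is illustrative, and CGF.Builder/CGF.CGM are the usual CodeGenFunction members:

  #include "CodeGenFunction.h"
  #include "CodeGenModule.h"

  // Copy Size bytes from Src to Dst, emitting the size as a size_t constant
  // (CodeGenModule::getSize, documented further down this list).
  static void copyBytesSketch(clang::CodeGen::CodeGenFunction &CGF,
                              clang::CodeGen::Address Dst,
                              clang::CodeGen::Address Src,
                              clang::CharUnits Size) {
    llvm::Value *SizeV = CGF.CGM.getSize(Size);
    CGF.Builder.CreateMemCpy(Dst, Src, SizeV, /*IsVolatile=*/false);
  }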
Abstract information about a function or function prototype.
Definition: CGCall.h:41
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:59
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl)
Emit information about a global variable.
llvm::DILocalVariable * EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI, CGBuilderTy &Builder, const bool UsePointerValue=false)
Emit call to llvm.dbg.declare for an automatic variable declaration.
void setLocation(SourceLocation Loc)
Update the current source location.
void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr)
Register VLA size expression debug node with the qualified type.
Definition: CGDebugInfo.h:445
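A sketch of wiring a local variable into CGDebugInfo using the entry points above; it assumes debug info is enabled for the function (CGF.getDebugInfo() non-null), and the helper name is illustrative:

  #include "CGDebugInfo.h"
  #include "CodeGenFunction.h"

  // Update the current source location, then emit llvm.dbg.declare for the
  // variable's alloca.
  static void declareLocalSketch(clang::CodeGen::CodeGenFunction &CGF,
                                 const clang::VarDecl &D,
                                 llvm::Value *AllocaPtr) {
    if (clang::CodeGen::CGDebugInfo *DI = CGF.getDebugInfo()) {
      DI->setLocation(D.getLocation());
      DI->EmitDeclareOfAutoVariable(&D, AllocaPtr, CGF.Builder);
    }
  }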
CGFunctionInfo - Class to encapsulate the information about a function definition.
const_arg_iterator arg_begin() const
MutableArrayRef< ArgInfo > arguments()
virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF, const VarDecl &D)
Emit the IR required for a work-group-local variable declaration, and add an entry to CGF's LocalDecl...
Allows disabling automatic handling of functions used in target regions as those marked as omp decla...
virtual void getKmpcFreeShared(CodeGenFunction &CGF, const std::pair< llvm::Value *, llvm::Value * > &AddrSizePair)
Get call to __kmpc_free_shared.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit the function for the user defined mapper construct.
virtual void processRequiresDirective(const OMPRequiresDecl *D)
Perform check on requires decl to ensure that target architecture supports unified addressing.
virtual std::pair< llvm::Value *, llvm::Value * > getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD)
Get call to __kmpc_alloc_shared.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D)
Emit code for the specified user defined reduction construct.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:302
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
emitArrayDestroy - Destroys all the elements of the given array, beginning from last to first.
Definition: CGDecl.cpp:2435
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
Definition: CGDeclCXX.cpp:387
void EmitARCMoveWeak(Address dst, Address src)
void @objc_moveWeak(i8** dest, i8** src) Disregards the current value in dest.
Definition: CGObjC.cpp:2692
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
emitDestroy - Immediately perform the destruction of the given object.
Definition: CGDecl.cpp:2395
static Destroyer destroyNonTrivialCStruct
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
Definition: CGExprAgg.cpp:2236
void emitByrefStructureInit(const AutoVarEmission &emission)
Initialize the structural components of a __block variable, i.e.
Definition: CGBlocks.cpp:2756
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to immediately releasing the result of Emi...
Definition: CGObjC.cpp:3618
SanitizerSet SanOpts
Sanitizers enabled for this function.
void pushStackRestore(CleanupKind kind, Address SPMem)
Definition: CGDecl.cpp:2323
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
void EmitARCInitWeak(Address addr, llvm::Value *value)
i8* @objc_initWeak(i8** addr, i8* value) Returns value.
Definition: CGObjC.cpp:2663
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitFakeUse(Address Addr)
Definition: CGDecl.cpp:1380
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushEHDestroy - Push the standard destructor for the given type as an EH-only cleanup.
Definition: CGDecl.cpp:2269
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
Definition: CGObjC.cpp:3508
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitAtomicInit(Expr *E, LValue lvalue)
Definition: CGAtomic.cpp:2099
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushRegularPartialArrayCleanup - Push an EH cleanup to destroy already-constructed elements of the gi...
Definition: CGDecl.cpp:2595
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
Definition: CGDecl.cpp:1349
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:3649
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
Definition: CGBlocks.cpp:2882
void EmitAutoVarInit(const AutoVarEmission &emission)
Definition: CGDecl.cpp:1930
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition: CGExpr.cpp:684
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
EmitAutoVarAlloca - Emit the alloca and debug information for a local variable.
Definition: CGDecl.cpp:1483
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition: CGDecl.cpp:2279
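A sketch of the pushEHDestroy/pushDestroy pairing above; QualType::isDestructedType() (not part of this list) is assumed to yield the destruction kind, and the helper name is illustrative:

  #include "CodeGenFunction.h"

  // Register cleanups so a local of type Ty at Addr is destroyed on scope
  // exit; pushDestroy adds at least a normal cleanup, EH-only paths would
  // use pushEHDestroy instead.
  static void pushLocalDestroySketch(clang::CodeGen::CodeGenFunction &CGF,
                                     clang::CodeGen::Address Addr,
                                     clang::QualType Ty) {
    if (clang::QualType::DestructionKind DK = Ty.isDestructedType())
      CGF.pushDestroy(DK, Addr, Ty);
  }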
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
Definition: CGDecl.cpp:786
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition: CGDecl.cpp:764
const CodeGen::CGBlockInfo * BlockInfo
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
Definition: CGClass.cpp:2502
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
BuildByrefInfo - This routine changes a __block variable declared as T x into:
Definition: CGBlocks.cpp:2670
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to destroy already-constructed elements ...
Definition: CGDecl.cpp:2579
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition: CGDecl.cpp:2252
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2481
DominatingValue< T >::saved_type saveValueInCond(T value)
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
Definition: CGBlocks.cpp:1724
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:3539
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
Definition: CGCleanup.cpp:290
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:174
const TargetInfo & getTarget() const
void defaultInitNonTrivialCStructVar(LValue Dst)
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:403
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
Definition: CGDecl.cpp:2327
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
Definition: CGDecl.cpp:2304
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
static Destroyer destroyARCStrongImprecise
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:3789
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
Definition: CGObjC.cpp:3716
LValue EmitDeclRefLValue(const DeclRefExpr *E)
Definition: CGExpr.cpp:3205
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition: CGObjC.cpp:3493
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition: CGDecl.cpp:1357
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Definition: CGBlocks.cpp:2634
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:151
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition: CGCall.cpp:5216
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
Definition: CGDecl.cpp:1369
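A sketch pairing EmitLifetimeStart/EmitLifetimeEnd around a temporary, following the signatures listed above; whether a marker is emitted at all depends on the optimization settings, and the helper name is illustrative:

  #include "CodeGenFunction.h"

  // Bracket a temporary's live range with lifetime markers.
  static void lifetimeSketch(clang::CodeGen::CodeGenFunction &CGF, clang::QualType Ty) {
    clang::CodeGen::RawAddress Tmp = CGF.CreateMemTemp(Ty, "tmp.sketch");
    bool Emitted = CGF.EmitLifetimeStart(Tmp.getPointer());
    // ... initialize and use Tmp here ...
    if (Emitted)
      CGF.EmitLifetimeEnd(Tmp.getPointer());
  }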
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
Definition: CGDecl.cpp:2136
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitAutoVarCleanups(const AutoVarEmission &emission)
Definition: CGDecl.cpp:2203
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
Definition: CGDecl.cpp:1388
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:2533
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition: CGDecl.cpp:2332
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
Definition: CGDecl.cpp:2654
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
Definition: CGDecl.cpp:352
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:2337
llvm::Type * ConvertTypeForMem(QualType T)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:186
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
Definition: CGBlocks.cpp:1414
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
Definition: CGDecl.cpp:202
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Definition: CGExprAgg.cpp:2205
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition: CGObjC.cpp:2524
const CGFunctionInfo * CurFnInfo
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition: CGDecl.cpp:52
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1631
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition: CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
static Destroyer destroyARCStrongPrecise
void EmitARCCopyWeak(Address dst, Address src)
void @objc_copyWeak(i8** dest, i8** src) Disregards the current value in dest.
Definition: CGObjC.cpp:2701
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition: CGDecl.cpp:2075
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
Definition: CGDecl.cpp:1808
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
Definition: CGCleanup.cpp:652
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:652
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
Definition: CGDecl.cpp:2093
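A simplified sketch of the evaluation-kind dispatch behind EmitExprAsInit above (see getEvaluationKind); it omits reference binding, ObjC lifetime and capturedByInit handling, and the aggregate slot flags are illustrative:

  #include "CGValue.h"
  #include "CodeGenFunction.h"

  static void emitInitSketch(clang::CodeGen::CodeGenFunction &CGF,
                             const clang::Expr *Init,
                             clang::CodeGen::LValue LV) {
    using namespace clang::CodeGen;
    switch (CodeGenFunction::getEvaluationKind(LV.getType())) {
    case TEK_Scalar:    // plain scalars: evaluate, then store through the l-value
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(Init)), LV,
                                 /*isInit=*/true);
      break;
    case TEK_Complex:   // _Complex values travel as a real/imaginary pair
      CGF.EmitStoreOfComplex(CGF.EmitComplexExpr(Init), LV, /*isInit=*/true);
      break;
    case TEK_Aggregate: // aggregates are emitted directly into the slot
      CGF.EmitAggExpr(Init, AggValueSlot::forLValue(
                                LV, AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::MayOverlap));
      break;
    }
  }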
This class organizes the cross-function state that is used while generating LLVM code.
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD)
void setGVProperties(llvm::GlobalValue *GV, GlobalDecl GD) const
Set visibility, dllimport/dllexport and dso_local.
llvm::Module & getModule() const
void setStaticLocalDeclAddress(const VarDecl *D, llvm::Constant *C)
llvm::Function * getLLVMLifetimeStartFn()
Lazily declare the @llvm.lifetime.start intrinsic.
Definition: CGDecl.cpp:2607
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1130
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
void EmitOpenACCDeclare(const OpenACCDeclareDecl *D, CodeGenFunction *CGF=nullptr)
Definition: CGDecl.cpp:2880
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGOpenCLRuntime & getOpenCLRuntime()
Return a reference to the configured OpenCL runtime.
llvm::Function * getLLVMFakeUseFn()
Lazily declare the @llvm.fake.use intrinsic.
Definition: CGDecl.cpp:2625
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
void EmitOMPAllocateDecl(const OMPAllocateDecl *D)
Emit code for the allocate directive.
Definition: CGDecl.cpp:2894
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
void addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.compiler.used metadata.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:255
llvm::Constant * GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition=NotForDefinition)
void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV)
Add global annotations that are set on D, for the global GV.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const
Set the TLS mode for the given LLVM GlobalValue for the thread-local variable declaration D.
ASTContext & getContext() const
void EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit code for the declare mapper construct.
Definition: CGDecl.cpp:2872
llvm::Function * getLLVMLifetimeEndFn()
Lazily declare the @llvm.lifetime.end intrinsic.
Definition: CGDecl.cpp:2616
void EmitOMPRequiresDecl(const OMPRequiresDecl *D)
Emit code for the requires directive.
Definition: CGDecl.cpp:2890
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
std::optional< CharUnits > getOMPAllocateAlignment(const VarDecl *VD)
Return the alignment specified in an allocate directive, if present.
Definition: CGDecl.cpp:2945
llvm::LLVMContext & getLLVMContext()
llvm::GlobalValue * GetGlobalValue(StringRef Ref)
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D, CodeGenFunction *CGF=nullptr)
Emit code for the declare reduction construct.
Definition: CGDecl.cpp:2865
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
LangAS GetGlobalVarAddressSpace(const VarDecl *D)
Return the AST address space of the underlying global variable for D, as determined by its declaratio...
void EmitOpenACCRoutine(const OpenACCRoutineDecl *D, CodeGenFunction *CGF=nullptr)
Definition: CGDecl.cpp:2885
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * convertTypeForLoadStore(QualType T, llvm::Type *LLVMTy=nullptr)
Given that T is a scalar type, return the IR type that should be used for load and store operations.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
bool typeRequiresSplitIntoByteArray(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
Check whether the given type needs to be laid out in memory using an opaque byte-array type because i...
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
Definition: CGCall.cpp:523
llvm::Constant * tryEmitForInitializer(const VarDecl &D)
Try to emit the initializer of the given declaration as an abstract constant.
void finalize(llvm::GlobalVariable *global)
llvm::Constant * tryEmitAbstractForInitializer(const VarDecl &D)
Try to emit the initializer of the given declaration as an abstract constant.
A cleanup scope which generates the cleanup blocks lazily.
Definition: CGCleanup.h:247
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:146
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:208
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:398
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
Definition: CGCleanup.h:623
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
const Qualifiers & getQuals() const
Definition: CGValue.h:338
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
void setNonGC(bool Value)
Definition: CGValue.h:304
void setAddress(Address address)
Definition: CGValue.h:363
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:293
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
An abstract representation of an aligned address.
Definition: Address.h:42
llvm::Value * getPointer() const
Definition: Address.h:66
static RawAddress invalid()
Definition: Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:379
void reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D, bool IsDynInit=false)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
Definition: TargetInfo.h:80
bool IsBypassed(const VarDecl *D) const
Returns true if the variable declaration was bypassed by any goto or switch statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1720
body_range body()
Definition: Stmt.h:1783
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Definition: DeclBase.h:1449
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1272
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
const DeclContext * getParentFunctionOrMethod(bool LexicalParent=false) const
If this decl is defined inside a function/method/block it returns the corresponding DeclContext,...
Definition: DeclBase.cpp:319
T * getAttr() const
Definition: DeclBase.h:573
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition: DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1267
SourceLocation getLocation() const
Definition: DeclBase.h:439
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:553
DeclContext * getDeclContext()
Definition: DeclBase.h:448
bool hasAttr() const
Definition: DeclBase.h:577
Kind getKind() const
Definition: DeclBase.h:442
This represents one expression.
Definition: Expr.h:112
bool isXValue() const
Definition: Expr.h:286
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:3078
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3069
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:284
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:273
QualType getType() const
Definition: Expr.h:144
Represents a function declaration or definition.
Definition: Decl.h:1999
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:57
const Decl * getDecl() const
Definition: GlobalDecl.h:106
One of these records is kept for each identifier that is lexed.
IdentifierInfo & getOwn(StringRef Name)
Gets an IdentifierInfo for the given name without consulting external sources.
This represents '#pragma omp allocate ...' directive.
Definition: DeclOpenMP.h:474
This represents '#pragma omp declare mapper ...' directive.
Definition: DeclOpenMP.h:287
This represents '#pragma omp declare reduction ...' directive.
Definition: DeclOpenMP.h:177
This represents '#pragma omp requires...' directive.
Definition: DeclOpenMP.h:417
Pointer-authentication qualifiers.
Definition: TypeBase.h:152
A (possibly-)qualified type.
Definition: TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: TypeBase.h:8427
@ DK_nontrivial_c_struct
Definition: TypeBase.h:1538
@ DK_objc_strong_lifetime
Definition: TypeBase.h:1536
@ PDIK_Struct
The type is a struct containing a field whose type is not PCK_Trivial.
Definition: TypeBase.h:1478
LangAS getAddressSpace() const
Return the address space of this type.
Definition: TypeBase.h:8469
bool isConstant(const ASTContext &Ctx) const
Definition: TypeBase.h:1097
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: TypeBase.h:8383
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: TypeBase.h:8528
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: TypeBase.h:8437
bool isObjCGCWeak() const
True when the type is Objective-C's __weak.
Definition: TypeBase.h:1428
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: TypeBase.h:1036
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2699
The collection of all-type qualifiers we support.
Definition: TypeBase.h:331
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition: TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: TypeBase.h:367
bool hasConst() const
Definition: TypeBase.h:457
void removePointerAuth()
Definition: TypeBase.h:610
PointerAuthQualifier getPointerAuth() const
Definition: TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition: TypeBase.h:545
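A sketch of dispatching on the ObjC ownership qualifiers above when storing into a __weak l-value; the split between the init and store entry points mirrors the EmitARCInitWeak/EmitARCStoreWeak descriptions earlier, and the helper name is illustrative:

  #include "CodeGenFunction.h"

  // The very first store into a __weak object must use objc_initWeak;
  // subsequent assignments go through objc_storeWeak.
  static void storeWeakSketch(clang::CodeGen::CodeGenFunction &CGF,
                              clang::CodeGen::LValue LV, llvm::Value *Value,
                              bool IsInit) {
    if (LV.getQuals().getObjCLifetime() != clang::Qualifiers::OCL_Weak)
      return;  // only the __weak case is sketched here
    if (IsInit)
      CGF.EmitARCInitWeak(LV.getAddress(), Value);
    else
      CGF.EmitARCStoreWeak(LV.getAddress(), Value, /*ignored=*/true);
  }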
bool isParamDestroyedInCallee() const
Definition: Decl.h:4459
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
static const uint64_t MaximumAlignment
Definition: Sema.h:1207
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4531
Stmt - This represents one statement.
Definition: Stmt.h:85
child_range children()
Definition: Stmt.cpp:295
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1360
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.h:41
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
Definition: Type.cpp:2430
bool isArrayType() const
Definition: TypeBase.h:8679
RecordDecl * castAsRecordDecl() const
Definition: Type.h:48
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: TypeBase.h:2818
bool isRecordType() const
Definition: TypeBase.h:8707
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:5066
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:711
QualType getType() const
Definition: Decl.h:722
Represents a variable declaration or definition.
Definition: Decl.h:925
static VarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S)
Definition: Decl.cpp:2151
bool isLocalVarDecl() const
Returns true for local variable declarations other than parameters.
Definition: Decl.h:1252
Defines the clang::TargetInfo interface.
@ BLOCK_FIELD_IS_BYREF
Definition: CGBlocks.h:92
@ BLOCK_FIELD_IS_WEAK
Definition: CGBlocks.h:94
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
Definition: PatternInit.cpp:15
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ ARCPreciseLifetime
Definition: CGValue.h:136
@ ARCImpreciseLifetime
Definition: CGValue.h:136
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
The JSON file list parser is used to communicate input to InstallAPI.
@ Ctor_Base
Base object ctor.
Definition: ABI.h:26
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ NonNull
Values of this type can never be null.
@ SC_Auto
Definition: Specifiers.h:256
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:341
@ Dtor_Base
Base object dtor.
Definition: ABI.h:36
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
@ VK_LValue
An l-value expression is a reference to an object with independent storage.
Definition: Specifiers.h:139
const FunctionProtoType * T
@ ThreadPrivateVar
Parameter for Thread private variable.
unsigned long uint64_t
float __ovld __cnfn length(float)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable \p VD.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::PointerType * AllocaInt8PtrTy
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:174
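A sketch of the usual SanOpts guard before emitting a sanitizer check, combining SanitizerSet::has above with the SanitizerScope RAII object documented earlier; the particular check kind and the helper name are illustrative:

  #include "CodeGenFunction.h"

  // Skip the check emission entirely unless the matching sanitizer is
  // enabled for this function.
  static void maybeEmitNullabilityCheckSketch(clang::CodeGen::CodeGenFunction &CGF,
                                              clang::CodeGen::LValue LHS,
                                              llvm::Value *RHS,
                                              clang::SourceLocation Loc) {
    if (!CGF.SanOpts.has(clang::SanitizerKind::NullabilityAssign))
      return;
    clang::CodeGen::CodeGenFunction::SanitizerScope SanScope(&CGF);
    CGF.EmitNullabilityCheck(LHS, RHS, Loc);
  }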