CodeGenFunction.h
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGLoopInfo.h"
18#include "CGValue.h"
19#include "CodeGenModule.h"
20#include "EHScopeStack.h"
21#include "SanitizerHandler.h"
22#include "VarBypassDetector.h"
23#include "clang/AST/CharUnits.h"
25#include "clang/AST/ExprCXX.h"
26#include "clang/AST/ExprObjC.h"
30#include "clang/AST/StmtSYCL.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class ConvergenceControlInst;
51class LLVMContext;
52class MDNode;
53class SwitchInst;
54class Twine;
55class Value;
56class CanonicalLoopInfo;
57} // namespace llvm
58
59namespace clang {
60class ASTContext;
61class CXXDestructorDecl;
62class CXXForRangeStmt;
63class CXXTryStmt;
64class Decl;
65class LabelDecl;
66class FunctionDecl;
67class FunctionProtoType;
68class LabelStmt;
69class ObjCContainerDecl;
70class ObjCInterfaceDecl;
71class ObjCIvarDecl;
72class ObjCMethodDecl;
73class ObjCImplementationDecl;
74class ObjCPropertyImplDecl;
75class TargetInfo;
76class VarDecl;
77class ObjCForCollectionStmt;
78class ObjCAtTryStmt;
79class ObjCAtThrowStmt;
80class ObjCAtSynchronizedStmt;
81class ObjCAutoreleasePoolStmt;
82class OMPUseDevicePtrClause;
83class OMPUseDeviceAddrClause;
84class SVETypeFlags;
85class OMPExecutableDirective;
86
87namespace analyze_os_log {
88class OSLogBufferLayout;
89}
90
91namespace CodeGen {
92class CodeGenTypes;
93class CodeGenPGO;
94class CGCallee;
95class CGFunctionInfo;
96class CGBlockInfo;
97class CGCXXABI;
98class BlockByrefHelpers;
99class BlockByrefInfo;
100class BlockFieldFlags;
101class RegionCodeGenTy;
102class TargetCodeGenInfo;
103struct OMPTaskDataTy;
104struct CGCoroData;
105
106// clang-format off
107/// The kind of evaluation to perform on values of a particular
108/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
109/// CGExprAgg?
110///
111/// TODO: should vectors maybe be split out into their own thing?
112 enum TypeEvaluationKind {
113 TEK_Scalar,
114 TEK_Complex,
115 TEK_Aggregate
116 };
117// clang-format on
118
119/// Helper class with most of the code for saving a value for a
120/// conditional expression cleanup.
121 struct DominatingLLVMValue {
122 typedef llvm::PointerIntPair<llvm::Value *, 1, bool> saved_type;
123
124 /// Answer whether the given value needs extra work to be saved.
125 static bool needsSaving(llvm::Value *value) {
126 if (!value)
127 return false;
128
129 // If it's not an instruction, we don't need to save.
130 if (!isa<llvm::Instruction>(value))
131 return false;
132
133 // If it's an instruction in the entry block, we don't need to save.
134 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
135 return (block != &block->getParent()->getEntryBlock());
136 }
137
138 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
139 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
140};
141
142/// A partial specialization of DominatingValue for llvm::Values that
143/// might be llvm::Instructions.
144template <class T> struct DominatingPointer<T, true> : DominatingLLVMValue {
145 typedef T *type;
146 static type restore(CodeGenFunction &CGF, saved_type value) {
147 return static_cast<T *>(DominatingLLVMValue::restore(CGF, value));
148 }
149};
150
151/// A specialization of DominatingValue for Address.
152template <> struct DominatingValue<Address> {
153 typedef Address type;
154
155 struct saved_type {
156 DominatingLLVMValue::saved_type BasePtr;
157 llvm::Type *ElementType;
158 CharUnits Alignment;
159 DominatingLLVMValue::saved_type Offset;
160 llvm::PointerType *EffectiveType;
161 };
162
163 static bool needsSaving(type value) {
164 if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
165 DominatingLLVMValue::needsSaving(value.getOffset()))
166 return true;
167 return false;
168 }
169 static saved_type save(CodeGenFunction &CGF, type value) {
170 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
171 value.getElementType(), value.getAlignment(),
172 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
173 }
174 static type restore(CodeGenFunction &CGF, saved_type value) {
175 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
176 value.ElementType, value.Alignment, CGPointerAuthInfo(),
177 DominatingLLVMValue::restore(CGF, value.Offset));
178 }
179};
180
181/// A specialization of DominatingValue for RValue.
182template <> struct DominatingValue<RValue> {
183 typedef RValue type;
184 class saved_type {
185 enum Kind {
186 ScalarLiteral,
187 ScalarAddress,
188 AggregateLiteral,
189 AggregateAddress,
190 ComplexAddress
191 };
192 union {
193 struct {
194 DominatingLLVMValue::saved_type first, second;
195 } Vals;
196 DominatingValue<Address>::saved_type AggregateAddr;
197 };
198 LLVM_PREFERRED_TYPE(Kind)
199 unsigned K : 3;
200
201 saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
202 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
203
204 saved_type(DominatingLLVMValue::saved_type Val1,
205 DominatingLLVMValue::saved_type Val2)
206 : Vals{Val1, Val2}, K(ComplexAddress) {}
207
208 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
209 : AggregateAddr(AggregateAddr), K(K) {}
210
211 public:
212 static bool needsSaving(RValue value);
213 static saved_type save(CodeGenFunction &CGF, RValue value);
214 RValue restore(CodeGenFunction &CGF);
215
216 // implementations in CGCleanup.cpp
217 };
218
219 static bool needsSaving(type value) { return saved_type::needsSaving(value); }
220 static saved_type save(CodeGenFunction &CGF, type value) {
221 return saved_type::save(CGF, value);
222 }
223 static type restore(CodeGenFunction &CGF, saved_type value) {
224 return value.restore(CGF);
225 }
226};
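// Example (a minimal sketch, not part of the header itself): how conditional
// cleanup emission typically uses this machinery. `CGF` is a CodeGenFunction
// and `V` an llvm::Value that may not dominate the cleanup's emission point.
//
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ... later, when the cleanup is finally emitted ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);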
227
228/// A scoped helper to set the current source atom group for
229/// CGDebugInfo::addInstToCurrentSourceAtom. A source atom is a source construct
230/// that is "interesting" for debug stepping purposes. We use an atom group
231/// number to track the instruction(s) that implement the functionality for the
232/// atom, plus backup instructions/source locations.
233 class ApplyAtomGroup {
234 uint64_t OriginalAtom = 0;
235 CGDebugInfo *DI = nullptr;
236
237 ApplyAtomGroup(const ApplyAtomGroup &) = delete;
238 void operator=(const ApplyAtomGroup &) = delete;
239
240public:
243};
244
245/// CodeGenFunction - This class organizes the per-function state that is used
246/// while generating LLVM code.
247 class CodeGenFunction : public CodeGenTypeCache {
248 CodeGenFunction(const CodeGenFunction &) = delete;
249 void operator=(const CodeGenFunction &) = delete;
250
251 friend class CGCXXABI;
252
253public:
254 /// A jump destination is an abstract label, branching to which may
255 /// require a jump out through normal cleanups.
256 struct JumpDest {
257 JumpDest() : Block(nullptr), Index(0) {}
258 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
259 unsigned Index)
260 : Block(Block), ScopeDepth(Depth), Index(Index) {}
261
262 bool isValid() const { return Block != nullptr; }
263 llvm::BasicBlock *getBlock() const { return Block; }
264 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
265 unsigned getDestIndex() const { return Index; }
266
267 // This should be used cautiously.
268 void setScopeDepth(EHScopeStack::stable_iterator depth) {
269 ScopeDepth = depth;
270 }
271
272 private:
273 llvm::BasicBlock *Block;
274 EHScopeStack::stable_iterator ScopeDepth;
275 unsigned Index;
276 };
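// Example (a minimal sketch using only members declared in this header): a
// JumpDest is usually obtained from getJumpDestInCurrentScope() and branched
// to with EmitBranchThroughCleanup(), which threads the branch through any
// enclosing cleanups. The block name below is arbitrary.
//
//   CodeGenFunction::JumpDest Exit = CGF.getJumpDestInCurrentScope("my.exit");
//   // ... emit code that may open cleanup scopes ...
//   CGF.EmitBranchThroughCleanup(Exit); // jumps out through normal cleanups
//   CGF.EmitBlock(Exit.getBlock());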
277
278 CodeGenModule &CGM; // Per-module state.
280
281 // For EH/SEH outlined funclets, this field points to the parent's CGF.
282 CodeGenFunction *ParentCGF = nullptr;
283
284 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
287
288 // Stores variables for which we can't generate correct lifetime markers
289 // because of jumps.
291
292 /// List of recently emitted OMPCanonicalLoops.
293 ///
294 /// Since OMPCanonicalLoops are nested inside other statements (in particular
295 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
296 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
297 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
298 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
299 /// this stack when done. Entering a new loop requires clearing this list; it
300 /// either means we start parsing a new loop nest (in which case the previous
301 /// loop nest goes out of scope) or a second loop in the same level in which
302 /// case it would be ambiguous into which of the two (or more) loops the loop
303 /// nest would extend.
305
306 /// Stack to track the Logical Operator recursion nest for MC/DC.
308
309 /// Stack to track the controlled convergence tokens.
311
312 /// Number of nested loop to be consumed by the last surrounding
313 /// loop-associated directive.
315
316 // CodeGen lambda for loops and support for ordered clause
317 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
318 JumpDest)>
320 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
321 const unsigned, const bool)>
323
324 // Codegen lambda for loop bounds in worksharing loop constructs
325 typedef llvm::function_ref<std::pair<LValue, LValue>(
328
329 // Codegen lambda for loop bounds in dispatch-based loop implementation
330 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
332 Address UB)>
334
335 /// CGBuilder insert helper. This function is called after an
336 /// instruction is created using Builder.
337 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
338 llvm::BasicBlock::iterator InsertPt) const;
339
340 /// CurFuncDecl - Holds the Decl for the current outermost
341 /// non-closure context.
342 const Decl *CurFuncDecl = nullptr;
343 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
344 const Decl *CurCodeDecl = nullptr;
345 const CGFunctionInfo *CurFnInfo = nullptr;
347 llvm::Function *CurFn = nullptr;
348
349 /// Save Parameter Decl for coroutine.
351
352 // Holds coroutine data if the current function is a coroutine. We use a
353 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
354 // in this header.
355 struct CGCoroInfo {
356 std::unique_ptr<CGCoroData> Data;
357 bool InSuspendBlock = false;
358 CGCoroInfo();
359 ~CGCoroInfo();
360 };
362
363 bool isCoroutine() const { return CurCoro.Data != nullptr; }
364
365 bool inSuspendBlock() const {
367 }
368
369 // Holds FramePtr for await_suspend wrapper generation,
370 // so that the __builtin_coro_frame call can be lowered
371 // directly to the value of its second argument.
373 llvm::Value *FramePtr = nullptr;
374 };
376
377 // Generates a wrapper function for the `llvm.coro.await.suspend.*` intrinsics.
378 // It encapsulates the SuspendExpr in a function, separating its body
379 // from the main coroutine to avoid miscompilations. The intrinsic
380 // is lowered to a call to this function in the CoroSplit pass.
381 // Function signature is:
382 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
383 // where type is one of (void, i1, ptr)
384 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
385 Twine const &SuspendPointName,
386 CoroutineSuspendExpr const &S);
387
388 /// CurGD - The GlobalDecl for the current function being compiled.
390
391 /// PrologueCleanupDepth - The cleanup depth enclosing all the
392 /// cleanups associated with the parameters.
394
395 /// ReturnBlock - Unified return block.
397
398 /// ReturnValue - The temporary alloca to hold the return
399 /// value. This is invalid iff the function has no return value.
401
402 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
403 /// This is invalid if sret is not in use.
405
406 /// If a return statement is being visited, this holds the return statment's
407 /// result expression.
408 const Expr *RetExpr = nullptr;
409
410 /// Return true if a label was seen in the current scope.
412 if (CurLexicalScope)
413 return CurLexicalScope->hasLabels();
414 return !LabelMap.empty();
415 }
416
417 /// AllocaInsertPoint - This is an instruction in the entry block before which
418 /// we prefer to insert allocas.
419 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
420
421private:
422 /// PostAllocaInsertPt - This is a place in the prologue where code can be
423 /// inserted that will be dominated by all the static allocas. This helps
424 /// achieve two things:
425 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
426 /// 2. All other prologue code (which is dominated by the static allocas)
427 /// appears in source order immediately after all static allocas.
428 ///
429 /// PostAllocaInsertPt will be lazily created when it is *really* required.
430 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
431
432public:
433 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
434 /// immediately after AllocaInsertPt.
435 llvm::Instruction *getPostAllocaInsertPoint() {
436 if (!PostAllocaInsertPt) {
437 assert(AllocaInsertPt &&
438 "Expected static alloca insertion point at function prologue");
439 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
440 "EBB should be entry block of the current code gen function");
441 PostAllocaInsertPt = AllocaInsertPt->clone();
442 PostAllocaInsertPt->setName("postallocapt");
443 PostAllocaInsertPt->insertAfter(AllocaInsertPt->getIterator());
444 }
445
446 return PostAllocaInsertPt;
447 }
448
449 /// API for captured statement code generation.
450 class CGCapturedStmtInfo {
451 public:
452 explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
453 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
454 explicit CGCapturedStmtInfo(const CapturedStmt &S,
455 CapturedRegionKind K = CR_Default)
456 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
457
458 RecordDecl::field_iterator Field =
459 S.getCapturedRecordDecl()->field_begin();
460 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
461 E = S.capture_end();
462 I != E; ++I, ++Field) {
463 if (I->capturesThis())
464 CXXThisFieldDecl = *Field;
465 else if (I->capturesVariable())
466 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
467 else if (I->capturesVariableByCopy())
468 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
469 }
470 }
471
472 virtual ~CGCapturedStmtInfo();
473
474 CapturedRegionKind getKind() const { return Kind; }
475
476 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
477 // Retrieve the value of the context parameter.
478 virtual llvm::Value *getContextValue() const { return ThisValue; }
479
480 /// Lookup the captured field decl for a variable.
481 virtual const FieldDecl *lookup(const VarDecl *VD) const {
482 return CaptureFields.lookup(VD->getCanonicalDecl());
483 }
484
485 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
486 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
487
488 static bool classof(const CGCapturedStmtInfo *) { return true; }
489
490 /// Emit the captured statement body.
491 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
493 CGF.EmitStmt(S);
494 }
495
496 /// Get the name of the capture helper.
497 virtual StringRef getHelperName() const { return "__captured_stmt"; }
498
499 /// Get the CaptureFields
500 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
501 return CaptureFields;
502 }
503
504 private:
505 /// The kind of captured statement being generated.
506 CapturedRegionKind Kind;
507
508 /// Keep the map between VarDecl and FieldDecl.
509 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
510
511 /// The base address of the captured record, passed in as the first
512 /// argument of the parallel region function.
513 llvm::Value *ThisValue;
514
515 /// Captured 'this' type.
516 FieldDecl *CXXThisFieldDecl;
517 };
519
520 /// RAII for correct setting/restoring of CapturedStmtInfo.
521 class CGCapturedStmtRAII {
522 private:
523 CodeGenFunction &CGF;
524 CGCapturedStmtInfo *PrevCapturedStmtInfo;
525
526 public:
527 CGCapturedStmtRAII(CodeGenFunction &CGF,
528 CGCapturedStmtInfo *NewCapturedStmtInfo)
529 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
530 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
531 }
532 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
533 };
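// Example (a minimal sketch): CGCapturedStmtRAII installs a CGCapturedStmtInfo
// for the duration of emitting a captured region and restores the previous one
// automatically. `CS` below is a hypothetical CapturedStmt pointer owned by
// the caller.
//
//   CodeGenFunction::CGCapturedStmtInfo Info(*CS, CR_Default);
//   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &Info);
//   // emission of the captured body now sees CGF.CapturedStmtInfo == &Info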
534
535 /// An abstract representation of regular/ObjC call/message targets.
536 class AbstractCallee {
537 /// The function declaration of the callee.
538 const Decl *CalleeDecl;
539
540 public:
541 AbstractCallee() : CalleeDecl(nullptr) {}
542 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
543 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
544 bool hasFunctionDecl() const {
545 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
546 }
547 const Decl *getDecl() const { return CalleeDecl; }
548 unsigned getNumParams() const {
549 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
550 return FD->getNumParams();
551 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
552 }
553 const ParmVarDecl *getParamDecl(unsigned I) const {
554 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
555 return FD->getParamDecl(I);
556 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
557 }
558 };
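// Example (a minimal sketch): AbstractCallee lets argument-emission code ask
// about parameters without caring whether the target is a C/C++ function or an
// Objective-C method. `FD` below is a hypothetical FunctionDecl pointer.
//
//   CodeGenFunction::AbstractCallee Callee(FD);
//   if (Callee.getNumParams() > 0) {
//     const ParmVarDecl *Param = Callee.getParamDecl(0);
//     (void)Param; // e.g. consult Param->getType() when emitting the argument
//   }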
559
560 /// Sanitizers enabled for this function.
562
563 /// True if CodeGen currently emits code implementing sanitizer checks.
564 bool IsSanitizerScope = false;
565
566 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
567 class SanitizerScope {
568 CodeGenFunction *CGF;
569
570 public:
573 };
574
575 /// In C++, whether we are code generating a thunk. This controls whether we
576 /// should emit cleanups.
577 bool CurFuncIsThunk = false;
578
579 /// In ARC, whether we should autorelease the return value.
580 bool AutoreleaseResult = false;
581
582 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
583 /// potentially set the return value.
584 bool SawAsmBlock = false;
585
587
588 /// True if the current function is an outlined SEH helper. This can be a
589 /// finally block or filter expression.
590 bool IsOutlinedSEHHelper = false;
591
592 /// True if CodeGen currently emits code inside preserved access index
593 /// region.
594 bool IsInPreservedAIRegion = false;
595
596 /// True if the current statement has nomerge attribute.
597 bool InNoMergeAttributedStmt = false;
598
599 /// True if the current statement has noinline attribute.
600 bool InNoInlineAttributedStmt = false;
601
602 /// True if the current statement has always_inline attribute.
603 bool InAlwaysInlineAttributedStmt = false;
604
605 /// True if the current statement has noconvergent attribute.
606 bool InNoConvergentAttributedStmt = false;
607
608 /// HLSL Branch attribute.
609 HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr =
610 HLSLControlFlowHintAttr::SpellingNotCalculated;
611
612 // The CallExpr within the current statement that the musttail attribute
613 // applies to. nullptr if there is no 'musttail' on the current statement.
614 const CallExpr *MustTailCall = nullptr;
615
616 /// Returns true if a function must make progress, which means the
617 /// mustprogress attribute can be added.
619 if (CGM.getCodeGenOpts().getFiniteLoops() ==
621 return false;
622
623 // C++11 and later guarantees that a thread eventually will do one of the
624 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
625 // - terminate,
626 // - make a call to a library I/O function,
627 // - perform an access through a volatile glvalue, or
628 // - perform a synchronization operation or an atomic operation.
629 //
630 // Hence each function is 'mustprogress' in C++11 or later.
631 return getLangOpts().CPlusPlus11;
632 }
633
634 /// Returns true if a loop must make progress, which means the mustprogress
635 /// attribute can be added. The first parameter is the loop condition; \p
636 /// HasEmptyBody indicates whether the loop body is known to be empty.
637 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
638
640 llvm::Value *BlockPointer = nullptr;
641
642 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
644
645 /// A mapping from NRVO variables to the flags used to indicate
646 /// when the NRVO has been applied to this variable.
647 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
648
651
652 // A stack of cleanups which were added to EHStack but have to be deactivated
653 // later before being popped or emitted. These are usually deactivated on
654 // exiting a `CleanupDeactivationScope` scope. For instance, after a
655 // full-expr.
656 //
657 // These are especially useful for correctly emitting cleanups while
658 // encountering branches out of an expression (through stmt-exprs or coroutine
659 // suspensions).
662 llvm::Instruction *DominatingIP;
663 };
665
666 // Enters a new scope for capturing cleanups which are deferred to be
667 // deactivated, all of which will be deactivated once the scope is exited.
676
678 assert(!Deactivated && "Deactivating already deactivated scope");
679 auto &Stack = CGF.DeferredDeactivationCleanupStack;
680 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
681 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
682 Stack[I - 1].DominatingIP);
683 Stack[I - 1].DominatingIP->eraseFromParent();
684 }
685 Stack.resize(OldDeactivateCleanupStackSize);
686 Deactivated = true;
687 }
688
689 ~CleanupDeactivationScope() {
690 if (Deactivated)
691 return;
692 ForceDeactivate();
693 }
694 };
695
697
698 llvm::Instruction *CurrentFuncletPad = nullptr;
699
700 class CallLifetimeEnd final : public EHScopeStack::Cleanup {
701 bool isRedundantBeforeReturn() override { return true; }
702
703 llvm::Value *Addr;
704
705 public:
706 CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
707
708 void Emit(CodeGenFunction &CGF, Flags flags) override {
710 }
711 };
712
713 // We are using objects of this 'cleanup' class to emit fake.use calls
714 // for -fextend-variable-liveness. They are placed at the end of a variable's
715 // scope analogous to lifetime markers.
716 class FakeUse final : public EHScopeStack::Cleanup {
717 Address Addr;
718
719 public:
720 FakeUse(Address addr) : Addr(addr) {}
721
722 void Emit(CodeGenFunction &CGF, Flags flags) override {
723 CGF.EmitFakeUse(Addr);
724 }
725 };
726
727 /// Header for data within LifetimeExtendedCleanupStack.
728 struct alignas(uint64_t) LifetimeExtendedCleanupHeader {
729 /// The size of the following cleanup object.
730 unsigned Size;
731 /// The kind of cleanup to push.
732 LLVM_PREFERRED_TYPE(CleanupKind)
734 /// Whether this is a conditional cleanup.
735 LLVM_PREFERRED_TYPE(bool)
736 unsigned IsConditional : 1;
737
738 size_t getSize() const { return Size; }
739 CleanupKind getKind() const { return (CleanupKind)Kind; }
740 bool isConditional() const { return IsConditional; }
741 };
742
743 /// i32s containing the indexes of the cleanup destinations.
744 RawAddress NormalCleanupDest = RawAddress::invalid();
745
746 unsigned NextCleanupDestIndex = 1;
747
748 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
749 llvm::BasicBlock *EHResumeBlock = nullptr;
750
751 /// The exception slot. All landing pads write the current exception pointer
752 /// into this alloca.
753 llvm::Value *ExceptionSlot = nullptr;
754
755 /// The selector slot. Under the MandatoryCleanup model, all landing pads
756 /// write the current selector value into this alloca.
757 llvm::AllocaInst *EHSelectorSlot = nullptr;
758
759 /// A stack of exception code slots. Entering an __except block pushes a slot
760 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
761 /// a value from the top of the stack.
763
764 /// Value returned by __exception_info intrinsic.
765 llvm::Value *SEHInfo = nullptr;
766
767 /// Emits a landing pad for the current EH stack.
768 llvm::BasicBlock *EmitLandingPad();
769
770 llvm::BasicBlock *getInvokeDestImpl();
771
772 /// Parent loop-based directive for scan directive.
773 const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
774 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
775 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
776 llvm::BasicBlock *OMPScanExitBlock = nullptr;
777 llvm::BasicBlock *OMPScanDispatch = nullptr;
778 bool OMPFirstScanLoop = false;
779
780 /// Manages parent directive for scan directives.
782 CodeGenFunction &CGF;
783 const OMPExecutableDirective *ParentLoopDirectiveForScan;
784
785 public:
787 CodeGenFunction &CGF,
788 const OMPExecutableDirective &ParentLoopDirectiveForScan)
789 : CGF(CGF),
790 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
791 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
792 }
794 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
795 }
796 };
797
798 template <class T>
799 typename DominatingValue<T>::saved_type saveValueInCond(T value) {
800 return DominatingValue<T>::save(*this, value);
801 }
802
803 class CGFPOptionsRAII {
804 public:
805 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
808
809 private:
810 void ConstructorHelper(FPOptions FPFeatures);
811 CodeGenFunction &CGF;
812 FPOptions OldFPFeatures;
813 llvm::fp::ExceptionBehavior OldExcept;
814 llvm::RoundingMode OldRounding;
815 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
816 };
818
819 class CGAtomicOptionsRAII {
820 public:
821 CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
822 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
823 CGM.setAtomicOpts(AO);
824 }
825 CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
826 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
827 if (!AA)
828 return;
829 AtomicOptions AO = SavedAtomicOpts;
830 for (auto Option : AA->atomicOptions()) {
831 switch (Option) {
832 case AtomicAttr::remote_memory:
833 AO.remote_memory = true;
834 break;
835 case AtomicAttr::no_remote_memory:
836 AO.remote_memory = false;
837 break;
838 case AtomicAttr::fine_grained_memory:
839 AO.fine_grained_memory = true;
840 break;
841 case AtomicAttr::no_fine_grained_memory:
842 AO.fine_grained_memory = false;
843 break;
844 case AtomicAttr::ignore_denormal_mode:
845 AO.ignore_denormal_mode = true;
846 break;
847 case AtomicAttr::no_ignore_denormal_mode:
848 AO.ignore_denormal_mode = false;
849 break;
850 }
851 }
852 CGM.setAtomicOpts(AO);
853 }
854
857 ~CGAtomicOptionsRAII() { CGM.setAtomicOpts(SavedAtomicOpts); }
858
859 private:
860 CodeGenModule &CGM;
861 AtomicOptions SavedAtomicOpts;
862 };
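// Example (a minimal sketch): the RAII object scopes atomic lowering options
// to a single statement; the previously saved options are restored on
// destruction. `AA` below is the AtomicAttr attached to the statement, if any
// (it may be null).
//
//   {
//     CodeGenFunction::CGAtomicOptionsRAII AtomicOpts(CGF.CGM, AA);
//     // emit the atomic operation with the adjusted options
//   } // previous AtomicOptions restored here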
863
864public:
865 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
866 /// rethrows.
868
869 /// A class controlling the emission of a finally block.
870 class FinallyInfo {
871 /// Where the catchall's edge through the cleanup should go.
872 JumpDest RethrowDest;
873
874 /// A function to call to enter the catch.
875 llvm::FunctionCallee BeginCatchFn;
876
877 /// An i1 variable indicating whether or not the @finally is
878 /// running for an exception.
879 llvm::AllocaInst *ForEHVar = nullptr;
880
881 /// An i8* variable into which the exception pointer to rethrow
882 /// has been saved.
883 llvm::AllocaInst *SavedExnVar = nullptr;
884
885 public:
886 void enter(CodeGenFunction &CGF, const Stmt *Finally,
887 llvm::FunctionCallee beginCatchFn,
888 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
889 void exit(CodeGenFunction &CGF);
890 };
891
892 /// Returns true inside SEH __try blocks.
893 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
894
895 /// Returns true while emitting a cleanuppad.
896 bool isCleanupPadScope() const {
897 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
898 }
899
900 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
901 /// current full-expression. Safe against the possibility that
902 /// we're currently inside a conditionally-evaluated expression.
903 template <class T, class... As>
904 void pushFullExprCleanup(CleanupKind kind, As... A) {
905 // If we're not in a conditional branch, or if none of the
906 // arguments requires saving, then use the unconditional cleanup.
907 if (!isInConditionalBranch())
908 return EHStack.pushCleanup<T>(kind, A...);
909
910 // Stash values in a tuple so we can guarantee the order of saves.
911 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
912 SavedTuple Saved{saveValueInCond(A)...};
913
914 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
915 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
916 initFullExprCleanup();
917 }
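// Example (a minimal sketch; CallFooCleanup and ArgValue are hypothetical):
//
//   struct CallFooCleanup final : EHScopeStack::Cleanup {
//     llvm::Value *Arg;
//     CallFooCleanup(llvm::Value *Arg) : Arg(Arg) {}
//     void Emit(CodeGenFunction &CGF, Flags) override { /* emit the call */ }
//   };
//   ...
//   CGF.pushFullExprCleanup<CallFooCleanup>(NormalAndEHCleanup, ArgValue);
//
// If emission is currently inside a conditional branch, ArgValue is saved via
// DominatingValue<llvm::Value *> and the cleanup becomes conditional.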
918
919 /// Queue a cleanup to be pushed after finishing the current full-expression,
920 /// potentially with an active flag.
921 template <class T, class... As>
922 void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
923 if (!isInConditionalBranch())
924 return pushCleanupAfterFullExprWithActiveFlag<T>(
925 Kind, RawAddress::invalid(), A...);
926
927 RawAddress ActiveFlag = createCleanupActiveFlag();
928 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
929 "cleanup active flag should never need saving");
930
931 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
932 SavedTuple Saved{saveValueInCond(A)...};
933
934 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
935 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag,
936 Saved);
937 }
938
939 template <class T, class... As>
940 void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
941 RawAddress ActiveFlag, As... A) {
942 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
943 ActiveFlag.isValid()};
944
945 size_t OldSize = LifetimeExtendedCleanupStack.size();
946 LifetimeExtendedCleanupStack.resize(
947 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
948 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
949
950 static_assert((alignof(LifetimeExtendedCleanupHeader) == alignof(T)) &&
951 (alignof(T) == alignof(RawAddress)),
952 "Cleanup will be allocated on misaligned address");
953 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
954 new (Buffer) LifetimeExtendedCleanupHeader(Header);
955 new (Buffer + sizeof(Header)) T(A...);
956 if (Header.IsConditional)
957 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
958 }
959
960 // Push a cleanup onto EHStack and deactivate it later. It is usually
961 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
962 // full expression).
963 template <class T, class... As>
964 void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
965 // Placeholder dominating IP for this cleanup.
966 llvm::Instruction *DominatingIP =
967 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
968 EHStack.pushCleanup<T>(Kind, A...);
969 DeferredDeactivationCleanupStack.push_back(
970 {EHStack.stable_begin(), DominatingIP});
971 }
972
973 /// Set up the last cleanup that was pushed as a conditional
974 /// full-expression cleanup.
975 void initFullExprCleanup() {
976 initFullExprCleanupWithFlag(createCleanupActiveFlag());
977 }
978
979 void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
980 RawAddress createCleanupActiveFlag();
981
982 /// PushDestructorCleanup - Push a cleanup to call the
983 /// complete-object destructor of an object of the given type at the
984 /// given address. Does nothing if T is not a C++ class type with a
985 /// non-trivial destructor.
986 void PushDestructorCleanup(QualType T, Address Addr);
987
988 /// PushDestructorCleanup - Push a cleanup to call the
989 /// complete-object variant of the given destructor on the object at
990 /// the given address.
991 void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
992 Address Addr);
993
994 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
995 /// process all branch fixups.
996 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
997 bool ForDeactivation = false);
998
999 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
1000 /// The block cannot be reactivated. Pops it if it's the top of the
1001 /// stack.
1002 ///
1003 /// \param DominatingIP - An instruction which is known to
1004 /// dominate the current IP (if set) and which lies along
1005 /// all paths of execution between the current IP and the
1006 /// point at which the cleanup comes into scope.
1007 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
1008 llvm::Instruction *DominatingIP);
1009
1010 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
1011 /// Cannot be used to resurrect a deactivated cleanup.
1012 ///
1013 /// \param DominatingIP - An instruction which is known to
1014 /// dominate the current IP (if set) and which lies along
1015 /// all paths of execution between the current IP and the
1016 /// point at which the cleanup comes into scope.
1017 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
1018 llvm::Instruction *DominatingIP);
1019
1020 /// Enters a new scope for capturing cleanups, all of which
1021 /// will be executed once the scope is exited.
1022 class RunCleanupsScope {
1023 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
1024 size_t LifetimeExtendedCleanupStackSize;
1025 CleanupDeactivationScope DeactivateCleanups;
1026 bool OldDidCallStackSave;
1027
1028 protected:
1029 bool PerformCleanup;
1030
1031 private:
1032 RunCleanupsScope(const RunCleanupsScope &) = delete;
1033 void operator=(const RunCleanupsScope &) = delete;
1034
1035 protected:
1036 CodeGenFunction &CGF;
1037
1038 public:
1039 /// Enter a new cleanup scope.
1040 explicit RunCleanupsScope(CodeGenFunction &CGF)
1041 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
1042 CleanupStackDepth = CGF.EHStack.stable_begin();
1043 LifetimeExtendedCleanupStackSize =
1044 CGF.LifetimeExtendedCleanupStack.size();
1045 OldDidCallStackSave = CGF.DidCallStackSave;
1046 CGF.DidCallStackSave = false;
1047 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
1048 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
1049 }
1050
1051 /// Exit this cleanup scope, emitting any accumulated cleanups.
1052 ~RunCleanupsScope() {
1053 if (PerformCleanup)
1054 ForceCleanup();
1055 }
1056
1057 /// Determine whether this scope requires any cleanups.
1058 bool requiresCleanups() const {
1059 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1060 }
1061
1062 /// Force the emission of cleanups now, instead of waiting
1063 /// until this object is destroyed.
1064 /// \param ValuesToReload - A list of values that need to be available at
1065 /// the insertion point after cleanup emission. If cleanup emission created
1066 /// a shared cleanup block, these value pointers will be rewritten.
1067 /// Otherwise, they will not be modified.
1068 void
1069 ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
1070 assert(PerformCleanup && "Already forced cleanup");
1071 CGF.DidCallStackSave = OldDidCallStackSave;
1072 DeactivateCleanups.ForceDeactivate();
1073 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1074 ValuesToReload);
1075 PerformCleanup = false;
1076 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1077 }
1078 };
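// Example (a minimal sketch): the usual pattern for emitting a region whose
// local cleanups must run before control leaves the region.
//
//   {
//     CodeGenFunction::RunCleanupsScope Scope(CGF);
//     // ... emit declarations and statements that may push cleanups ...
//   } // ~RunCleanupsScope() pops and emits the accumulated cleanups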
1079
1080 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1081 EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
1082 EHScopeStack::stable_end();
1083
1084 class LexicalScope : public RunCleanupsScope {
1085 SourceRange Range;
1086 SmallVector<const LabelDecl *, 4> Labels;
1087 LexicalScope *ParentScope;
1088
1089 LexicalScope(const LexicalScope &) = delete;
1090 void operator=(const LexicalScope &) = delete;
1091
1092 public:
1093 /// Enter a new cleanup scope.
1095
1096 void addLabel(const LabelDecl *label) {
1097 assert(PerformCleanup && "adding label to dead scope?");
1098 Labels.push_back(label);
1099 }
1100
1101 /// Exit this cleanup scope, emitting any accumulated
1102 /// cleanups.
1103 ~LexicalScope();
1104
1105 /// Force the emission of cleanups now, instead of waiting
1106 /// until this object is destroyed.
1107 void ForceCleanup() {
1108 CGF.CurLexicalScope = ParentScope;
1109 RunCleanupsScope::ForceCleanup();
1110
1111 if (!Labels.empty())
1112 rescopeLabels();
1113 }
1114
1115 bool hasLabels() const { return !Labels.empty(); }
1116
1117 void rescopeLabels();
1118 };
1119
1120 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1121
1122 /// The class used to temporarily assign some variables new addresses.
1123 class OMPMapVars {
1124 DeclMapTy SavedLocals;
1125 DeclMapTy SavedTempAddresses;
1126 OMPMapVars(const OMPMapVars &) = delete;
1127 void operator=(const OMPMapVars &) = delete;
1128
1129 public:
1130 explicit OMPMapVars() = default;
1131 ~OMPMapVars() {
1132 assert(SavedLocals.empty() && "Did not restore original addresses.");
1133 };
1134
1135 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1136 /// function \p CGF.
1137 /// \return true if at least one variable was set already, false otherwise.
1138 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1139 Address TempAddr) {
1140 LocalVD = LocalVD->getCanonicalDecl();
1141 // Only save it once.
1142 if (SavedLocals.count(LocalVD))
1143 return false;
1144
1145 // Copy the existing local entry to SavedLocals.
1146 auto it = CGF.LocalDeclMap.find(LocalVD);
1147 if (it != CGF.LocalDeclMap.end())
1148 SavedLocals.try_emplace(LocalVD, it->second);
1149 else
1150 SavedLocals.try_emplace(LocalVD, Address::invalid());
1151
1152 // Generate the private entry.
1153 QualType VarTy = LocalVD->getType();
1154 if (VarTy->isReferenceType()) {
1155 Address Temp = CGF.CreateMemTemp(VarTy);
1156 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1157 TempAddr = Temp;
1158 }
1159 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1160
1161 return true;
1162 }
1163
1164 /// Applies new addresses to the list of the variables.
1165 /// \return true if at least one variable is using new address, false
1166 /// otherwise.
1167 bool apply(CodeGenFunction &CGF) {
1168 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1169 SavedTempAddresses.clear();
1170 return !SavedLocals.empty();
1171 }
1172
1173 /// Restores original addresses of the variables.
1174 void restore(CodeGenFunction &CGF) {
1175 if (!SavedLocals.empty()) {
1176 copyInto(SavedLocals, CGF.LocalDeclMap);
1177 SavedLocals.clear();
1178 }
1179 }
1180
1181 private:
1182 /// Copy all the entries in the source map over the corresponding
1183 /// entries in the destination, which must exist.
1184 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1185 for (auto &[Decl, Addr] : Src) {
1186 if (!Addr.isValid())
1187 Dest.erase(Decl);
1188 else
1189 Dest.insert_or_assign(Decl, Addr);
1190 }
1191 }
1192 };
1193
1194 /// The scope used to remap some variables as private in the OpenMP loop body
1195 /// (or other captured region emitted without outlining), and to restore old
1196 /// vars back on exit.
1197 class OMPPrivateScope : public RunCleanupsScope {
1198 OMPMapVars MappedVars;
1199 OMPPrivateScope(const OMPPrivateScope &) = delete;
1200 void operator=(const OMPPrivateScope &) = delete;
1201
1202 public:
1203 /// Enter a new OpenMP private scope.
1205
1206 /// Registers \p LocalVD variable as a private with \p Addr as the address
1207 /// of the corresponding private variable. \p
1208 /// PrivateGen is the address of the generated private variable.
1209 /// \return true if the variable is registered as private, false if it has
1210 /// been privatized already.
1211 bool addPrivate(const VarDecl *LocalVD, Address Addr) {
1212 assert(PerformCleanup && "adding private to dead scope");
1213 return MappedVars.setVarAddr(CGF, LocalVD, Addr);
1214 }
1215
1216 /// Privatizes local variables previously registered as private.
1217 /// Registration is separate from the actual privatization to allow
1218 /// initializers to use the values of the original variables, not the private
1219 /// ones. This is important, for example, if the private variable is a class
1220 /// variable initialized by a constructor that references other private
1221 /// variables. But at initialization original variables must be used, not
1222 /// private copies.
1223 /// \return true if at least one variable was privatized, false otherwise.
1224 bool Privatize() { return MappedVars.apply(CGF); }
1225
1226 void ForceCleanup() {
1227 RunCleanupsScope::ForceCleanup();
1228 restoreMap();
1229 }
1230
1231 /// Exit scope - all the mapped variables are restored.
1232 ~OMPPrivateScope() {
1233 if (PerformCleanup)
1234 ForceCleanup();
1235 }
1236
1237 /// Checks if the global variable is captured in current function.
1238 bool isGlobalVarCaptured(const VarDecl *VD) const {
1239 VD = VD->getCanonicalDecl();
1240 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1241 }
1242
1243 /// Restore all mapped variables w/o clean up. This is useful when we want
1244 /// to reference the original variables but don't want the clean up because
1245 /// that could emit lifetime end too early, causing backend issue #56913.
1246 void restoreMap() { MappedVars.restore(CGF); }
1247 };
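// Example (a minimal sketch): the register-then-privatize protocol described
// above. `OrigVD` is the original VarDecl and `PrivateAddr` a hypothetical
// address of a private copy prepared by OpenMP codegen.
//
//   CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
//   PrivateScope.addPrivate(OrigVD, PrivateAddr); // registration only
//   (void)PrivateScope.Privatize();               // remapping happens here
//   // ... emit the region body using the private copies ...
//   PrivateScope.restoreMap();                    // or let ForceCleanup() do it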
1248
1249 /// Save/restore original map of previously emitted local vars in case when we
1250 /// need to duplicate emission of the same code several times in the same
1251 /// function for OpenMP code.
1252 class OMPLocalDeclMapRAII {
1253 CodeGenFunction &CGF;
1254 DeclMapTy SavedMap;
1255
1256 public:
1257 explicit OMPLocalDeclMapRAII(CodeGenFunction &CGF)
1258 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1259 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1260 };
1261
1262 /// Takes the old cleanup stack size and emits the cleanup blocks
1263 /// that have been added.
1264 void
1265 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
1266 std::initializer_list<llvm::Value **> ValuesToReload = {});
1267
1268 /// Takes the old cleanup stack size and emits the cleanup blocks
1269 /// that have been added, then adds all lifetime-extended cleanups from
1270 /// the given position to the stack.
1271 void
1272 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
1273 size_t OldLifetimeExtendedStackSize,
1274 std::initializer_list<llvm::Value **> ValuesToReload = {});
1275
1276 void ResolveBranchFixups(llvm::BasicBlock *Target);
1277
1278 /// The given basic block lies in the current EH scope, but may be a
1279 /// target of a potentially scope-crossing jump; get a stable handle
1280 /// to which we can perform this jump later.
1281 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
1282 return JumpDest(Target, EHStack.getInnermostNormalCleanup(),
1283 NextCleanupDestIndex++);
1284 }
1285
1286 /// The given basic block lies in the current EH scope, but may be a
1287 /// target of a potentially scope-crossing jump; get a stable handle
1288 /// to which we can perform this jump later.
1289 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1290 return getJumpDestInCurrentScope(createBasicBlock(Name));
1291 }
1292
1293 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1294 /// block through the normal cleanup handling code (if any) and then
1295 /// on to \arg Dest.
1296 void EmitBranchThroughCleanup(JumpDest Dest);
1297
1298 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1299 /// specified destination obviously has no cleanups to run. 'false' is always
1300 /// a conservatively correct answer for this method.
1301 bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
1302
1303 /// popCatchScope - Pops the catch scope at the top of the EHScope
1304 /// stack, emitting any required code (other than the catch handlers
1305 /// themselves).
1306 void popCatchScope();
1307
1308 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1309 llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
1310 llvm::BasicBlock *
1311 getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
1312
1313 /// An object to manage conditionally-evaluated expressions.
1314 class ConditionalEvaluation {
1315 llvm::BasicBlock *StartBB;
1316
1317 public:
1318 ConditionalEvaluation(CodeGenFunction &CGF)
1319 : StartBB(CGF.Builder.GetInsertBlock()) {}
1320
1321 void begin(CodeGenFunction &CGF) {
1322 assert(CGF.OutermostConditional != this);
1323 if (!CGF.OutermostConditional)
1324 CGF.OutermostConditional = this;
1325 }
1326
1327 void end(CodeGenFunction &CGF) {
1328 assert(CGF.OutermostConditional != nullptr);
1329 if (CGF.OutermostConditional == this)
1330 CGF.OutermostConditional = nullptr;
1331 }
1332
1333 /// Returns a block which will be executed prior to each
1334 /// evaluation of the conditional code.
1335 llvm::BasicBlock *getStartingBlock() const { return StartBB; }
1336 };
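// Example (a minimal sketch, assuming the begin()/end() members shown above):
// code emitting one arm of a conditional expression brackets that arm so that
// cleanups pushed inside it know they are conditional.
//
//   CodeGenFunction::ConditionalEvaluation Eval(CGF);
//   Eval.begin(CGF);
//   // ... emit the conditionally executed operand ...
//   Eval.end(CGF);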
1337
1338 /// isInConditionalBranch - Return true if we're currently emitting
1339 /// one branch or the other of a conditional expression.
1340 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1341
1342 void setBeforeOutermostConditional(llvm::Value *value, Address addr,
1343 CodeGenFunction &CGF) {
1344 assert(isInConditionalBranch());
1345 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1346 auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
1347 block->back().getIterator());
1348 store->setAlignment(addr.getAlignment().getAsAlign());
1349 }
1350
1351 /// An RAII object to record that we're evaluating a statement
1352 /// expression.
1353 class StmtExprEvaluation {
1354 CodeGenFunction &CGF;
1355
1356 /// We have to save the outermost conditional: cleanups in a
1357 /// statement expression aren't conditional just because the
1358 /// StmtExpr is.
1359 ConditionalEvaluation *SavedOutermostConditional;
1360
1361 public:
1362 StmtExprEvaluation(CodeGenFunction &CGF)
1363 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1364 CGF.OutermostConditional = nullptr;
1365 }
1366
1367 ~StmtExprEvaluation() {
1368 CGF.OutermostConditional = SavedOutermostConditional;
1369 CGF.EnsureInsertPoint();
1370 }
1371 };
1372
1373 /// An object which temporarily prevents a value from being
1374 /// destroyed by aggressive peephole optimizations that assume that
1375 /// all uses of a value have been realized in the IR.
1376 class PeepholeProtection {
1377 llvm::Instruction *Inst = nullptr;
1378 friend class CodeGenFunction;
1379
1380 public:
1382 };
1383
1384 /// A non-RAII class containing all the information about a bound
1385 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1386 /// this which makes individual mappings very simple; using this
1387 /// class directly is useful when you have a variable number of
1388 /// opaque values or don't want the RAII functionality for some
1389 /// reason.
1390 class OpaqueValueMappingData {
1391 const OpaqueValueExpr *OpaqueValue;
1392 bool BoundLValue;
1394
1395 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
1396 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1397
1398 public:
1399 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1400
1401 static bool shouldBindAsLValue(const Expr *expr) {
1402 // gl-values should be bound as l-values for obvious reasons.
1403 // Records should be bound as l-values because IR generation
1404 // always keeps them in memory. Expressions of function type
1405 // act exactly like l-values but are formally required to be
1406 // r-values in C.
1407 return expr->isGLValue() || expr->getType()->isFunctionType() ||
1408 hasAggregateEvaluationKind(expr->getType());
1409 }
1410
1411 static OpaqueValueMappingData
1412 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) {
1413 if (shouldBindAsLValue(ov))
1414 return bind(CGF, ov, CGF.EmitLValue(e));
1415 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1416 }
1417
1418 static OpaqueValueMappingData
1419 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) {
1420 assert(shouldBindAsLValue(ov));
1421 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1422 return OpaqueValueMappingData(ov, true);
1423 }
1424
1425 static OpaqueValueMappingData
1426 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) {
1427 assert(!shouldBindAsLValue(ov));
1428 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1429
1430 OpaqueValueMappingData data(ov, false);
1431
1432 // Work around an extremely aggressive peephole optimization in
1433 // EmitScalarConversion which assumes that all other uses of a
1434 // value are extant.
1435 data.Protection = CGF.protectFromPeepholes(rv);
1436
1437 return data;
1438 }
1439
1440 bool isValid() const { return OpaqueValue != nullptr; }
1441 void clear() { OpaqueValue = nullptr; }
1442
1444 assert(OpaqueValue && "no data to unbind!");
1445
1446 if (BoundLValue) {
1447 CGF.OpaqueLValues.erase(OpaqueValue);
1448 } else {
1449 CGF.OpaqueRValues.erase(OpaqueValue);
1450 CGF.unprotectFromPeepholes(Protection);
1451 }
1452 }
1453 };
1454
1455 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1456 class OpaqueValueMapping {
1457 CodeGenFunction &CGF;
1459
1460 public:
1461 static bool shouldBindAsLValue(const Expr *expr) {
1462 return OpaqueValueMappingData::shouldBindAsLValue(expr);
1463 }
1464
1465 /// Build the opaque value mapping for the given conditional
1466 /// operator if it's the GNU ?: extension. This is a common
1467 /// enough pattern that the convenience operator is really
1468 /// helpful.
1469 ///
1472 : CGF(CGF) {
1473 if (isa<ConditionalOperator>(op))
1474 // Leave Data empty.
1475 return;
1476
1477 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1478 Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1479 e->getCommon());
1480 }
1481
1482 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1483 /// expression is set to the expression the OVE represents.
1485 : CGF(CGF) {
1486 if (OV) {
1487 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1488 "for OVE with no source expression");
1489 Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1490 }
1491 }
1492
1494 LValue lvalue)
1495 : CGF(CGF),
1496 Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {}
1497
1499 RValue rvalue)
1500 : CGF(CGF),
1501 Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {}
1502
1503 void pop() {
1504 Data.unbind(CGF);
1505 Data.clear();
1506 }
1507
1509 if (Data.isValid())
1510 Data.unbind(CGF);
1511 }
1512 };
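// Example (a minimal sketch): binding an OpaqueValueExpr for the duration of
// emitting an expression that refers to it. `OVE` and `Init` are hypothetical.
//
//   CodeGenFunction::OpaqueValueMapping Binding(CGF, OVE, CGF.EmitLValue(Init));
//   // any emission of OVE inside this scope sees the bound lvalue;
//   // the mapping is removed when Binding is destroyed (or via Binding.pop())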
1513
1514private:
1515 CGDebugInfo *DebugInfo;
1516 /// Used to create unique names for artificial VLA size debug info variables.
1517 unsigned VLAExprCounter = 0;
1518 bool DisableDebugInfo = false;
1519
1520 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1521 /// calling llvm.stacksave for multiple VLAs in the same scope.
1522 bool DidCallStackSave = false;
1523
1524 /// IndirectBranch - The first time an indirect goto is seen we create a block
1525 /// with an indirect branch. Every time we see the address of a label taken,
1526 /// we add the label to the indirect goto. Every subsequent indirect goto is
1527 /// codegen'd as a jump to the IndirectBranch's basic block.
1528 llvm::IndirectBrInst *IndirectBranch = nullptr;
1529
1530 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1531 /// decls.
1532 DeclMapTy LocalDeclMap;
1533
1534 // Keep track of the cleanups for callee-destructed parameters pushed to the
1535 // cleanup stack so that they can be deactivated later.
1536 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1537 CalleeDestructedParamCleanups;
1538
1539 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1540 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1541 /// parameter.
1542 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1543 SizeArguments;
1544
1545 /// Track escaped local variables with auto storage. Used during SEH
1546 /// outlining to produce a call to llvm.localescape.
1547 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1548
1549 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1550 llvm::DenseMap<const LabelDecl *, JumpDest> LabelMap;
1551
1552 // BreakContinueStack - This keeps track of where break and continue
1553 // statements should jump to.
1554 struct BreakContinue {
1555 BreakContinue(JumpDest Break, JumpDest Continue)
1556 : BreakBlock(Break), ContinueBlock(Continue) {}
1557
1558 JumpDest BreakBlock;
1559 JumpDest ContinueBlock;
1560 };
1561 SmallVector<BreakContinue, 8> BreakContinueStack;
1562
1563 /// Handles cancellation exit points in OpenMP-related constructs.
1564 class OpenMPCancelExitStack {
1565 /// Tracks cancellation exit point and join point for cancel-related exit
1566 /// and normal exit.
1567 struct CancelExit {
1568 CancelExit() = default;
1569 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1570 JumpDest ContBlock)
1571 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1572 OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
1573 /// true if the exit block has been emitted already by the special
1574 /// emitExit() call, false if the default codegen is used.
1575 bool HasBeenEmitted = false;
1576 JumpDest ExitBlock;
1577 JumpDest ContBlock;
1578 };
1579
1580 SmallVector<CancelExit, 8> Stack;
1581
1582 public:
1583 OpenMPCancelExitStack() : Stack(1) {}
1584 ~OpenMPCancelExitStack() = default;
1585 /// Fetches the exit block for the current OpenMP construct.
1586 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1587 /// Emits exit block with special codegen procedure specific for the related
1588 /// OpenMP construct + emits code for normal construct cleanup.
1589 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1590 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1591 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1592 assert(CGF.getOMPCancelDestination(Kind).isValid());
1593 assert(CGF.HaveInsertPoint());
1594 assert(!Stack.back().HasBeenEmitted);
1595 auto IP = CGF.Builder.saveAndClearIP();
1596 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1597 CodeGen(CGF);
1598 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1599 CGF.Builder.restoreIP(IP);
1600 Stack.back().HasBeenEmitted = true;
1601 }
1602 CodeGen(CGF);
1603 }
1604 /// Enter the cancel supporting \a Kind construct.
1605 /// \param Kind OpenMP directive that supports cancel constructs.
1606 /// \param HasCancel true, if the construct has inner cancel directive,
1607 /// false otherwise.
1608 void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1609 Stack.push_back({Kind,
1610 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1611 : JumpDest(),
1612 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1613 : JumpDest()});
1614 }
1615 /// Emits default exit point for the cancel construct (if the special one
1616 /// has not been used) + join point for cancel/normal exits.
1617 void exit(CodeGenFunction &CGF) {
1618 if (getExitBlock().isValid()) {
1619 assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1620 bool HaveIP = CGF.HaveInsertPoint();
1621 if (!Stack.back().HasBeenEmitted) {
1622 if (HaveIP)
1623 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1624 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1625 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1626 }
1627 CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1628 if (!HaveIP) {
1629 CGF.Builder.CreateUnreachable();
1630 CGF.Builder.ClearInsertionPoint();
1631 }
1632 }
1633 Stack.pop_back();
1634 }
1635 };
1636 OpenMPCancelExitStack OMPCancelStack;
1637
1638 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1639 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1640 Stmt::Likelihood LH);
1641
1642 std::unique_ptr<CodeGenPGO> PGO;
1643
1644 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1645 Address MCDCCondBitmapAddr = Address::invalid();
1646
1647 /// Calculate branch weights appropriate for PGO data
1648 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1649 uint64_t FalseCount) const;
1650 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1651 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1652 uint64_t LoopCount) const;
1653
1654public:
1655 std::pair<bool, bool> getIsCounterPair(const Stmt *S) const;
1656 void markStmtAsUsed(bool Skipped, const Stmt *S);
1657 void markStmtMaybeUsed(const Stmt *S);
1658
1659 /// Increment the profiler's counter for the given statement by \p StepV.
1660 /// If \p StepV is null, the default increment is 1.
1661 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr);
1662
1664 return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
1665 CGM.getCodeGenOpts().MCDCCoverage &&
1666 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1667 }
1668
1669 /// Allocate a temp value on the stack that MCDC can use to track condition
1670 /// results.
1671 void maybeCreateMCDCCondBitmap();
1672
1673 bool isBinaryLogicalOp(const Expr *E) const {
1674 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1675 return (BOp && BOp->isLogicalOp());
1676 }
1677
1678 /// Zero-init the MCDC temp value.
1679 void maybeResetMCDCCondBitmap(const Expr *E);
1680
1681 /// Increment the profiler's counter for the given expression by \p StepV.
1682 /// If \p StepV is null, the default increment is 1.
1683 void maybeUpdateMCDCTestVectorBitmap(const Expr *E);
1684
1685 /// Update the MCDC temp value with the condition's evaluated result.
1686 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val);
1687
1688 /// Get the profiler's count for the given statement.
1689 uint64_t getProfileCount(const Stmt *S);
1690
1691 /// Set the profiler's current count.
1692 void setCurrentProfileCount(uint64_t Count);
1693
1694 /// Get the profiler's current count. This is generally the count for the most
1695 /// recently incremented counter.
1696 uint64_t getCurrentProfileCount();
1697
1698 /// See CGDebugInfo::addInstToCurrentSourceAtom.
1699 void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction,
1700 llvm::Value *Backup);
1701
1702 /// See CGDebugInfo::addInstToSpecificSourceAtom.
1703 void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction,
1704 llvm::Value *Backup, uint64_t Atom);
1705
1706 /// Add \p KeyInstruction and an optional \p Backup instruction to a new atom
1707 /// group (See ApplyAtomGroup for more info).
1708 void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
1709 llvm::Value *Backup);
1710
1711private:
1712 /// SwitchInsn - This is the nearest enclosing switch instruction. It is
1713 /// null if the current context is not in a switch.
1714 llvm::SwitchInst *SwitchInsn = nullptr;
1715 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1716 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1717
1718 /// The likelihood attributes of the SwitchCase.
1719 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1720
1721 /// CaseRangeBlock - This block holds the if-condition check for the last case
1722 /// statement range in the current switch instruction.
1723 llvm::BasicBlock *CaseRangeBlock = nullptr;
1724
1725 /// OpaqueLValues - Keeps track of the current set of opaque value
1726 /// expressions.
1727 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1728 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1729
1730 // VLASizeMap - This keeps track of the associated size for each VLA type.
1731 // We track this by the size expression rather than the type itself because
1732 // in certain situations, like a const qualifier applied to a VLA typedef,
1733 // multiple VLA types can share the same size expression.
1734 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1735 // enter/leave scopes.
1736 llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;
1737
1738 /// A block containing a single 'unreachable' instruction. Created
1739 /// lazily by getUnreachableBlock().
1740 llvm::BasicBlock *UnreachableBlock = nullptr;
1741
1742 /// Count of the number of return expressions in the function.
1743 unsigned NumReturnExprs = 0;
1744
1745 /// Count of the number of simple (constant) return expressions in the function.
1746 unsigned NumSimpleReturnExprs = 0;
1747
1748 /// The last regular (non-return) debug location (breakpoint) in the function.
1749 SourceLocation LastStopPoint;
1750
1751public:
1752 /// Source location information about the default argument or member
1753 /// initializer expression we're evaluating, if any.
1757
1758 /// A scope within which we are constructing the fields of an object which
1759 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1760 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1762 public:
1764 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1765 CGF.CXXDefaultInitExprThis = This;
1766 }
1768 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1769 }
1770
1771 private:
1772 CodeGenFunction &CGF;
1773 Address OldCXXDefaultInitExprThis;
1774 };
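
  // A minimal usage sketch (illustrative; the scope's class name and
  // constructor signature are assumed here, and ThisAddr is a placeholder for
  // the address of the object whose fields are being constructed):
  //
  //   {
  //     FieldConstructionScope Scope(*this, ThisAddr);
  //     // Emit the field initializers; any CXXDefaultInitExpr evaluated in
  //     // here can now find the stashed 'this' value.
  //   } // leaving the scope restores the previous CXXDefaultInitExprThis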
1775
1776 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1777 /// is overridden to be the object under construction.
1779 public:
1781 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1782 OldCXXThisAlignment(CGF.CXXThisAlignment),
1783 SourceLocScope(E, CGF.CurSourceLocExprScope) {
1784 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1785 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1786 }
1788 CGF.CXXThisValue = OldCXXThisValue;
1789 CGF.CXXThisAlignment = OldCXXThisAlignment;
1790 }
1791
1792 public:
1794 llvm::Value *OldCXXThisValue;
1797 };
1798
1801 : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
1802 };
1803
1804 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1805 /// current loop index is overridden.
1807 public:
1808 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1809 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1810 CGF.ArrayInitIndex = Index;
1811 }
1812 ~ArrayInitLoopExprScope() { CGF.ArrayInitIndex = OldArrayInitIndex; }
1813
1814 private:
1815 CodeGenFunction &CGF;
1816 llvm::Value *OldArrayInitIndex;
1817 };
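
  // A minimal usage sketch (illustrative; IndexV stands for the loop index
  // value computed for the current iteration):
  //
  //   ArrayInitLoopExprScope IndexScope(*this, IndexV);
  //   // While IndexScope is alive, ArrayInitIndexExprs evaluate to IndexV
  //   // (see getArrayInitIndex()); emit the element initializer here.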
1818
1820 public:
1822 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1823 OldCurCodeDecl(CGF.CurCodeDecl),
1824 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1825 OldCXXABIThisValue(CGF.CXXABIThisValue),
1826 OldCXXThisValue(CGF.CXXThisValue),
1827 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1828 OldCXXThisAlignment(CGF.CXXThisAlignment),
1829 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1830 OldCXXInheritedCtorInitExprArgs(
1831 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1832 CGF.CurGD = GD;
1833 CGF.CurFuncDecl = CGF.CurCodeDecl =
1834 cast<CXXConstructorDecl>(GD.getDecl());
1835 CGF.CXXABIThisDecl = nullptr;
1836 CGF.CXXABIThisValue = nullptr;
1837 CGF.CXXThisValue = nullptr;
1838 CGF.CXXABIThisAlignment = CharUnits();
1839 CGF.CXXThisAlignment = CharUnits();
1840 CGF.ReturnValue = Address::invalid();
1841 CGF.FnRetTy = QualType();
1842 CGF.CXXInheritedCtorInitExprArgs.clear();
1843 }
1845 CGF.CurGD = OldCurGD;
1846 CGF.CurFuncDecl = OldCurFuncDecl;
1847 CGF.CurCodeDecl = OldCurCodeDecl;
1848 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1849 CGF.CXXABIThisValue = OldCXXABIThisValue;
1850 CGF.CXXThisValue = OldCXXThisValue;
1851 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1852 CGF.CXXThisAlignment = OldCXXThisAlignment;
1853 CGF.ReturnValue = OldReturnValue;
1854 CGF.FnRetTy = OldFnRetTy;
1855 CGF.CXXInheritedCtorInitExprArgs =
1856 std::move(OldCXXInheritedCtorInitExprArgs);
1857 }
1858
1859 private:
1860 CodeGenFunction &CGF;
1861 GlobalDecl OldCurGD;
1862 const Decl *OldCurFuncDecl;
1863 const Decl *OldCurCodeDecl;
1864 ImplicitParamDecl *OldCXXABIThisDecl;
1865 llvm::Value *OldCXXABIThisValue;
1866 llvm::Value *OldCXXThisValue;
1867 CharUnits OldCXXABIThisAlignment;
1868 CharUnits OldCXXThisAlignment;
1869 Address OldReturnValue;
1870 QualType OldFnRetTy;
1871 CallArgList OldCXXInheritedCtorInitExprArgs;
1872 };
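
  // A minimal usage sketch (illustrative; the scope's class name is assumed,
  // and Ctor is a placeholder for the GlobalDecl of the inherited constructor
  // being inlined):
  //
  //   {
  //     InlinedInheritingConstructorScope Scope(*this, Ctor);
  //     // Set up 'this', bind the forwarded arguments, and emit the
  //     // constructor body as if we were inside Ctor.
  //   } // leaving the scope restores CurGD, CXXThisValue, ReturnValue, etc.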
1873
1874 // Helper class for the OpenMP IR Builder. Allows reuse of the code used for
1875 // region body and finalization codegen callbacks. This class will also
1876 // contain the privatization functions used by the privatization callbacks.
1877 //
1878 // TODO: this is a temporary class for things that are being moved out of
1879 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1880 // utility functions for use with the OMPBuilder. Once the move to the
1881 // OMPBuilder is done, everything here will either become part of
1882 // CodeGenFunction directly, or move into a new helper class containing
1883 // functions used by both this class and the OMPBuilder.
1884
1886
1890
1891 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1892
1893 /// Cleanup action for allocate support.
1895
1896 private:
1897 llvm::CallInst *RTLFnCI;
1898
1899 public:
1900 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1901 RLFnCI->removeFromParent();
1902 }
1903
1904 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1905 if (!CGF.HaveInsertPoint())
1906 return;
1907 CGF.Builder.Insert(RTLFnCI);
1908 }
1909 };
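
  // A minimal usage sketch (illustrative; FreeFn and Addr are placeholders for
  // the deferred runtime call and its argument):
  //
  //   llvm::CallInst *FreeCall = CGF.Builder.CreateCall(FreeFn, {Addr});
  //   // The cleanup detaches FreeCall immediately and re-inserts it when the
  //   // enclosing scope's cleanups are emitted.
  //   CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCall);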
1910
1911 /// Returns the address of the threadprivate variable for the current
1912 /// thread. This also creates any necessary OMP runtime calls.
1913 ///
1914 /// \param VD The VarDecl for the threadprivate variable.
1915 /// \param VDAddr The address of the VarDecl.
1916 /// \param Loc The location where the directive was encountered.
1917 static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
1918 const VarDecl *VD, Address VDAddr,
1920
1921 /// Gets the OpenMP-specific address of the local variable \p VD.
1922 static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
1923 const VarDecl *VD);
1924 /// Get a name assembled from \p Parts using the given separators.
1925 /// \param Parts The different parts of the final name that need separation.
1926 /// \param FirstSeparator The separator used between the initial two
1927 /// parts of the name.
1928 /// \param Separator The separator used between all of the remaining
1929 /// consecutive parts of the name.
1930 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1931 StringRef FirstSeparator = ".",
1932 StringRef Separator = ".");
1933 /// Emit the Finalization for an OMP region
1934 /// \param CGF The Codegen function this belongs to
1935 /// \param IP Insertion point for generating the finalization code.
1937 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1938 assert(IP.getBlock()->end() != IP.getPoint() &&
1939 "OpenMP IR Builder should cause terminated block!");
1940
1941 llvm::BasicBlock *IPBB = IP.getBlock();
1942 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1943 assert(DestBB && "Finalization block should have one successor!");
1944
1945 // erase and replace with cleanup branch.
1946 IPBB->getTerminator()->eraseFromParent();
1947 CGF.Builder.SetInsertPoint(IPBB);
1949 CGF.EmitBranchThroughCleanup(Dest);
1950 }
1951
1952 /// Emit the body of an OMP region
1953 /// \param CGF The Codegen function this belongs to
1954 /// \param RegionBodyStmt The body statement for the OpenMP region being
1955 /// generated
1956 /// \param AllocaIP Where to insert alloca instructions
1957 /// \param CodeGenIP Where to insert the region code
1958 /// \param RegionName Name to be used for new blocks
1959 static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
1960 const Stmt *RegionBodyStmt,
1961 InsertPointTy AllocaIP,
1962 InsertPointTy CodeGenIP,
1963 Twine RegionName);
1964
1965 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1966 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1968 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1969 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1970 CodeGenIPBBTI->eraseFromParent();
1971
1972 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1973
1974 if (Fn->doesNotThrow())
1975 CGF.EmitNounwindRuntimeCall(Fn, Args);
1976 else
1977 CGF.EmitRuntimeCall(Fn, Args);
1978
1979 if (CGF.Builder.saveIP().isSet())
1980 CGF.Builder.CreateBr(&FiniBB);
1981 }
1982
1983 /// Emit the body of an OMP region that will be outlined in
1984 /// OpenMPIRBuilder::finalize().
1985 /// \param CGF The Codegen function this belongs to
1986 /// \param RegionBodyStmt The body statement for the OpenMP region being
1987 /// generated
1988 /// \param AllocaIP Where to insert alloca instructions
1989 /// \param CodeGenIP Where to insert the region code
1990 /// \param RegionName Name to be used for new blocks
1991 static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
1992 const Stmt *RegionBodyStmt,
1993 InsertPointTy AllocaIP,
1994 InsertPointTy CodeGenIP,
1995 Twine RegionName);
1996
1997 /// RAII for preserving necessary info during Outlined region body codegen.
1999
2000 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2001 CodeGenFunction::JumpDest OldReturnBlock;
2002 CodeGenFunction &CGF;
2003
2004 public:
2006 llvm::BasicBlock &RetBB)
2007 : CGF(cgf) {
2008 assert(AllocaIP.isSet() &&
2009 "Must specify Insertion point for allocas of outlined function");
2010 OldAllocaIP = CGF.AllocaInsertPt;
2011 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2012
2013 OldReturnBlock = CGF.ReturnBlock;
2014 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2015 }
2016
2018 CGF.AllocaInsertPt = OldAllocaIP;
2019 CGF.ReturnBlock = OldReturnBlock;
2020 }
2021 };
2022
2023 /// RAII for preserving necessary info during inlined region body codegen.
2025
2026 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2027 CodeGenFunction &CGF;
2028
2029 public:
2031 llvm::BasicBlock &FiniBB)
2032 : CGF(cgf) {
2033 // The alloca insertion block should be in the entry block of the containing
2034 // function, so this expects either an empty AllocaIP (in which case the old
2035 // alloca insertion point is reused) or a new AllocaIP in the same block as
2036 // the old one.
2037 assert((!AllocaIP.isSet() ||
2038 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2039 "Insertion point should be in the entry block of containing "
2040 "function!");
2041 OldAllocaIP = CGF.AllocaInsertPt;
2042 if (AllocaIP.isSet())
2043 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2044
2045 // TODO: Remove the call, after making sure the counter is not used by
2046 // the EHStack.
2047 // Since this is an inlined region, it should not modify the
2048 // ReturnBlock, and should reuse the one for the enclosing outlined
2049 // region. So, the JumpDest returned by the function is discarded.
2050 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2051 }
2052
2054 };
2055 };
2056
2057private:
2058 /// CXXABIThisDecl - When generating code for a C++ member function,
2059 /// this will hold the implicit 'this' declaration.
2060 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2061 llvm::Value *CXXABIThisValue = nullptr;
2062 llvm::Value *CXXThisValue = nullptr;
2063 CharUnits CXXABIThisAlignment;
2064 CharUnits CXXThisAlignment;
2065
2066 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2067 /// this expression.
2068 Address CXXDefaultInitExprThis = Address::invalid();
2069
2070 /// The current array initialization index when evaluating an
2071 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2072 llvm::Value *ArrayInitIndex = nullptr;
2073
2074 /// The values of function arguments to use when evaluating
2075 /// CXXInheritedCtorInitExprs within this context.
2076 CallArgList CXXInheritedCtorInitExprArgs;
2077
2078 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2079 /// destructor, this will hold the implicit argument (e.g. VTT).
2080 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2081 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2082
2083 /// OutermostConditional - Points to the outermost active
2084 /// conditional control. This is used so that we know if a
2085 /// temporary should be destroyed conditionally.
2086 ConditionalEvaluation *OutermostConditional = nullptr;
2087
2088 /// The current lexical scope.
2089 LexicalScope *CurLexicalScope = nullptr;
2090
2091 /// The current source location that should be used for exception
2092 /// handling code.
2093 SourceLocation CurEHLocation;
2094
2095 /// BlockByrefInfos - For each __block variable, contains
2096 /// information about the layout of the variable.
2097 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2098
2099 /// Used by -fsanitize=nullability-return to determine whether the return
2100 /// value can be checked.
2101 llvm::Value *RetValNullabilityPrecondition = nullptr;
2102
2103 /// Check if -fsanitize=nullability-return instrumentation is required for
2104 /// this function.
2105 bool requiresReturnValueNullabilityCheck() const {
2106 return RetValNullabilityPrecondition;
2107 }
2108
2109 /// Used to store precise source locations for return statements by the
2110 /// runtime return value checks.
2111 Address ReturnLocation = Address::invalid();
2112
2113 /// Check if the return value of this function requires sanitization.
2114 bool requiresReturnValueCheck() const;
2115
2116 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2117 bool hasInAllocaArg(const CXXMethodDecl *MD);
2118
2119 llvm::BasicBlock *TerminateLandingPad = nullptr;
2120 llvm::BasicBlock *TerminateHandler = nullptr;
2122
2123 /// Terminate funclets keyed by parent funclet pad.
2124 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2125
2126 /// Largest vector width used in this function. Will be used to create a
2127 /// function attribute.
2128 unsigned LargestVectorWidth = 0;
2129
2130 /// True if we need to emit lifetime markers. This is initially set in
2131 /// the constructor, but could be overwritten to true if this is a coroutine.
2132 bool ShouldEmitLifetimeMarkers;
2133
2134 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2135 /// the function metadata.
2136 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2137
2138public:
2139 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext = false);
2140 ~CodeGenFunction();
2141
2142 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2143 ASTContext &getContext() const { return CGM.getContext(); }
2145 if (DisableDebugInfo)
2146 return nullptr;
2147 return DebugInfo;
2148 }
2149 void disableDebugInfo() { DisableDebugInfo = true; }
2150 void enableDebugInfo() { DisableDebugInfo = false; }
2151
2153 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2154 }
2155
2156 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2157
2158 /// Returns a pointer to the function's exception object and selector slot,
2159 /// which is assigned in every landing pad.
2160 Address getExceptionSlot();
2161 Address getEHSelectorSlot();
2162
2163 /// Returns the contents of the function's exception object and selector
2164 /// slots.
2165 llvm::Value *getExceptionFromSlot();
2166 llvm::Value *getSelectorFromSlot();
2167
2168 RawAddress getNormalCleanupDestSlot();
2169
2170 llvm::BasicBlock *getUnreachableBlock() {
2171 if (!UnreachableBlock) {
2172 UnreachableBlock = createBasicBlock("unreachable");
2173 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2174 }
2175 return UnreachableBlock;
2176 }
2177
2178 llvm::BasicBlock *getInvokeDest() {
2179 if (!EHStack.requiresLandingPad())
2180 return nullptr;
2181 return getInvokeDestImpl();
2182 }
2183
2184 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2185
2186 const TargetInfo &getTarget() const { return Target; }
2187 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2189 return CGM.getTargetCodeGenInfo();
2190 }
2191
2192 //===--------------------------------------------------------------------===//
2193 // Cleanups
2194 //===--------------------------------------------------------------------===//
2195
2196 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2197
2198 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2199 Address arrayEndPointer,
2200 QualType elementType,
2201 CharUnits elementAlignment,
2202 Destroyer *destroyer);
2203 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2204 llvm::Value *arrayEnd,
2205 QualType elementType,
2206 CharUnits elementAlignment,
2207 Destroyer *destroyer);
2208
2209 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
2210 QualType type);
2211 void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr,
2212 QualType type);
2213 void pushDestroy(CleanupKind kind, Address addr, QualType type,
2214 Destroyer *destroyer, bool useEHCleanupForArray);
2215 void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
2216 Address addr, QualType type);
2217 void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
2218 QualType type, Destroyer *destroyer,
2219 bool useEHCleanupForArray);
2220 void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
2221 QualType type, Destroyer *destroyer,
2222 bool useEHCleanupForArray);
2223 void pushLifetimeExtendedDestroy(QualType::DestructionKind dtorKind,
2224 Address addr, QualType type);
2225 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2226 llvm::Value *CompletePtr,
2227 QualType ElementType);
2228 void pushStackRestore(CleanupKind kind, Address SPMem);
2229 void pushKmpcAllocFree(CleanupKind Kind,
2230 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2231 void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
2232 bool useEHCleanupForArray);
2233 llvm::Function *generateDestroyHelper(Address addr, QualType type,
2234 Destroyer *destroyer,
2235 bool useEHCleanupForArray,
2236 const VarDecl *VD);
2237 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2238 QualType elementType, CharUnits elementAlign,
2239 Destroyer *destroyer, bool checkZeroLength,
2240 bool useEHCleanup);
2241
2242 Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
2243
2244 /// Determines whether an EH cleanup is required to destroy a type
2245 /// with the given destruction kind.
2247 switch (kind) {
2248 case QualType::DK_none:
2249 return false;
2250 case QualType::DK_cxx_destructor:
2251 case QualType::DK_objc_weak_lifetime:
2252 case QualType::DK_nontrivial_c_struct:
2253 return getLangOpts().Exceptions;
2254 case QualType::DK_objc_strong_lifetime:
2255 return getLangOpts().Exceptions &&
2256 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2257 }
2258 llvm_unreachable("bad destruction kind");
2259 }
2260
2262 return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
2263 }
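
  // A minimal sketch (illustrative) of the usual pattern for registering a
  // destructor cleanup for a local of type Ty at address Addr:
  //
  //   if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
  //     CleanupKind Kind = getCleanupKind(DtorKind);
  //     pushDestroy(Kind, Addr, Ty, getDestroyer(DtorKind),
  //                 /*useEHCleanupForArray=*/Kind & EHCleanup);
  //   }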
2264
2265 //===--------------------------------------------------------------------===//
2266 // Objective-C
2267 //===--------------------------------------------------------------------===//
2268
2269 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
2270
2271 void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
2272
2273 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2274 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
2275 const ObjCPropertyImplDecl *PID);
2276 void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
2277 const ObjCPropertyImplDecl *propImpl,
2278 const ObjCMethodDecl *GetterMethodDecl,
2279 llvm::Constant *AtomicHelperFn);
2280
2281 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
2282 ObjCMethodDecl *MD, bool ctor);
2283
2284 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2285 /// for the given property.
2286 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
2287 const ObjCPropertyImplDecl *PID);
2288 void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
2289 const ObjCPropertyImplDecl *propImpl,
2290 llvm::Constant *AtomicHelperFn);
2291
2292 //===--------------------------------------------------------------------===//
2293 // Block Bits
2294 //===--------------------------------------------------------------------===//
2295
2296 /// Emit block literal.
2297 /// \return an LLVM value which is a pointer to a struct which contains
2298 /// information about the block, including the block invoke function, the
2299 /// captured variables, etc.
2300 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2301
2302 llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info,
2303 const DeclMapTy &ldm,
2304 bool IsLambdaConversionToBlock,
2305 bool BuildGlobalBlock);
2306
2307 /// Check if \p T is a C++ class that has a destructor that can throw.
2308 static bool cxxDestructorCanThrow(QualType T);
2309
2310 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2311 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2312 llvm::Constant *
2313 GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2314 llvm::Constant *
2315 GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2316 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2317
2318 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2319 bool CanThrow);
2320
2321 class AutoVarEmission;
2322
2323 void emitByrefStructureInit(const AutoVarEmission &emission);
2324
2325 /// Enter a cleanup to destroy a __block variable. Note that this
2326 /// cleanup should be a no-op if the variable hasn't left the stack
2327 /// yet; if a cleanup is required for the variable itself, that needs
2328 /// to be done externally.
2329 ///
2330 /// \param Kind Cleanup kind.
2331 ///
2332 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2333 /// structure that will be passed to _Block_object_dispose. When
2334 /// \p LoadBlockVarAddr is true, the address of the field of the block
2335 /// structure that holds the address of the __block structure.
2336 ///
2337 /// \param Flags The flag that will be passed to _Block_object_dispose.
2338 ///
2339 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2340 /// \p Addr to get the address of the __block structure.
2341 void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
2342 bool LoadBlockVarAddr, bool CanThrow);
2343
2344 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2345 llvm::Value *ptr);
2346
2347 Address LoadBlockStruct();
2348 Address GetAddrOfBlockDecl(const VarDecl *var);
2349
2350 /// emitBlockByrefAddress - Computes the location of the
2351 /// data in a variable which is declared as __block.
2352 Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
2353 bool followForward = true);
2354 Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info,
2355 bool followForward, const llvm::Twine &name);
2356
2357 const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
2358
2359 QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
2360
2361 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2362 const CGFunctionInfo &FnInfo);
2363
2364 /// Annotate the function with an attribute that disables TSan checking at
2365 /// runtime.
2366 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2367
2368 /// Emit code for the start of a function.
2369 /// \param Loc The location to be associated with the function.
2370 /// \param StartLoc The location of the function body.
2371 void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn,
2372 const CGFunctionInfo &FnInfo, const FunctionArgList &Args,
2374 SourceLocation StartLoc = SourceLocation());
2375
2376 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
2377
2378 void EmitConstructorBody(FunctionArgList &Args);
2379 void EmitDestructorBody(FunctionArgList &Args);
2380 void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
2381 void EmitFunctionBody(const Stmt *Body);
2382 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2383
2384 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2385 CallArgList &CallArgs,
2386 const CGFunctionInfo *CallOpFnInfo = nullptr,
2387 llvm::Constant *CallOpFn = nullptr);
2388 void EmitLambdaBlockInvokeBody();
2389 void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
2390 void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
2391 CallArgList &CallArgs);
2392 void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
2393 const CGFunctionInfo **ImplFnInfo,
2394 llvm::Function **ImplFn);
2395 void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
2397 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2398 }
2399 void EmitAsanPrologueOrEpilogue(bool Prologue);
2400
2401 /// Emit the unified return block, trying to avoid its emission when
2402 /// possible.
2403 /// \return The debug location of the user written return statement if the
2404 /// return block is avoided.
2405 llvm::DebugLoc EmitReturnBlock();
2406
2407 /// FinishFunction - Complete IR generation of the current function. It is
2408 /// legal to call this function even if there is no current insertion point.
2409 void FinishFunction(SourceLocation EndLoc = SourceLocation());
2410
2411 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2412 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2413
2414 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2415 const ThunkInfo *Thunk, bool IsUnprototyped);
2416
2417 void FinishThunk();
2418
2419 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2420 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2421 llvm::FunctionCallee Callee);
2422
2423 /// Generate a thunk for the given method.
2424 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2425 GlobalDecl GD, const ThunkInfo &Thunk,
2426 bool IsUnprototyped);
2427
2428 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2429 const CGFunctionInfo &FnInfo,
2430 GlobalDecl GD, const ThunkInfo &Thunk);
2431
2432 void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
2433 FunctionArgList &Args);
2434
2435 void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
2436
2437 /// Struct with all information about dynamic [sub]class needed to set vptr.
2438 struct VPtr {
2443 };
2444
2445 /// Initialize the vtable pointer of the given subobject.
2446 void InitializeVTablePointer(const VPtr &vptr);
2447
2449
2451 VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
2452
2453 void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
2454 CharUnits OffsetFromNearestVBase,
2455 bool BaseIsNonVirtualPrimaryBase,
2456 const CXXRecordDecl *VTableClass,
2457 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2458
2459 void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
2460
2461 // VTableAuthMode - whether we guarantee that loading the
2462 // vtable will trap on authentication failure,
2463 // even if the resulting vtable pointer is unused.
2464 enum class VTableAuthMode {
2465 Authenticate,
2466 MustTrap,
2467 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2468 };
2469 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2470 /// to by This.
2471 llvm::Value *
2472 GetVTablePtr(Address This, llvm::Type *VTableTy,
2473 const CXXRecordDecl *VTableClass,
2474 VTableAuthMode AuthMode = VTableAuthMode::Authenticate);
2475
2484 };
2485
2486 /// Derived is the presumed address of an object of type T after a
2487 /// cast. If T is a polymorphic class type, emit a check that the virtual
2488 /// table for Derived belongs to a class derived from T.
2489 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2490 CFITypeCheckKind TCK, SourceLocation Loc);
2491
2492 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2493 /// If vptr CFI is enabled, emit a check that VTable is valid.
2494 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2495 CFITypeCheckKind TCK, SourceLocation Loc);
2496
2497 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2498 /// RD using llvm.type.test.
2499 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2500 CFITypeCheckKind TCK, SourceLocation Loc);
2501
2502 /// If whole-program virtual table optimization is enabled, emit an assumption
2503 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2504 /// enabled, emit a check that VTable is a member of RD's type identifier.
2505 void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
2506 llvm::Value *VTable, SourceLocation Loc);
2507
2508 /// Returns whether we should perform a type checked load when loading a
2509 /// virtual function for virtual calls to members of RD. This is generally
2510 /// true when both vcall CFI and whole-program-vtables are enabled.
2511 bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
2512
2513 /// Emit a type checked load from the given vtable.
2514 llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
2515 llvm::Value *VTable,
2516 llvm::Type *VTableTy,
2517 uint64_t VTableByteOffset);
2518
2519 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2520 /// given phase of destruction for a destructor. The end result
2521 /// should call destructors on members and base classes in reverse
2522 /// order of their construction.
2523 void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
2524
2525 /// ShouldInstrumentFunction - Return true if the current function should be
2526 /// instrumented with __cyg_profile_func_* calls
2527 bool ShouldInstrumentFunction();
2528
2529 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2530 /// should not be instrumented with sanitizers.
2531 bool ShouldSkipSanitizerInstrumentation();
2532
2533 /// ShouldXRayInstrumentFunction - Return true if the current function should be
2534 /// instrumented with XRay nop sleds.
2535 bool ShouldXRayInstrumentFunction() const;
2536
2537 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2538 /// XRay custom event handling calls.
2539 bool AlwaysEmitXRayCustomEvents() const;
2540
2541 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2542 /// XRay typed event handling calls.
2543 bool AlwaysEmitXRayTypedEvents() const;
2544
2545 /// Return a type hash constant for a function instrumented by
2546 /// -fsanitize=function.
2547 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2548
2549 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2550 /// arguments for the given function. This is also responsible for naming the
2551 /// LLVM function arguments.
2552 void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn,
2553 const FunctionArgList &Args);
2554
2555 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2556 /// given temporary. Specify the source location atom group (Key Instructions
2557 /// debug info feature) for the `ret` using \p RetKeyInstructionsSourceAtom.
2558 /// If it's 0, the `ret` will get added to a new source atom group.
2559 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2560 SourceLocation EndLoc,
2561 uint64_t RetKeyInstructionsSourceAtom);
2562
2563 /// Emit a test that checks if the return value \p RV is nonnull.
2564 void EmitReturnValueCheck(llvm::Value *RV);
2565
2566 /// EmitStartEHSpec - Emit the start of the exception spec.
2567 void EmitStartEHSpec(const Decl *D);
2568
2569 /// EmitEndEHSpec - Emit the end of the exception spec.
2570 void EmitEndEHSpec(const Decl *D);
2571
2572 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2573 llvm::BasicBlock *getTerminateLandingPad();
2574
2575 /// getTerminateFunclet - Return a cleanup funclet that just calls
2576 /// terminate.
2577 llvm::BasicBlock *getTerminateFunclet();
2578
2579 /// getTerminateHandler - Return a handler (not a landing pad, just
2580 /// a catch handler) that just calls terminate. This is used when
2581 /// a terminate scope encloses a try.
2582 llvm::BasicBlock *getTerminateHandler();
2583
2584 llvm::Type *ConvertTypeForMem(QualType T);
2585 llvm::Type *ConvertType(QualType T);
2586 llvm::Type *convertTypeForLoadStore(QualType ASTTy,
2587 llvm::Type *LLVMTy = nullptr);
2588 llvm::Type *ConvertType(const TypeDecl *T) {
2589 return ConvertType(getContext().getTypeDeclType(T));
2590 }
2591
2592 /// LoadObjCSelf - Load the value of self. This function is only valid while
2593 /// generating code for an Objective-C method.
2594 llvm::Value *LoadObjCSelf();
2595
2596 /// TypeOfSelfObject - Return type of object that this self represents.
2597 QualType TypeOfSelfObject();
2598
2599 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2600 static TypeEvaluationKind getEvaluationKind(QualType T);
2601
2603 return getEvaluationKind(T) == TEK_Scalar;
2604 }
2605
2607 return getEvaluationKind(T) == TEK_Aggregate;
2608 }
2609
2610 /// createBasicBlock - Create an LLVM basic block.
2611 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2612 llvm::Function *parent = nullptr,
2613 llvm::BasicBlock *before = nullptr) {
2614 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2615 }
2616
2617 /// getJumpDestForLabel - Return the JumpDest (basic block) that the specified
2618 /// label maps to.
2619 JumpDest getJumpDestForLabel(const LabelDecl *S);
2620
2621 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2622 /// another basic block, simplify it. This assumes that no other code could
2623 /// potentially reference the basic block.
2624 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2625
2626 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2627 /// adding a fall-through branch from the current insert block if
2628 /// necessary. It is legal to call this function even if there is no current
2629 /// insertion point.
2630 ///
2631 /// IsFinished - If true, indicates that the caller has finished emitting
2632 /// branches to the given block and does not expect to emit code into it. This
2633 /// means the block can be ignored if it is unreachable.
2634 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished = false);
2635
2636 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2637 /// near its uses, and leave the insertion point in it.
2638 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2639
2640 /// EmitBranch - Emit a branch to the specified basic block from the current
2641 /// insert block, taking care to avoid creation of branches from dummy
2642 /// blocks. It is legal to call this function even if there is no current
2643 /// insertion point.
2644 ///
2645 /// This function clears the current insertion point. The caller should follow
2646 /// calls to this function with calls to Emit*Block prior to generating new
2647 /// code.
2648 void EmitBranch(llvm::BasicBlock *Block);
2649
2650 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2651 /// indicates that the current code being emitted is unreachable.
2652 bool HaveInsertPoint() const { return Builder.GetInsertBlock() != nullptr; }
2653
2654 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2655 /// emitted IR has a place to go. Note that by definition, if this function
2656 /// creates a block then that block is unreachable; callers may do better to
2657 /// detect when no insertion point is defined and simply skip IR generation.
2659 if (!HaveInsertPoint())
2660 EmitBlock(createBasicBlock());
2661 }
2662
2663 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2664 /// specified stmt yet.
2665 void ErrorUnsupported(const Stmt *S, const char *Type);
2666
2667 //===--------------------------------------------------------------------===//
2668 // Helpers
2669 //===--------------------------------------------------------------------===//
2670
2672 llvm::BasicBlock *LHSBlock,
2673 llvm::BasicBlock *RHSBlock,
2674 llvm::BasicBlock *MergeBlock,
2675 QualType MergedType) {
2676 Builder.SetInsertPoint(MergeBlock);
2677 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2678 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2679 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2680 LHS.replaceBasePointer(PtrPhi);
2681 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2682 return LHS;
2683 }
2684
2685 /// Construct an address with the natural alignment of T. If a pointer to T
2686 /// is expected to be signed, the pointer passed to this function must have
2687 /// been signed, and the returned Address will have the pointer authentication
2688 /// information needed to authenticate the signed pointer.
2690 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2691 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2692 TBAAAccessInfo *TBAAInfo = nullptr,
2693 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2694 if (Alignment.isZero())
2695 Alignment =
2696 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2697 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2698 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2699 IsKnownNonNull);
2700 }
2701
2703 AlignmentSource Source = AlignmentSource::Type) {
2704 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2705 CGM.getTBAAAccessInfo(T));
2706 }
2707
2709 TBAAAccessInfo TBAAInfo) {
2710 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2711 }
2712
2713 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2714 AlignmentSource Source = AlignmentSource::Type) {
2715 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2716 LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2717 }
2718
2719 /// Same as MakeAddrLValue above except that the pointer is known to be
2720 /// unsigned.
2721 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2722 AlignmentSource Source = AlignmentSource::Type) {
2723 Address Addr(V, ConvertTypeForMem(T), Alignment);
2724 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2725 CGM.getTBAAAccessInfo(T));
2726 }
2727
2728 LValue
2730 AlignmentSource Source = AlignmentSource::Type) {
2731 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2732 TBAAAccessInfo());
2733 }
2734
2735 /// Given a value of type T* that may not point to a complete object, construct
2736 /// an l-value with the natural pointee alignment of T.
2737 LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2738
2739 LValue
2740 MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
2741 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2742
2743 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2744 /// to be unsigned.
2745 LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);
2746
2747 LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);
2748
2749 Address EmitLoadOfReference(LValue RefLVal,
2750 LValueBaseInfo *PointeeBaseInfo = nullptr,
2751 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2752 LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2753 LValue
2755 AlignmentSource Source = AlignmentSource::Type) {
2756 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2757 CGM.getTBAAAccessInfo(RefTy));
2758 return EmitLoadOfReferenceLValue(RefLVal);
2759 }
2760
2761 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2762 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2763 /// it is loaded from.
2764 Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2765 LValueBaseInfo *BaseInfo = nullptr,
2766 TBAAAccessInfo *TBAAInfo = nullptr);
2767 LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2768
2769private:
2770 struct AllocaTracker {
2771 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2772 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2773
2774 private:
2776 };
2777 AllocaTracker *Allocas = nullptr;
2778
2779 /// CGDecl helper.
2780 void emitStoresForConstant(const VarDecl &D, Address Loc, bool isVolatile,
2781 llvm::Constant *constant, bool IsAutoInit);
2782 /// CGDecl helper.
2783 void emitStoresForZeroInit(const VarDecl &D, Address Loc, bool isVolatile);
2784 /// CGDecl helper.
2785 void emitStoresForPatternInit(const VarDecl &D, Address Loc, bool isVolatile);
2786 /// CGDecl helper.
2787 void emitStoresForInitAfterBZero(llvm::Constant *Init, Address Loc,
2788 bool isVolatile, bool IsAutoInit);
2789
2790public:
2791 // Captures all the allocas created during the scope of its RAII object.
2794 : CGF(CGF), OldTracker(CGF.Allocas) {
2795 CGF.Allocas = &Tracker;
2796 }
2797 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2798
2799 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2800
2801 private:
2802 CodeGenFunction &CGF;
2803 AllocaTracker *OldTracker;
2804 AllocaTracker Tracker;
2805 };
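
  // A minimal usage sketch (illustrative; E is some expression whose emission
  // may create temporaries):
  //
  //   llvm::SmallVector<llvm::AllocaInst *> Allocas;
  //   {
  //     AllocaTrackerRAII Tracker(CGF);
  //     CGF.EmitIgnoredExpr(E);
  //     Allocas = Tracker.Take();   // allocas created while the tracker was active
  //   }                             // the previous tracker (if any) is restored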
2806
2807private:
2808 /// If \p Alloca is not in the same address space as \p DestLangAS, insert an
2809 /// address space cast and return a new RawAddress based on this value.
2810 RawAddress MaybeCastStackAddressSpace(RawAddress Alloca, LangAS DestLangAS,
2811 llvm::Value *ArraySize = nullptr);
2812
2813public:
2814 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2815 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2816 /// insertion point of the builder. The caller is responsible for setting an
2817 /// appropriate alignment on
2818 /// the alloca.
2819 ///
2820 /// \p ArraySize is the number of array elements to be allocated if it
2821 /// is not nullptr.
2822 ///
2823 /// LangAS::Default is the address space of pointers to local variables and
2824 /// temporaries, as exposed in the source language. In certain
2825 /// configurations, this is not the same as the alloca address space, and a
2826 /// cast is needed to lift the pointer from the alloca AS into
2827 /// LangAS::Default. This can happen when the target uses a restricted
2828 /// address space for the stack but the source language requires
2829 /// LangAS::Default to be a generic address space. The latter condition is
2830 /// common for most programming languages; OpenCL is an exception in that
2831 /// LangAS::Default is the private address space, which naturally maps
2832 /// to the stack.
2833 ///
2834 /// Because the address of a temporary is often exposed to the program in
2835 /// various ways, this function will perform the cast. The original alloca
2836 /// instruction is returned through \p Alloca if it is not nullptr.
2837 ///
2838 /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2839 /// more efficient if the caller knows that the address will not be exposed.
2840 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2841 llvm::Value *ArraySize = nullptr);
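
  // A minimal sketch (illustrative) of the distinction described above;
  // SomeTy is a placeholder QualType:
  //
  //   // Raw alloca in the target's alloca address space; suitable when the
  //   // pointer is never exposed to user code.
  //   llvm::AllocaInst *Scratch = CreateTempAlloca(CGM.Int8Ty, "scratch");
  //
  //   // Temporary whose address may escape: the returned address has already
  //   // been cast from the alloca address space into LangAS::Default.
  //   RawAddress Tmp = CreateMemTemp(SomeTy, "tmp");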
2842
2843 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2844 /// block. The alloca is casted to the address space of \p UseAddrSpace if
2845 /// necessary.
2846 RawAddress CreateTempAlloca(llvm::Type *Ty, LangAS UseAddrSpace,
2847 CharUnits align, const Twine &Name = "tmp",
2848 llvm::Value *ArraySize = nullptr,
2849 RawAddress *Alloca = nullptr);
2850
2851 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2852 /// block. The alloca is casted to default address space if necessary.
2853 ///
2854 /// FIXME: This version should be removed, and the context should provide
2855 /// the address space to use instead of the default.
2857 const Twine &Name = "tmp",
2858 llvm::Value *ArraySize = nullptr,
2859 RawAddress *Alloca = nullptr) {
2860 return CreateTempAlloca(Ty, LangAS::Default, align, Name, ArraySize,
2861 Alloca);
2862 }
2863
2864 RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2865 const Twine &Name = "tmp",
2866 llvm::Value *ArraySize = nullptr);
2867
2868 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2869 /// default ABI alignment of the given LLVM type.
2870 ///
2871 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2872 /// any given AST type that happens to have been lowered to the
2873 /// given IR type. This should only ever be used for function-local,
2874 /// IR-driven manipulations like saving and restoring a value. Do
2875 /// not hand this address off to arbitrary IRGen routines, and especially
2876 /// do not pass it as an argument to a function that might expect a
2877 /// properly ABI-aligned value.
2878 RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2879 const Twine &Name = "tmp");
2880
2881 /// CreateIRTemp - Create a temporary IR object of the given type, with
2882 /// appropriate alignment. This routine should only be used when a temporary
2883 /// value needs to be stored into an alloca (for example, to avoid explicit
2884 /// PHI construction), but the type is the IR type, not the type appropriate
2885 /// for storing in memory.
2886 ///
2887 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2888 /// ConvertType instead of ConvertTypeForMem.
2889 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2890
2891 /// CreateMemTemp - Create a temporary memory object of the given type, with
2892 /// appropriate alignment and cast it to the default address space. Returns
2893 /// the original alloca instruction by \p Alloca if it is not nullptr.
2894 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2895 RawAddress *Alloca = nullptr);
2896 RawAddress CreateMemTemp(QualType T, CharUnits Align,
2897 const Twine &Name = "tmp",
2898 RawAddress *Alloca = nullptr);
2899
2900 /// CreateMemTemp - Create a temporary memory object of the given type, with
2901 /// appropriate alignment, without casting it to the default address space.
2902 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2903 RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
2904 const Twine &Name = "tmp");
2905
2906 /// CreateAggTemp - Create a temporary memory object for the given
2907 /// aggregate type.
2908 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2909 RawAddress *Alloca = nullptr) {
2910 return AggValueSlot::forAddr(
2911 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2912 AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
2913 AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
2914 }
2915
2916 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2917 /// expression and compare the result against zero, returning an Int1Ty value.
2918 llvm::Value *EvaluateExprAsBool(const Expr *E);
2919
2920 /// Retrieve the implicit cast expression of the rhs in a binary operator
2921 /// expression by passing pointers to Value and QualType.
2922 /// This is used for implicit bitfield conversion checks, which
2923 /// must compare with the value before potential truncation.
2924 llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
2925 llvm::Value **Previous,
2926 QualType *SrcType);
2927
2928 /// Emit a check for an [implicit] conversion of a bitfield. The conversion
2929 /// is not UB, so we use the value after conversion.
2930 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2931 llvm::Value *Dst, QualType DstType,
2932 const CGBitFieldInfo &Info,
2934
2935 /// EmitIgnoredExpr - Emit an expression in a context which ignores the
2936 /// result.
2937 void EmitIgnoredExpr(const Expr *E);
2938
2939 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2940 /// any type. The result is returned as an RValue struct. If this is an
2941 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2942 /// the result should be returned.
2943 ///
2944 /// \param ignoreResult True if the resulting value isn't used.
2945 RValue EmitAnyExpr(const Expr *E,
2946 AggValueSlot aggSlot = AggValueSlot::ignored(),
2947 bool ignoreResult = false);
2948
2949 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2950 // or the value of the expression, depending on how va_list is defined.
2951 Address EmitVAListRef(const Expr *E);
2952
2953 /// Emit a "reference" to a __builtin_ms_va_list; this is
2954 /// always the value of the expression, because a __builtin_ms_va_list is a
2955 /// pointer to a char.
2956 Address EmitMSVAListRef(const Expr *E);
2957
2958 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
2959 /// always be accessible even if no aggregate location is provided.
2960 RValue EmitAnyExprToTemp(const Expr *E);
2961
2962 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2963 /// arbitrary expression into the given memory location.
2964 void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
2965 bool IsInitializer);
2966
2967 void EmitAnyExprToExn(const Expr *E, Address Addr);
2968
2969 /// EmitInitializationToLValue - Emit an initializer to an LValue.
2970 void EmitInitializationToLValue(
2971 const Expr *E, LValue LV,
2972 AggValueSlot::IsZeroed_t IsZeroed = AggValueSlot::IsNotZeroed);
2973
2974 /// EmitExprAsInit - Emits the code necessary to initialize a
2975 /// location in memory with the given initializer.
2976 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2977 bool capturedByInit);
2978
2979 /// hasVolatileMember - returns true if aggregate type has a volatile
2980 /// member.
2982 if (const auto *RD = T->getAsRecordDecl())
2983 return RD->hasVolatileMember();
2984 return false;
2985 }
2986
2987 /// Determine whether a return value slot may overlap some other object.
2989 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2990 // class subobjects. These cases may need to be revisited depending on the
2991 // resolution of the relevant core issue.
2992 return AggValueSlot::DoesNotOverlap;
2993 }
2994
2995 /// Determine whether a field initialization may overlap some other object.
2996 AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
2997
2998 /// Determine whether a base class initialization may overlap some other
2999 /// object.
3000 AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
3001 const CXXRecordDecl *BaseRD,
3002 bool IsVirtual);
3003
3004 /// Emit an aggregate assignment.
3006 ApplyAtomGroup Grp(getDebugInfo());
3007 bool IsVolatile = hasVolatileMember(EltTy);
3008 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
3009 }
3010
3012 AggValueSlot::Overlap_t MayOverlap) {
3013 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
3014 }
3015
3016 /// EmitAggregateCopy - Emit an aggregate copy.
3017 ///
3018 /// \param isVolatile \c true iff either the source or the destination is
3019 /// volatile.
3020 /// \param MayOverlap Whether the tail padding of the destination might be
3021 /// occupied by some other object. More efficient code can often be
3022 /// generated if not.
3023 void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
3024 AggValueSlot::Overlap_t MayOverlap,
3025 bool isVolatile = false);
3026
3027 /// GetAddrOfLocalVar - Return the address of a local variable.
3029 auto it = LocalDeclMap.find(VD);
3030 assert(it != LocalDeclMap.end() &&
3031 "Invalid argument to GetAddrOfLocalVar(), no decl!");
3032 return it->second;
3033 }
3034
3035 /// Given an opaque value expression, return its LValue mapping if it exists,
3036 /// otherwise create one.
3037 LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
3038
3039 /// Given an opaque value expression, return its RValue mapping if it exists,
3040 /// otherwise create one.
3041 RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
3042
3043 /// isOpaqueValueEmitted - Return true if the opaque value expression has
3044 /// already been emitted.
3045 bool isOpaqueValueEmitted(const OpaqueValueExpr *E);
3046
3047 /// Get the index of the current ArrayInitLoopExpr, if any.
3048 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3049
3050 /// getAccessedFieldNo - Given an encoded value and a result number, return
3051 /// the input field number being accessed.
3052 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3053
3054 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3055 llvm::BasicBlock *GetIndirectGotoBlock();
3056
3057 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3058 static bool IsWrappedCXXThis(const Expr *E);
3059
3060 /// EmitNullInitialization - Generate code to set a value of the given type to
3061 /// null. If the type contains data member pointers, they will be initialized
3062 /// to -1 in accordance with the Itanium C++ ABI.
3063 void EmitNullInitialization(Address DestPtr, QualType Ty);
3064
3065 /// Emits a call to an LLVM variable-argument intrinsic, either
3066 /// \c llvm.va_start or \c llvm.va_end.
3067 /// \param ArgValue A reference to the \c va_list as emitted by either
3068 /// \c EmitVAListRef or \c EmitMSVAListRef.
3069 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3070 /// calls \c llvm.va_end.
3071 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3072
3073 /// Generate code to get an argument from the passed in pointer
3074 /// and update it accordingly.
3075 /// \param VE The \c VAArgExpr for which to generate code.
3076 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3077 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3078 /// \returns A pointer to the argument.
3079 // FIXME: We should be able to get rid of this method and use the va_arg
3080 // instruction in LLVM instead once it works well enough.
3081 RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
3082 AggValueSlot Slot = AggValueSlot::ignored());
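
  // A minimal sketch (illustrative) of how a va_arg expression VE is typically
  // lowered with these helpers:
  //
  //   Address VAList = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr())
  //                                         : EmitVAListRef(VE->getSubExpr());
  //   RValue Arg = EmitVAArg(VE, VAList);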
3083
3084 /// emitArrayLength - Compute the length of an array, even if it's a
3085 /// VLA, and drill down to the base element type.
3086 llvm::Value *emitArrayLength(const ArrayType *arrayType, QualType &baseType,
3087 Address &addr);
3088
3089 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3090 /// the given variably-modified type and store them in the VLASizeMap.
3091 ///
3092 /// This function can be called with a null (unreachable) insert point.
3093 void EmitVariablyModifiedType(QualType Ty);
3094
3096 llvm::Value *NumElts;
3098
3099 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3100 };
3101
3102 /// Return the number of elements for a single dimension
3103 /// for the given array type.
3104 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
3105 VlaSizePair getVLAElements1D(QualType vla);
3106
3107 /// Returns an LLVM value that corresponds to the size,
3108 /// in non-variably-sized elements, of a variable length array type,
3109 /// plus the largest non-variably-sized element type. Assumes that
3110 /// the type has already been emitted with EmitVariablyModifiedType.
3111 VlaSizePair getVLASize(const VariableArrayType *vla);
3112 VlaSizePair getVLASize(QualType vla);
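
  // A minimal sketch (illustrative) for a variably modified type Ty:
  //
  //   EmitVariablyModifiedType(Ty);   // captures the VLA sizes into VLASizeMap
  //   if (const auto *VAT = getContext().getAsVariableArrayType(Ty)) {
  //     VlaSizePair Sizes = getVLASize(VAT);
  //     llvm::Value *NumElts = Sizes.NumElts;  // runtime element count
  //     QualType EltTy = Sizes.Type;           // largest fixed-size element type
  //   }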
3113
3114 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3115 /// generating code for a C++ member function.
3116 llvm::Value *LoadCXXThis() {
3117 assert(CXXThisValue && "no 'this' value for this function");
3118 return CXXThisValue;
3119 }
3120 Address LoadCXXThisAddress();
3121
3122 /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
3123 /// that have virtual bases.
3124 // FIXME: Every place that calls LoadCXXVTT is something
3125 // that needs to be abstracted properly.
3126 llvm::Value *LoadCXXVTT() {
3127 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3128 return CXXStructorImplicitParamValue;
3129 }
3130
3131 /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
3132 /// complete class to the given direct base.
3133 Address GetAddressOfDirectBaseInCompleteClass(Address Value,
3134 const CXXRecordDecl *Derived,
3135 const CXXRecordDecl *Base,
3136 bool BaseIsVirtual);
3137
3138 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3139
3140 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3141 /// load of 'this' and return the address of the base class.
3142 Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived,
3143 CastExpr::path_const_iterator PathBegin,
3144 CastExpr::path_const_iterator PathEnd,
3145 bool NullCheckValue, SourceLocation Loc);
3146
3147 Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived,
3148 CastExpr::path_const_iterator PathBegin,
3149 CastExpr::path_const_iterator PathEnd,
3150 bool NullCheckValue);
3151
3152 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3153 /// base constructor/destructor with virtual bases.
3154 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3155 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3156 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3157 bool Delegating);
3158
3159 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
3160 CXXCtorType CtorType,
3161 const FunctionArgList &Args,
3162 SourceLocation Loc);
3163 // It's important not to confuse this and the previous function. Delegating
3164 // constructors are the C++11 feature. The constructor delegate optimization
3165 // is used to reduce duplication in the base and complete constructors where
3166 // they are substantially the same.
3167 void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3168 const FunctionArgList &Args);
3169
3170 /// Emit a call to an inheriting constructor (that is, one that invokes a
3171 /// constructor inherited from a base class) by inlining its definition. This
3172 /// is necessary if the ABI does not support forwarding the arguments to the
3173 /// base class constructor (because they're variadic or similar).
3174 void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3175 CXXCtorType CtorType,
3176 bool ForVirtualBase,
3177 bool Delegating,
3178 CallArgList &Args);
3179
3180 /// Emit a call to a constructor inherited from a base class, passing the
3181 /// current constructor's arguments along unmodified (without even making
3182 /// a copy).
3183 void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
3184 bool ForVirtualBase, Address This,
3185 bool InheritedFromVBase,
3186 const CXXInheritedCtorInitExpr *E);
3187
3188 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3189 bool ForVirtualBase, bool Delegating,
3190 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3191
3192 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3193 bool ForVirtualBase, bool Delegating,
3194 Address This, CallArgList &Args,
3195 AggValueSlot::Overlap_t Overlap,
3196 SourceLocation Loc, bool NewPointerIsChecked,
3197 llvm::CallBase **CallOrInvoke = nullptr);
3198
3199 /// Emit assumption loads for all bases. Must be called only on the
3200 /// most-derived class and not while the object is under construction.
3201 void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
3202
3203 /// Emit assumption that vptr load == global vtable.
3204 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3205
3206 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This,
3207 Address Src, const CXXConstructExpr *E);
3208
3209 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3210 const ArrayType *ArrayTy, Address ArrayPtr,
3211 const CXXConstructExpr *E,
3212 bool NewPointerIsChecked,
3213 bool ZeroInitialization = false);
3214
3215 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3216 llvm::Value *NumElements, Address ArrayPtr,
3217 const CXXConstructExpr *E,
3218 bool NewPointerIsChecked,
3219 bool ZeroInitialization = false);
3220
3221 static Destroyer destroyCXXObject;
3222
3223 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
3224 bool ForVirtualBase, bool Delegating, Address This,
3225 QualType ThisTy);
3226
3227 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
3228 llvm::Type *ElementTy, Address NewPtr,
3229 llvm::Value *NumElements,
3230 llvm::Value *AllocSizeWithoutCookie);
3231
3232 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3233 Address Ptr);
3234
3235 void EmitSehCppScopeBegin();
3236 void EmitSehCppScopeEnd();
3237 void EmitSehTryScopeBegin();
3238 void EmitSehTryScopeEnd();
3239
3240 bool EmitLifetimeStart(llvm::Value *Addr);
3241 void EmitLifetimeEnd(llvm::Value *Addr);
3242
3243 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3244 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
3245
3246 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3247 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3248 CharUnits CookieSize = CharUnits());
3249
3250 RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
3251 const CallExpr *TheCallExpr, bool IsDelete);
3252
3253 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3254 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3255 Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
3256
3257 /// Situations in which we might emit a check for the suitability of a
3258 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3259 /// compiler-rt.
3260 enum TypeCheckKind {
3261 /// Checking the operand of a load. Must be suitably sized and aligned.
3262 TCK_Load,
3263 /// Checking the destination of a store. Must be suitably sized and aligned.
3264 TCK_Store,
3265 /// Checking the bound value in a reference binding. Must be suitably sized
3266 /// and aligned, but is not required to refer to an object (until the
3267 /// reference is used), per core issue 453.
3268 TCK_ReferenceBinding,
3269 /// Checking the object expression in a non-static data member access. Must
3270 /// be an object within its lifetime.
3271 TCK_MemberAccess,
3272 /// Checking the 'this' pointer for a call to a non-static member function.
3273 /// Must be an object within its lifetime.
3274 TCK_MemberCall,
3275 /// Checking the 'this' pointer for a constructor call.
3276 TCK_ConstructorCall,
3277 /// Checking the operand of a static_cast to a derived pointer type. Must be
3278 /// null or an object within its lifetime.
3279 TCK_DowncastPointer,
3280 /// Checking the operand of a static_cast to a derived reference type. Must
3281 /// be an object within its lifetime.
3282 TCK_DowncastReference,
3283 /// Checking the operand of a cast to a base object. Must be suitably sized
3284 /// and aligned.
3285 TCK_Upcast,
3286 /// Checking the operand of a cast to a virtual base object. Must be an
3287 /// object within its lifetime.
3288 TCK_UpcastToVirtualBase,
3289 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3290 TCK_NonnullAssign,
3291 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3292 /// null or an object within its lifetime.
3293 TCK_DynamicOperation
3294 };
3295
3296 /// Determine whether the pointer type check \p TCK permits null pointers.
3297 static bool isNullPointerAllowed(TypeCheckKind TCK);
3298
3299 /// Determine whether the pointer type check \p TCK requires a vptr check.
3300 static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
3301
3302 /// Whether any type-checking sanitizers are enabled. If \c false,
3303 /// calls to EmitTypeCheck can be skipped.
3304 bool sanitizePerformTypeCheck() const;
3305
3306 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
3307 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3308 llvm::Value *ArraySize = nullptr) {
3309 if (!sanitizePerformTypeCheck())
3310 return;
3311 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3312 SkippedChecks, ArraySize);
3313 }
3314
3315 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
3316 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3317 SanitizerSet SkippedChecks = SanitizerSet(),
3318 llvm::Value *ArraySize = nullptr) {
3319 if (!sanitizePerformTypeCheck())
3320 return;
3321 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3322 SkippedChecks, ArraySize);
3323 }
3324
3325 /// Emit a check that \p V is the address of storage of the
3326 /// appropriate size and alignment for an object of type \p Type
3327 /// (or if ArraySize is provided, for an array of that bound).
3328 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
3329 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3330 SanitizerSet SkippedChecks = SanitizerSet(),
3331 llvm::Value *ArraySize = nullptr);
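// Editorial sketch: a typical sanitizer type check for a load, assuming
// `CGF` is a CodeGenFunction, `E` the loaded expression, and `LV` its
// already-emitted LValue. The wrapper overloads above already perform the
// sanitizePerformTypeCheck() early exit.
//
//   CGF.EmitTypeCheck(CodeGenFunction::TCK_Load, E->getExprLoc(), LV,
//                     E->getType());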
3332
3333 /// Emit a check that \p Base points into an array object, which
3334 /// we can access at index \p Index. \p Accessed should be \c false if
3335 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3336 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3337 QualType IndexType, bool Accessed);
3338 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3339 llvm::Value *Index, QualType IndexType,
3340 QualType IndexedType, bool Accessed);
3341
3342 /// Returns debug info, with additional annotation if
3343 /// CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo[Ordinal] is enabled for
3344 /// any of the ordinals.
3345 llvm::DILocation *
3346 SanitizerAnnotateDebugInfo(ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
3347 SanitizerHandler Handler);
3348
3349 llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
3350 const FieldDecl *CountDecl);
3351
3352 /// Build an expression accessing the "counted_by" field.
3353 llvm::Value *EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD,
3354 const FieldDecl *CountDecl);
3355
3356 // Emit bounds checking for flexible array and pointer members with the
3357 // counted_by attribute.
3358 void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx,
3359 Address Addr, QualType IdxTy,
3360 QualType ArrayTy, bool Accessed,
3361 bool FlexibleArray);
3362
3363 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3364 bool isInc, bool isPre);
3365 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
3366 bool isInc, bool isPre);
3367
3368 /// Converts Location to a DebugLoc, if debug information is enabled.
3369 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3370
3371 /// Get the record field index as represented in debug info.
3372 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3373
3374 //===--------------------------------------------------------------------===//
3375 // Declaration Emission
3376 //===--------------------------------------------------------------------===//
3377
3378 /// EmitDecl - Emit a declaration.
3379 ///
3380 /// This function can be called with a null (unreachable) insert point.
3381 void EmitDecl(const Decl &D, bool EvaluateConditionDecl = false);
3382
3383 /// EmitVarDecl - Emit a local variable declaration.
3384 ///
3385 /// This function can be called with a null (unreachable) insert point.
3386 void EmitVarDecl(const VarDecl &D);
3387
3388 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3389 bool capturedByInit);
3390
3391 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
3392 llvm::Value *Address);
3393
3394 /// Determine whether the given initializer is trivial in the sense
3395 /// that it requires no code to be generated.
3396 bool isTrivialInitializer(const Expr *Init);
3397
3398 /// EmitAutoVarDecl - Emit an auto variable declaration.
3399 ///
3400 /// This function can be called with a null (unreachable) insert point.
3401 void EmitAutoVarDecl(const VarDecl &D);
3402
3403 class AutoVarEmission {
3404 friend class CodeGenFunction;
3405
3406 const VarDecl *Variable;
3407
3408 /// The address of the alloca for languages with explicit address space
3409 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3410 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3411 /// as a global constant.
3412 Address Addr;
3413
3414 llvm::Value *NRVOFlag;
3415
3416 /// True if the variable is a __block variable that is captured by an
3417 /// escaping block.
3418 bool IsEscapingByRef;
3419
3420 /// True if the variable is of aggregate type and has a constant
3421 /// initializer.
3422 bool IsConstantAggregate;
3423
3424 /// True if lifetime markers should be used.
3425 bool UseLifetimeMarkers;
3426
3427 /// Address with original alloca instruction. Invalid if the variable was
3428 /// emitted as a global constant.
3429 RawAddress AllocaAddr;
3430
3431 struct Invalid {};
3432 AutoVarEmission(Invalid)
3433 : Variable(nullptr), Addr(Address::invalid()),
3434 AllocaAddr(RawAddress::invalid()) {}
3435
3436 AutoVarEmission(const VarDecl &variable)
3437 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3438 IsEscapingByRef(false), IsConstantAggregate(false),
3439 UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
3440
3441 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3442
3443 public:
3444 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3445
3446 bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
3447
3448 /// Returns the raw, allocated address, which is not necessarily
3449 /// the address of the object itself. It is casted to default
3450 /// address space for address space agnostic languages.
3451 Address getAllocatedAddress() const { return Addr; }
3452
3453 /// Returns the address for the original alloca instruction.
3454 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3455
3456 /// Returns the address of the object within this declaration.
3457 /// Note that this does not chase the forwarding pointer for
3458 /// __block decls.
3459 Address getObjectAddress(CodeGenFunction &CGF) const {
3460 if (!IsEscapingByRef)
3461 return Addr;
3462
3463 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3464 }
3465 };
3466 AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
3467 void EmitAutoVarInit(const AutoVarEmission &emission);
3468 void EmitAutoVarCleanups(const AutoVarEmission &emission);
3469 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
3470 QualType::DestructionKind dtorKind);
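// Editorial sketch: the three-phase protocol for local variables declared
// above, which is roughly what EmitAutoVarDecl performs, assuming `CGF` is
// a CodeGenFunction and `D` a local VarDecl.
//
//   CodeGenFunction::AutoVarEmission Emission = CGF.EmitAutoVarAlloca(D);
//   CGF.EmitAutoVarInit(Emission);     // run the initializer, if any
//   CGF.EmitAutoVarCleanups(Emission); // push destructor/lifetime cleanups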
3471
3472 void MaybeEmitDeferredVarDeclInit(const VarDecl *var);
3473
3474 /// Emits the alloca and debug information for the size expressions for each
3475 /// dimension of an array. It registers the association of its (1-dimensional)
3476 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3477 /// reference this node when creating the DISubrange object to describe the
3478 /// array types.
3479 void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D,
3480 bool EmitDebugInfo);
3481
3482 void EmitStaticVarDecl(const VarDecl &D,
3483 llvm::GlobalValue::LinkageTypes Linkage);
3484
3485 class ParamValue {
3486 union {
3487 Address Addr;
3488 llvm::Value *Value;
3489 };
3490
3491 bool IsIndirect;
3492
3493 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3494 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3495
3496 public:
3497 static ParamValue forDirect(llvm::Value *value) {
3498 return ParamValue(value);
3499 }
3500 static ParamValue forIndirect(Address addr) {
3501 assert(!addr.getAlignment().isZero());
3502 return ParamValue(addr);
3503 }
3504
3505 bool isIndirect() const { return IsIndirect; }
3506 llvm::Value *getAnyValue() const {
3507 if (!isIndirect())
3508 return Value;
3509 assert(!Addr.hasOffset() && "unexpected offset");
3510 return Addr.getBasePointer();
3511 }
3512
3513 llvm::Value *getDirectValue() const {
3514 assert(!isIndirect());
3515 return Value;
3516 }
3517
3518 Address getIndirectAddress() const {
3519 assert(isIndirect());
3520 return Addr;
3521 }
3522 };
3523
3524 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3525 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
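// Editorial sketch: wrapping an incoming argument for EmitParmDecl with the
// ParamValue helper above, assuming `CGF` is a CodeGenFunction, `D` the
// ParmVarDecl, `AI` the direct llvm::Value of the argument, and `ArgNo` its
// index in the argument list.
//
//   CGF.EmitParmDecl(D, CodeGenFunction::ParamValue::forDirect(AI), ArgNo);
//   // or, for an argument passed indirectly in memory:
//   // CGF.EmitParmDecl(D, CodeGenFunction::ParamValue::forIndirect(Addr), ArgNo);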
3526
3527 /// protectFromPeepholes - Protect a value that we're intending to
3528 /// store to the side, but which will probably be used later, from
3529 /// aggressive peepholing optimizations that might delete it.
3530 ///
3531 /// Pass the result to unprotectFromPeepholes to declare that
3532 /// protection is no longer required.
3533 ///
3534 /// There's no particular reason why this shouldn't apply to
3535 /// l-values, it's just that no existing peepholes work on pointers.
3536 PeepholeProtection protectFromPeepholes(RValue rvalue);
3537 void unprotectFromPeepholes(PeepholeProtection protection);
3538
3539 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3540 SourceLocation Loc,
3541 SourceLocation AssumptionLoc,
3542 llvm::Value *Alignment,
3543 llvm::Value *OffsetValue,
3544 llvm::Value *TheCheck,
3545 llvm::Instruction *Assumption);
3546
3547 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3548 SourceLocation Loc, SourceLocation AssumptionLoc,
3549 llvm::Value *Alignment,
3550 llvm::Value *OffsetValue = nullptr);
3551
3552 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3553 SourceLocation AssumptionLoc,
3554 llvm::Value *Alignment,
3555 llvm::Value *OffsetValue = nullptr);
3556
3557 //===--------------------------------------------------------------------===//
3558 // Statement Emission
3559 //===--------------------------------------------------------------------===//
3560
3561 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3562 void EmitStopPoint(const Stmt *S);
3563
3564 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3565 /// this function even if there is no current insertion point.
3566 ///
3567 /// This function may clear the current insertion point; callers should use
3568 /// EnsureInsertPoint if they wish to subsequently generate code without first
3569 /// calling EmitBlock, EmitBranch, or EmitStmt.
3570 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3571
3572 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3573 /// necessarily require an insertion point or debug information; typically
3574 /// because the statement amounts to a jump or a container of other
3575 /// statements.
3576 ///
3577 /// \return True if the statement was handled.
3578 bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);
3579
3580 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3581 AggValueSlot AVS = AggValueSlot::ignored());
3582 Address
3583 EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast = false,
3584 AggValueSlot AVS = AggValueSlot::ignored());
3585
3586 /// EmitLabel - Emit the block for the given label. It is legal to call this
3587 /// function even if there is no current insertion point.
3588 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3589
3590 void EmitLabelStmt(const LabelStmt &S);
3591 void EmitAttributedStmt(const AttributedStmt &S);
3592 void EmitGotoStmt(const GotoStmt &S);
3593 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
3594 void EmitIfStmt(const IfStmt &S);
3595
3596 void EmitWhileStmt(const WhileStmt &S, ArrayRef<const Attr *> Attrs = {});
3597 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3598 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3599 void EmitReturnStmt(const ReturnStmt &S);
3600 void EmitDeclStmt(const DeclStmt &S);
3601 void EmitBreakStmt(const BreakStmt &S);
3602 void EmitContinueStmt(const ContinueStmt &S);
3603 void EmitSwitchStmt(const SwitchStmt &S);
3604 void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
3605 void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3606 void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3607 void EmitAsmStmt(const AsmStmt &S);
3608
3609 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
3610 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
3611 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
3612 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
3613 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
3614
3615 void EmitCoroutineBody(const CoroutineBodyStmt &S);
3616 void EmitCoreturnStmt(const CoreturnStmt &S);
3617 RValue EmitCoawaitExpr(const CoawaitExpr &E,
3618 AggValueSlot aggSlot = AggValueSlot::ignored(),
3619 bool ignoreResult = false);
3620 LValue EmitCoawaitLValue(const CoawaitExpr *E);
3621 RValue EmitCoyieldExpr(const CoyieldExpr &E,
3622 AggValueSlot aggSlot = AggValueSlot::ignored(),
3623 bool ignoreResult = false);
3624 LValue EmitCoyieldLValue(const CoyieldExpr *E);
3625 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3626
3627 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3628 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3629
3630 void EmitCXXTryStmt(const CXXTryStmt &S);
3631 void EmitSEHTryStmt(const SEHTryStmt &S);
3632 void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
3633 void EnterSEHTryStmt(const SEHTryStmt &S);
3634 void ExitSEHTryStmt(const SEHTryStmt &S);
3635 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3636 llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
3637
3638 void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc);
3639 void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
3640 const Stmt *OutlinedStmt);
3641
3642 llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
3643 const SEHExceptStmt &Except);
3644
3645 llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
3646 const SEHFinallyStmt &Finally);
3647
3648 void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
3649 llvm::Value *ParentFP, llvm::Value *EntryEBP);
3650 llvm::Value *EmitSEHExceptionCode();
3651 llvm::Value *EmitSEHExceptionInfo();
3652 llvm::Value *EmitSEHAbnormalTermination();
3653
3654 /// Emit simple code for OpenMP directives in Simd-only mode.
3655 void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
3656
3657 /// Scan the outlined statement for captures from the parent function. For
3658 /// each capture, mark the capture as escaped and emit a call to
3659 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3660 void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
3661 bool IsFilter);
3662
3663 /// Recovers the address of a local in a parent function. ParentVar is the
3664 /// address of the variable used in the immediate parent function. It can
3665 /// either be an alloca or a call to llvm.localrecover if there are nested
3666 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3667 /// frame.
3668 Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
3669 Address ParentVar, llvm::Value *ParentFP);
3670
3671 void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
3672 ArrayRef<const Attr *> Attrs = {});
3673
3674 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3675 class OMPCancelStackRAII {
3676 CodeGenFunction &CGF;
3677
3678 public:
3679 OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
3680 bool HasCancel)
3681 : CGF(CGF) {
3682 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3683 }
3684 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3685 };
3686
3687 /// Returns calculated size of the specified type.
3688 llvm::Value *getTypeSize(QualType Ty);
3689 LValue InitCapturedStruct(const CapturedStmt &S);
3690 llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
3691 llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
3692 Address GenerateCapturedStmtArgument(const CapturedStmt &S);
3693 llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
3694 SourceLocation Loc);
3695 void GenerateOpenMPCapturedVars(const CapturedStmt &S,
3696 SmallVectorImpl<llvm::Value *> &CapturedVars);
3697 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3698 SourceLocation Loc);
3699 /// Perform element by element copying of arrays with type \a
3700 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3701 /// generated by \a CopyGen.
3702 ///
3703 /// \param DestAddr Address of the destination array.
3704 /// \param SrcAddr Address of the source array.
3705 /// \param OriginalType Type of destination and source arrays.
3706 /// \param CopyGen Copying procedure that copies value of single array element
3707 /// to another single array element.
3708 void EmitOMPAggregateAssign(
3709 Address DestAddr, Address SrcAddr, QualType OriginalType,
3710 const llvm::function_ref<void(Address, Address)> CopyGen);
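// Editorial sketch: using the element-wise copy helper above, assuming `CGF`
// is a CodeGenFunction and Dest/Src/ArrayTy describe two arrays of the same
// type. The CopyGen callback is invoked for each element pair.
//
//   CGF.EmitOMPAggregateAssign(Dest, Src, ArrayTy,
//       [&CGF](Address DestElem, Address SrcElem) {
//         // copy one element, e.g. with EmitLoadOfScalar/EmitStoreOfScalar
//       });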
3711 /// Emit proper copying of data from one variable to another.
3712 ///
3713 /// \param OriginalType Original type of the copied variables.
3714 /// \param DestAddr Destination address.
3715 /// \param SrcAddr Source address.
3716 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3717 /// type of the base array element).
3718 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3719 /// the base array element).
3720 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3721 /// DestVD.
3722 void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr,
3723 const VarDecl *DestVD, const VarDecl *SrcVD,
3724 const Expr *Copy);
3725 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3726 /// \a X = \a E \a BO \a X.
3727 ///
3728 /// \param X Value to be updated.
3729 /// \param E Update value.
3730 /// \param BO Binary operation for update operation.
3731 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3732 /// expression, false otherwise.
3733 /// \param AO Atomic ordering of the generated atomic instructions.
3734 /// \param CommonGen Code generator for complex expressions that cannot be
3735 /// expressed through atomicrmw instruction.
3736 /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
3737 /// generated, <false, RValue::get(nullptr)> otherwise.
3738 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3739 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3740 llvm::AtomicOrdering AO, SourceLocation Loc,
3741 const llvm::function_ref<RValue(RValue)> CommonGen);
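// Editorial sketch: emitting `x += expr` atomically with the helper above,
// assuming `CGF` is a CodeGenFunction, `XLV` the LValue of x, `ERV` the
// RValue of expr, and `Loc` a SourceLocation. CommonGen is only used when
// the update cannot be expressed as a single atomicrmw.
//
//   auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
//       XLV, ERV, BO_Add, /*IsXLHSInRHSPart=*/true,
//       llvm::AtomicOrdering::Monotonic, Loc,
//       [](RValue XVal) { return XVal; /* build x + expr here */ });
//   // Res.first: whether atomicrmw was used; Res.second: the old value.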
3742 bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3743 OMPPrivateScope &PrivateScope);
3744 void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3745 OMPPrivateScope &PrivateScope);
3746 void EmitOMPUseDevicePtrClause(
3747 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3748 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3749 CaptureDeviceAddrMap);
3750 void EmitOMPUseDeviceAddrClause(
3751 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3752 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3753 CaptureDeviceAddrMap);
3754 /// Emit code for the copyin clause in the \a D directive. The following code
3755 /// is generated at the start of outlined functions for directives:
3756 /// \code
3757 /// threadprivate_var1 = master_threadprivate_var1;
3758 /// operator=(threadprivate_var2, master_threadprivate_var2);
3759 /// ...
3760 /// __kmpc_barrier(&loc, global_tid);
3761 /// \endcode
3762 ///
3763 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3764 /// \returns true if at least one copyin variable is found, false otherwise.
3765 bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3766 /// Emit initial code for lastprivate variables. If some variable is
3767 /// not also firstprivate, then the default initialization is used. Otherwise
3768 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3769 /// method.
3770 ///
3771 /// \param D Directive that may have 'lastprivate' clauses.
3772 /// \param PrivateScope Private scope for capturing lastprivate variables for
3773 /// proper codegen in internal captured statement.
3774 ///
3775 /// \returns true if there is at least one lastprivate variable, false
3776 /// otherwise.
3777 bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3778 OMPPrivateScope &PrivateScope);
3779 /// Emit final copying of lastprivate values to original variables at
3780 /// the end of the worksharing or simd directive.
3781 ///
3782 /// \param D Directive that has at least one 'lastprivate' clause.
3783 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3784 /// it is the last iteration of the loop code in associated directive, or to
3785 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3786 void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3787 bool NoFinals,
3788 llvm::Value *IsLastIterCond = nullptr);
3789 /// Emit initial code for linear clauses.
3790 void EmitOMPLinearClause(const OMPLoopDirective &D,
3791 CodeGenFunction::OMPPrivateScope &PrivateScope);
3792 /// Emit final code for linear clauses.
3793 /// \param CondGen Optional conditional code for final part of codegen for
3794 /// linear clause.
3795 void EmitOMPLinearClauseFinal(
3796 const OMPLoopDirective &D,
3797 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3798 /// Emit initial code for reduction variables. Creates reduction copies
3799 /// and initializes them with the values according to OpenMP standard.
3800 ///
3801 /// \param D Directive (possibly) with the 'reduction' clause.
3802 /// \param PrivateScope Private scope for capturing reduction variables for
3803 /// proper codegen in internal captured statement.
3804 ///
3805 void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3806 OMPPrivateScope &PrivateScope,
3807 bool ForInscan = false);
3808 /// Emit final update of reduction values to original variables at
3809 /// the end of the directive.
3810 ///
3811 /// \param D Directive that has at least one 'reduction' clause.
3812 /// \param ReductionKind The kind of reduction to perform.
3813 void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3814 const OpenMPDirectiveKind ReductionKind);
3815 /// Emit initial code for linear variables. Creates private copies
3816 /// and initializes them with the values according to OpenMP standard.
3817 ///
3818 /// \param D Directive (possibly) with the 'linear' clause.
3819 /// \return true if at least one linear variable is found that should be
3820 /// initialized with the value of the original variable, false otherwise.
3821 bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3822
3823 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3824 llvm::Function * /*OutlinedFn*/,
3825 const OMPTaskDataTy & /*Data*/)>
3826 TaskGenTy;
3827 void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3828 const OpenMPDirectiveKind CapturedRegion,
3829 const RegionCodeGenTy &BodyGen,
3830 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3831 struct OMPTargetDataInfo {
3832 Address BasePointersArray = Address::invalid();
3833 Address PointersArray = Address::invalid();
3834 Address SizesArray = Address::invalid();
3835 Address MappersArray = Address::invalid();
3836 unsigned NumberOfTargetItems = 0;
3837 explicit OMPTargetDataInfo() = default;
3838 OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3839 Address SizesArray, Address MappersArray,
3840 unsigned NumberOfTargetItems)
3841 : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3842 SizesArray(SizesArray), MappersArray(MappersArray),
3843 NumberOfTargetItems(NumberOfTargetItems) {}
3844 };
3845 void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3846 const RegionCodeGenTy &BodyGen,
3847 OMPTargetDataInfo &InputInfo);
3848 void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data,
3849 CodeGenFunction &CGF, const CapturedStmt *CS,
3850 OMPPrivateScope &Scope);
3851 void EmitOMPMetaDirective(const OMPMetaDirective &S);
3852 void EmitOMPParallelDirective(const OMPParallelDirective &S);
3853 void EmitOMPSimdDirective(const OMPSimdDirective &S);
3854 void EmitOMPTileDirective(const OMPTileDirective &S);
3855 void EmitOMPStripeDirective(const OMPStripeDirective &S);
3856 void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
3857 void EmitOMPReverseDirective(const OMPReverseDirective &S);
3858 void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
3859 void EmitOMPForDirective(const OMPForDirective &S);
3860 void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3861 void EmitOMPScopeDirective(const OMPScopeDirective &S);
3862 void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3863 void EmitOMPSectionDirective(const OMPSectionDirective &S);
3864 void EmitOMPSingleDirective(const OMPSingleDirective &S);
3865 void EmitOMPMasterDirective(const OMPMasterDirective &S);
3866 void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
3867 void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3868 void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3869 void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3870 void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3871 void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
3872 void EmitOMPTaskDirective(const OMPTaskDirective &S);
3873 void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3874 void EmitOMPErrorDirective(const OMPErrorDirective &S);
3875 void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3876 void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3877 void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3878 void EmitOMPFlushDirective(const OMPFlushDirective &S);
3879 void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
3880 void EmitOMPScanDirective(const OMPScanDirective &S);
3881 void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3882 void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3883 void EmitOMPTargetDirective(const OMPTargetDirective &S);
3884 void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3885 void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3886 void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3887 void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3888 void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3889 void
3890 EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3891 void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3892 void
3893 EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3894 void EmitOMPCancelDirective(const OMPCancelDirective &S);
3895 void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3896 void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3897 void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3898 void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
3899 void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S);
3900 void
3901 EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
3902 void
3903 EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S);
3904 void EmitOMPParallelMasterTaskLoopDirective(
3905 const OMPParallelMasterTaskLoopDirective &S);
3906 void EmitOMPParallelMaskedTaskLoopDirective(
3907 const OMPParallelMaskedTaskLoopDirective &S);
3908 void EmitOMPParallelMasterTaskLoopSimdDirective(
3909 const OMPParallelMasterTaskLoopSimdDirective &S);
3910 void EmitOMPParallelMaskedTaskLoopSimdDirective(
3911 const OMPParallelMaskedTaskLoopSimdDirective &S);
3912 void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3913 void EmitOMPDistributeParallelForDirective(
3914 const OMPDistributeParallelForDirective &S);
3915 void EmitOMPDistributeParallelForSimdDirective(
3916 const OMPDistributeParallelForSimdDirective &S);
3917 void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3918 void EmitOMPTargetParallelForSimdDirective(
3919 const OMPTargetParallelForSimdDirective &S);
3920 void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3921 void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3922 void
3923 EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3924 void EmitOMPTeamsDistributeParallelForSimdDirective(
3925 const OMPTeamsDistributeParallelForSimdDirective &S);
3926 void EmitOMPTeamsDistributeParallelForDirective(
3927 const OMPTeamsDistributeParallelForDirective &S);
3928 void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3929 void EmitOMPTargetTeamsDistributeDirective(
3930 const OMPTargetTeamsDistributeDirective &S);
3931 void EmitOMPTargetTeamsDistributeParallelForDirective(
3932 const OMPTargetTeamsDistributeParallelForDirective &S);
3933 void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3934 const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3935 void EmitOMPTargetTeamsDistributeSimdDirective(
3936 const OMPTargetTeamsDistributeSimdDirective &S);
3937 void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
3938 void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
3939 void EmitOMPTargetParallelGenericLoopDirective(
3940 const OMPTargetParallelGenericLoopDirective &S);
3941 void EmitOMPTargetTeamsGenericLoopDirective(
3942 const OMPTargetTeamsGenericLoopDirective &S);
3943 void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
3944 void EmitOMPInteropDirective(const OMPInteropDirective &S);
3945 void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
3946 void EmitOMPAssumeDirective(const OMPAssumeDirective &S);
3947
3948 /// Emit device code for the target directive.
3949 static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3950 StringRef ParentName,
3951 const OMPTargetDirective &S);
3952 static void
3953 EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3954 const OMPTargetParallelDirective &S);
3955 /// Emit device code for the target parallel for directive.
3956 static void EmitOMPTargetParallelForDeviceFunction(
3957 CodeGenModule &CGM, StringRef ParentName,
3958 const OMPTargetParallelForDirective &S);
3959 /// Emit device code for the target parallel for simd directive.
3960 static void EmitOMPTargetParallelForSimdDeviceFunction(
3961 CodeGenModule &CGM, StringRef ParentName,
3962 const OMPTargetParallelForSimdDirective &S);
3963 /// Emit device code for the target teams directive.
3964 static void
3965 EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3966 const OMPTargetTeamsDirective &S);
3967 /// Emit device code for the target teams distribute directive.
3968 static void EmitOMPTargetTeamsDistributeDeviceFunction(
3969 CodeGenModule &CGM, StringRef ParentName,
3970 const OMPTargetTeamsDistributeDirective &S);
3971 /// Emit device code for the target teams distribute simd directive.
3972 static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3973 CodeGenModule &CGM, StringRef ParentName,
3974 const OMPTargetTeamsDistributeSimdDirective &S);
3975 /// Emit device code for the target simd directive.
3976 static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3977 StringRef ParentName,
3978 const OMPTargetSimdDirective &S);
3979 /// Emit device code for the target teams distribute parallel for simd
3980 /// directive.
3981 static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3982 CodeGenModule &CGM, StringRef ParentName,
3983 const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3984
3985 /// Emit device code for the target teams loop directive.
3986 static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
3987 CodeGenModule &CGM, StringRef ParentName,
3988 const OMPTargetTeamsGenericLoopDirective &S);
3989
3990 /// Emit device code for the target parallel loop directive.
3991 static void EmitOMPTargetParallelGenericLoopDeviceFunction(
3992 CodeGenModule &CGM, StringRef ParentName,
3993 const OMPTargetParallelGenericLoopDirective &S);
3994
3995 static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3996 CodeGenModule &CGM, StringRef ParentName,
3997 const OMPTargetTeamsDistributeParallelForDirective &S);
3998
3999 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
4000 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
4001 /// future it is meant to be the number of loops expected in the loop nests
4002 /// (usually specified by the "collapse" clause) that are collapsed to a
4003 /// single loop by this function.
4004 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
4005 int Depth);
4006
4007 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
4008 void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);
4009
4010 /// Emit inner loop of the worksharing/simd construct.
4011 ///
4012 /// \param S Directive, for which the inner loop must be emitted.
4013 /// \param RequiresCleanup true if the directive has some associated private
4014 /// variables.
4015 /// \param LoopCond Boolean condition for loop continuation.
4016 /// \param IncExpr Increment expression for the loop control variable.
4017 /// \param BodyGen Generator for the inner body of the inner loop.
4018 /// \param PostIncGen Generator for post-increment code (required for ordered
4019 /// loop directives).
4020 void EmitOMPInnerLoop(
4021 const OMPExecutableDirective &S, bool RequiresCleanup,
4022 const Expr *LoopCond, const Expr *IncExpr,
4023 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
4024 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
4025
4026 JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
4027 /// Emit initial code for loop counters of loop-based directives.
4028 void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
4029 OMPPrivateScope &LoopScope);
4030
4031 /// Helper for the OpenMP loop directives.
4032 void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
4033
4034 /// Emit code for the worksharing loop-based directive.
4035 /// \return true if this construct has any lastprivate clause, false
4036 /// otherwise.
4037 bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
4038 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
4039 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4040
4041 /// Emit code for the distribute loop-based directive.
4042 void EmitOMPDistributeLoop(const OMPLoopDirective &S,
4043 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4044
4045 /// Helpers for the OpenMP loop directives.
4046 void EmitOMPSimdInit(const OMPLoopDirective &D);
4047 void EmitOMPSimdFinal(
4048 const OMPLoopDirective &D,
4049 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4050
4051 /// Emits the lvalue for the expression with possibly captured variable.
4052 LValue EmitOMPSharedLValue(const Expr *E);
4053
4054private:
4055 /// Helpers for blocks.
4056 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4057
4058 /// struct with the values to be passed to the OpenMP loop-related functions
4059 struct OMPLoopArguments {
4060 /// loop lower bound
4061 Address LB = Address::invalid();
4062 /// loop upper bound
4063 Address UB = Address::invalid();
4064 /// loop stride
4065 Address ST = Address::invalid();
4066 /// isLastIteration argument for runtime functions
4067 Address IL = Address::invalid();
4068 /// Chunk value generated by sema
4069 llvm::Value *Chunk = nullptr;
4070 /// EnsureUpperBound
4071 Expr *EUB = nullptr;
4072 /// IncrementExpression
4073 Expr *IncExpr = nullptr;
4074 /// Loop initialization
4075 Expr *Init = nullptr;
4076 /// Loop exit condition
4077 Expr *Cond = nullptr;
4078 /// Update of LB after a whole chunk has been executed
4079 Expr *NextLB = nullptr;
4080 /// Update of UB after a whole chunk has been executed
4081 Expr *NextUB = nullptr;
4082 /// Distinguish between the for distribute and sections
4083 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4084 OMPLoopArguments() = default;
4085 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4086 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4087 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4088 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4089 Expr *NextUB = nullptr)
4090 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4091 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4092 NextUB(NextUB) {}
4093 };
4094 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4095 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4096 const OMPLoopArguments &LoopArgs,
4097 const CodeGenLoopTy &CodeGenLoop,
4098 const CodeGenOrderedTy &CodeGenOrdered);
4099 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4100 bool IsMonotonic, const OMPLoopDirective &S,
4101 OMPPrivateScope &LoopScope, bool Ordered,
4102 const OMPLoopArguments &LoopArgs,
4103 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4104 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4105 const OMPLoopDirective &S,
4106 OMPPrivateScope &LoopScope,
4107 const OMPLoopArguments &LoopArgs,
4108 const CodeGenLoopTy &CodeGenLoopContent);
4109 /// Emit code for sections directive.
4110 void EmitSections(const OMPExecutableDirective &S);
4111
4112public:
4113 //===--------------------------------------------------------------------===//
4114 // OpenACC Emission
4115 //===--------------------------------------------------------------------===//
4116 void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
4117 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4118 // simply emitting its structured block, but in the future we will implement
4119 // some sort of IR.
4120 EmitStmt(S.getStructuredBlock());
4121 }
4122
4123 void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
4124 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4125 // simply emitting its loop, but in the future we will implement
4126 // some sort of IR.
4127 EmitStmt(S.getLoop());
4128 }
4129
4130 void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S) {
4131 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4132 // simply emitting its loop, but in the future we will implement
4133 // some sort of IR.
4134 EmitStmt(S.getLoop());
4135 }
4136
4137 void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S) {
4138 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4139 // simply emitting its structured block, but in the future we will implement
4140 // some sort of IR.
4141 EmitStmt(S.getStructuredBlock());
4142 }
4143
4144 void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S) {
4145 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4146 // but in the future we will implement some sort of IR.
4147 }
4148
4149 void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S) {
4150 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4151 // but in the future we will implement some sort of IR.
4152 }
4153
4154 void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S) {
4155 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4156 // simply emitting its structured block, but in the future we will implement
4157 // some sort of IR.
4158 EmitStmt(S.getStructuredBlock());
4159 }
4160
4161 void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S) {
4162 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4163 // but in the future we will implement some sort of IR.
4164 }
4165
4166 void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S) {
4167 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4168 // but in the future we will implement some sort of IR.
4169 }
4170
4171 void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S) {
4172 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4173 // but in the future we will implement some sort of IR.
4174 }
4175
4176 void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S) {
4177 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4178 // but in the future we will implement some sort of IR.
4179 }
4180
4181 void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S) {
4182 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4183 // but in the future we will implement some sort of IR.
4184 }
4185
4186 void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S) {
4187 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4188 // simply emitting its associated stmt, but in the future we will implement
4189 // some sort of IR.
4190 EmitStmt(S.getAssociatedStmt());
4191 }
4192 void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S) {
4193 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4194 // but in the future we will implement some sort of IR.
4195 }
4196
4197 //===--------------------------------------------------------------------===//
4198 // LValue Expression Emission
4199 //===--------------------------------------------------------------------===//
4200
4201 /// Create a check that a scalar RValue is non-null.
4202 llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);
4203
4204 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4205 RValue GetUndefRValue(QualType Ty);
4206
4207 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4208 /// and issue an ErrorUnsupported style diagnostic (using the
4209 /// provided Name).
4210 RValue EmitUnsupportedRValue(const Expr *E, const char *Name);
4211
4212 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4213 /// an ErrorUnsupported style diagnostic (using the provided Name).
4214 LValue EmitUnsupportedLValue(const Expr *E, const char *Name);
4215
4216 /// EmitLValue - Emit code to compute a designator that specifies the location
4217 /// of the expression.
4218 ///
4219 /// This can return one of two things: a simple address or a bitfield
4220 /// reference. In either case, the LLVM Value* in the LValue structure is
4221 /// guaranteed to be an LLVM pointer type.
4222 ///
4223 /// If this returns a bitfield reference, nothing about the pointee type of
4224 /// the LLVM value is known: For example, it may not be a pointer to an
4225 /// integer.
4226 ///
4227 /// If this returns a normal address, and if the lvalue's C type is fixed
4228 /// size, this method guarantees that the returned pointer type will point to
4229 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
4230 /// variable length type, this is not possible.
4231 ///
4232 LValue EmitLValue(const Expr *E,
4233 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
4234
4235private:
4236 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4237
4238public:
4239 /// Same as EmitLValue but additionally we generate checking code to
4240 /// guard against undefined behavior. This is only suitable when we know
4241 /// that the address will be used to access the object.
4242 LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
4243
4244 RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc);
4245
4246 void EmitAtomicInit(Expr *E, LValue lvalue);
4247
4248 bool LValueIsSuitableForInlineAtomic(LValue Src);
4249
4250 RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
4251 AggValueSlot Slot = AggValueSlot::ignored());
4252
4253 RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
4254 llvm::AtomicOrdering AO, bool IsVolatile = false,
4255 AggValueSlot slot = AggValueSlot::ignored());
4256
4257 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4258
4259 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4260 bool IsVolatile, bool isInit);
4261
4262 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4263 LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
4264 llvm::AtomicOrdering Success =
4265 llvm::AtomicOrdering::SequentiallyConsistent,
4266 llvm::AtomicOrdering Failure =
4267 llvm::AtomicOrdering::SequentiallyConsistent,
4268 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4269
4270 /// Emit an atomicrmw instruction, applying relevant metadata when
4271 /// applicable.
4272 llvm::AtomicRMWInst *emitAtomicRMWInst(
4273 llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
4274 llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
4275 llvm::SyncScope::ID SSID = llvm::SyncScope::System,
4276 const AtomicExpr *AE = nullptr);
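// Editorial sketch: the helper above with its defaulted ordering and sync
// scope, assuming `CGF` is a CodeGenFunction, `Addr` the atomic object's
// Address, and `Val` the operand value.
//
//   llvm::AtomicRMWInst *RMW =
//       CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Add, Addr, Val);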
4277
4278 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4279 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4280 bool IsVolatile);
4281
4282 /// EmitToMemory - Change a scalar value from its value
4283 /// representation to its in-memory representation.
4284 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4285
4286 /// EmitFromMemory - Change a scalar value from its memory
4287 /// representation to its value representation.
4288 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
4289
4290 /// Check if the scalar \p Value is within the valid range for the given
4291 /// type \p Ty.
4292 ///
4293 /// Returns true if a check is needed (even if the range is unknown).
4294 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4295 SourceLocation Loc);
4296
4297 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4298 /// care to appropriately convert from the memory representation to
4299 /// the LLVM value representation.
4300 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4301 SourceLocation Loc,
4302 AlignmentSource Source = AlignmentSource::Type,
4303 bool isNontemporal = false) {
4304 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
4305 CGM.getTBAAAccessInfo(Ty), isNontemporal);
4306 }
4307
4308 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4309 SourceLocation Loc, LValueBaseInfo BaseInfo,
4310 TBAAAccessInfo TBAAInfo,
4311 bool isNontemporal = false);
4312
4313 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4314 /// care to appropriately convert from the memory representation to
4315 /// the LLVM value representation. The l-value must be a simple
4316 /// l-value.
4317 llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
4318
4319 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4320 /// care to appropriately convert from the memory representation to
4321 /// the LLVM value representation.
4322 void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
4323 QualType Ty,
4324 AlignmentSource Source = AlignmentSource::Type,
4325 bool isInit = false, bool isNontemporal = false) {
4326 EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
4327 CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
4328 }
4329
4330 void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
4331 QualType Ty, LValueBaseInfo BaseInfo,
4332 TBAAAccessInfo TBAAInfo, bool isInit = false,
4333 bool isNontemporal = false);
4334
4335 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4336 /// care to appropriately convert from the memory representation to
4337 /// the LLVM value representation. The l-value must be a simple
4338 /// l-value. The isInit flag indicates whether this is an initialization.
4339 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
4340 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
4341 bool isInit = false);
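// Editorial sketch: the scalar load/store pair declared above, assuming
// `CGF` is a CodeGenFunction, `Loc` a SourceLocation, and SrcLV/DstLV simple
// LValues of the same type; both calls handle the conversion between the
// memory and value representations.
//
//   llvm::Value *V = CGF.EmitLoadOfScalar(SrcLV, Loc);
//   CGF.EmitStoreOfScalar(V, DstLV, /*isInit=*/false);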
4342
4343 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
4344 /// this method emits the address of the lvalue, then loads the result as an
4345 /// rvalue, returning the rvalue.
4346 RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
4347 RValue EmitLoadOfExtVectorElementLValue(LValue V);
4348 RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
4349 RValue EmitLoadOfGlobalRegLValue(LValue LV);
4350
4351 /// Like EmitLoadOfLValue but also handles complex and aggregate types.
4352 RValue EmitLoadOfAnyValue(LValue V,
4353 AggValueSlot Slot = AggValueSlot::ignored(),
4354 SourceLocation Loc = {});
4355
4356 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
4357 /// lvalue, where both are guaranteed to have the same type, and that type
4358 /// is 'Ty'.
4359 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
4360 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
4361 void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
4362
4363 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
4364 /// as EmitStoreThroughLValue.
4365 ///
4366 /// \param Result [out] - If non-null, this will be set to a Value* for the
4367 /// bit-field contents after the store, appropriate for use as the result of
4368 /// an assignment to the bit-field.
4369 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
4370 llvm::Value **Result = nullptr);
4371
4372 /// Emit an l-value for an assignment (simple or compound) of complex type.
4373 LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
4374 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
4375 LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
4376 llvm::Value *&Result);
4377
4378 // Note: only available for agg return types
4379 LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
4380 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
4381 // Note: only available for agg return types
4382 LValue EmitCallExprLValue(const CallExpr *E,
4383 llvm::CallBase **CallOrInvoke = nullptr);
4384 // Note: only available for agg return types
4385 LValue EmitVAArgExprLValue(const VAArgExpr *E);
4386 LValue EmitDeclRefLValue(const DeclRefExpr *E);
4387 LValue EmitStringLiteralLValue(const StringLiteral *E);
4388 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
4389 LValue EmitPredefinedLValue(const PredefinedExpr *E);
4390 LValue EmitUnaryOpLValue(const UnaryOperator *E);
4391 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4392 bool Accessed = false);
4393 llvm::Value *EmitMatrixIndexExpr(const Expr *E);
4394 LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
4395 LValue EmitArraySectionExpr(const ArraySectionExpr *E,
4396 bool IsLowerBound = true);
4397 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
4398 LValue EmitMemberExpr(const MemberExpr *E);
4399 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
4400 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
4401 LValue EmitInitListLValue(const InitListExpr *E);
4402 void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
4403 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
4404 LValue EmitCastLValue(const CastExpr *E);
4405 LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
4406 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
4407 LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E);
4408
4409 std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
4410 QualType Ty);
4411 LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args,
4412 QualType Ty);
4413
4414 Address EmitExtVectorElementLValue(LValue V);
4415
4416 RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
4417
4418 Address EmitArrayToPointerDecay(const Expr *Array,
4419 LValueBaseInfo *BaseInfo = nullptr,
4420 TBAAAccessInfo *TBAAInfo = nullptr);
4421
4422 class ConstantEmission {
4423 llvm::PointerIntPair<llvm::Constant *, 1, bool> ValueAndIsReference;
4424 ConstantEmission(llvm::Constant *C, bool isReference)
4425 : ValueAndIsReference(C, isReference) {}
4426
4427 public:
4428 ConstantEmission() {}
4429 static ConstantEmission forReference(llvm::Constant *C) {
4430 return ConstantEmission(C, true);
4431 }
4432 static ConstantEmission forValue(llvm::Constant *C) {
4433 return ConstantEmission(C, false);
4434 }
4435
4436 explicit operator bool() const {
4437 return ValueAndIsReference.getOpaqueValue() != nullptr;
4438 }
4439
4440 bool isReference() const { return ValueAndIsReference.getInt(); }
4441 LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const {
4442 assert(isReference());
4443 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
4444 RefExpr->getType());
4445 }
4446
4447 llvm::Constant *getValue() const {
4448 assert(!isReference());
4449 return ValueAndIsReference.getPointer();
4450 }
4451 };
4452
4453 ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr);
4454 ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
4455 llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
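// Illustrative sketch (not from this header): tryEmitAsConstant lets a use of
// a constant-initialized entity be emitted as an LLVM constant instead of a
// load, e.g. for C++ like
//
//   constexpr int N = 42;
//   int f() { return N; }  // the DeclRefExpr 'N' may fold to the constant 42
//
// forValue() then carries the folded value, while forReference() carries an
// address that is later wrapped into an LValue via getReferenceLValue().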
4456
4457 RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
4458 AggValueSlot slot = AggValueSlot::ignored());
4459 LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
4460
4461 void FlattenAccessAndType(
4462 Address Addr, QualType AddrTy,
4463 SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
4464 SmallVectorImpl<QualType> &FlatTypes);
4465
4466 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
4467 const ObjCIvarDecl *Ivar);
4468 llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
4469 const ObjCIvarDecl *Ivar);
4470 LValue EmitLValueForField(LValue Base, const FieldDecl *Field,
4471 bool IsInBounds = true);
4472 LValue EmitLValueForLambdaField(const FieldDecl *Field);
4473 LValue EmitLValueForLambdaField(const FieldDecl *Field,
4474 llvm::Value *ThisValue);
4475
4476 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
4477 /// if the Field is a reference, this will return the address of the reference
4478 /// and not the address of the value stored in the reference.
4479 LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field);
4480
4481 LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base,
4482 const ObjCIvarDecl *Ivar, unsigned CVRQualifiers);
4483
4484 LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
4485 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
4486 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
4487 LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
4488
4489 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
4490 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
4491 LValue EmitStmtExprLValue(const StmtExpr *E);
4492 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
4493 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
4494 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
4495
4496 //===--------------------------------------------------------------------===//
4497 // Scalar Expression Emission
4498 //===--------------------------------------------------------------------===//
4499
4500 /// EmitCall - Generate a call of the given function, expecting the given
4501 /// result type, and using the given argument list which specifies both the
4502 /// LLVM arguments and the types they were derived from.
4503 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4504 ReturnValueSlot ReturnValue, const CallArgList &Args,
4505 llvm::CallBase **CallOrInvoke, bool IsMustTail,
4506 SourceLocation Loc,
4507 bool IsVirtualFunctionPointerThunk = false);
4508 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4509 ReturnValueSlot ReturnValue, const CallArgList &Args,
4510 llvm::CallBase **CallOrInvoke = nullptr,
4511 bool IsMustTail = false) {
4512 return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
4513 IsMustTail, SourceLocation());
4514 }
4515 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4516 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
4517 llvm::CallBase **CallOrInvoke = nullptr,
4518 CGFunctionInfo const **ResolvedFnInfo = nullptr);
4519
4520 // If a Call or Invoke instruction was emitted for this CallExpr, this method
4521 // writes the pointer to `CallOrInvoke` if it's not null.
4522 RValue EmitCallExpr(const CallExpr *E,
4523 ReturnValueSlot ReturnValue = ReturnValueSlot(),
4524 llvm::CallBase **CallOrInvoke = nullptr);
4525 RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4526 llvm::CallBase **CallOrInvoke = nullptr);
4527 CGCallee EmitCallee(const Expr *E);
4528
4529 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4530 void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
4531
4532 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4533 const Twine &name = "");
4534 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4535 ArrayRef<llvm::Value *> args,
4536 const Twine &name = "");
4537 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4538 const Twine &name = "");
4539 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4540 ArrayRef<Address> args,
4541 const Twine &name = "");
4542 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4543 ArrayRef<llvm::Value *> args,
4544 const Twine &name = "");
4545
4546 SmallVector<llvm::OperandBundleDef, 1>
4547 getBundlesForFunclet(llvm::Value *Callee);
4548
4549 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4550 ArrayRef<llvm::Value *> Args = {},
4551 const Twine &Name = "");
4552 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4553 ArrayRef<llvm::Value *> args,
4554 const Twine &name = "");
4555 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4556 const Twine &name = "");
4557 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4558 ArrayRef<llvm::Value *> args);
4559
4560 CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
4561 NestedNameSpecifier Qual, llvm::Type *Ty);
4562
4563 CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
4564 CXXDtorType Type,
4565 const CXXRecordDecl *RD);
4566
4567 bool isPointerKnownNonNull(const Expr *E);
4568 /// Check whether the underlying base pointer is a constant null.
4569 bool isUnderlyingBasePointerConstantNull(const Expr *E);
4570
4571 /// Create the discriminator from the storage address and the entity hash.
4572 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4573 llvm::Value *Discriminator);
4574 CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
4575 llvm::Value *StorageAddress,
4576 GlobalDecl SchemaDecl,
4577 QualType SchemaType);
4578
4579 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4580 llvm::Value *Pointer);
4581
4582 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4583 llvm::Value *Pointer);
4584
4585 llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
4586 const CGPointerAuthInfo &CurAuthInfo,
4587 const CGPointerAuthInfo &NewAuthInfo,
4588 bool IsKnownNonNull);
4589 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4590 const CGPointerAuthInfo &CurInfo,
4591 const CGPointerAuthInfo &NewInfo);
4592
4593 void EmitPointerAuthOperandBundle(
4594 const CGPointerAuthInfo &Info,
4595 SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
4596
4597 CGPointerAuthInfo EmitPointerAuthInfo(PointerAuthQualifier Qualifier,
4598 Address StorageAddress);
4599 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4600 llvm::Value *Pointer, QualType ValueType,
4601 Address StorageAddress,
4602 bool IsKnownNonNull);
4603 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4604 const Expr *PointerExpr,
4605 Address StorageAddress);
4606 llvm::Value *EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier,
4607 llvm::Value *Pointer,
4608 QualType ValueType,
4609 Address StorageAddress,
4610 bool IsKnownNonNull);
4611 void EmitPointerAuthCopy(PointerAuthQualifier Qualifier, QualType Type,
4612 Address DestField, Address SrcField);
4613
4614 std::pair<llvm::Value *, CGPointerAuthInfo>
4615 EmitOrigPointerRValue(const Expr *E);
4616
4617 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4618 QualType SourceType, QualType DestType);
4619 Address authPointerToPointerCast(Address Ptr, QualType SourceType,
4620 QualType DestType);
4621
4622 Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);
4623
4624 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4625 return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
4626 }
4627
4628 // Return the copy constructor name with the prefix "__copy_constructor_"
4629 // removed.
4630 static std::string getNonTrivialCopyConstructorStr(QualType QT,
4631 CharUnits Alignment,
4632 bool IsVolatile,
4633 ASTContext &Ctx);
4634
4635 // Return the destructor name with the prefix "__destructor_" removed.
4636 static std::string getNonTrivialDestructorStr(QualType QT,
4637 CharUnits Alignment,
4638 bool IsVolatile,
4639 ASTContext &Ctx);
4640
4641 // These functions emit calls to the special functions of non-trivial C
4642 // structs.
4643 void defaultInitNonTrivialCStructVar(LValue Dst);
4644 void callCStructDefaultConstructor(LValue Dst);
4645 void callCStructDestructor(LValue Dst);
4646 void callCStructCopyConstructor(LValue Dst, LValue Src);
4647 void callCStructMoveConstructor(LValue Dst, LValue Src);
4648 void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
4649 void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
4650
4651 RValue EmitCXXMemberOrOperatorCall(
4652 const CXXMethodDecl *Method, const CGCallee &Callee,
4653 ReturnValueSlot ReturnValue, llvm::Value *This,
4654 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
4655 CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
4656 RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
4657 llvm::Value *This, QualType ThisTy,
4658 llvm::Value *ImplicitParam,
4659 QualType ImplicitParamTy, const CallExpr *E,
4660 llvm::CallBase **CallOrInvoke = nullptr);
4661 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
4662 ReturnValueSlot ReturnValue,
4663 llvm::CallBase **CallOrInvoke = nullptr);
4664 RValue EmitCXXMemberOrOperatorMemberCallExpr(
4665 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
4666 bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
4667 const Expr *Base, llvm::CallBase **CallOrInvoke);
4668 // Compute the object pointer.
4669 Address EmitCXXMemberDataPointerAddress(
4670 const Expr *E, Address base, llvm::Value *memberPtr,
4671 const MemberPointerType *memberPtrType, bool IsInBounds,
4672 LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr);
4673 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
4674 ReturnValueSlot ReturnValue,
4675 llvm::CallBase **CallOrInvoke);
4676
4677 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
4678 const CXXMethodDecl *MD,
4679 ReturnValueSlot ReturnValue,
4680 llvm::CallBase **CallOrInvoke);
4681 RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
4682
4683 RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
4684 ReturnValueSlot ReturnValue,
4685 llvm::CallBase **CallOrInvoke);
4686
4687 RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
4688 RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
4689
4690 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4691 const CallExpr *E, ReturnValueSlot ReturnValue);
4692
4693 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4694
4695 /// Emit IR for __builtin_os_log_format.
4696 RValue emitBuiltinOSLogFormat(const CallExpr &E);
4697
4698 /// Emit IR for __builtin_is_aligned.
4699 RValue EmitBuiltinIsAligned(const CallExpr *E);
4700 /// Emit IR for __builtin_align_up/__builtin_align_down.
4701 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
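// Illustrative sketch (not from this header): these lower the Clang alignment
// builtins, e.g. given some 'char *p'
//
//   _Bool aligned = __builtin_is_aligned(p, 16);  // is p a multiple of 16?
//   char *up      = __builtin_align_up(p, 16);    // round p up to 16 bytes
//   char *down    = __builtin_align_down(p, 16);  // round p down to 16 bytes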
4702
4703 llvm::Function *generateBuiltinOSLogHelperFunction(
4704 const analyze_os_log::OSLogBufferLayout &Layout,
4705 CharUnits BufferAlignment);
4706
4707 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4708 llvm::CallBase **CallOrInvoke);
4709
4710 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4711 /// is unhandled by the current target.
4712 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4713 ReturnValueSlot ReturnValue);
4714
4715 llvm::Value *
4716 EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4717 const llvm::CmpInst::Predicate Pred,
4718 const llvm::Twine &Name = "");
4719 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4720 ReturnValueSlot ReturnValue,
4721 llvm::Triple::ArchType Arch);
4722 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4723 ReturnValueSlot ReturnValue,
4724 llvm::Triple::ArchType Arch);
4725 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4726 ReturnValueSlot ReturnValue,
4727 llvm::Triple::ArchType Arch);
4728 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4729 QualType RTy);
4730 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4731 QualType RTy);
4732
4733 llvm::Value *
4734 EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic,
4735 unsigned AltLLVMIntrinsic, const char *NameHint,
4736 unsigned Modifier, const CallExpr *E,
4737 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0,
4738 Address PtrOp1, llvm::Triple::ArchType Arch);
4739
4740 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4741 unsigned Modifier, llvm::Type *ArgTy,
4742 const CallExpr *E);
4743 llvm::Value *EmitNeonCall(llvm::Function *F,
4744 SmallVectorImpl<llvm::Value *> &O, const char *name,
4745 unsigned shift = 0, bool rightshift = false);
4746 llvm::Value *EmitFP8NeonCall(unsigned IID, ArrayRef<llvm::Type *> Tys,
4748 const CallExpr *E, const char *name);
4749 llvm::Value *EmitFP8NeonCvtCall(unsigned IID, llvm::Type *Ty0,
4750 llvm::Type *Ty1, bool Extract,
4752 const CallExpr *E, const char *name);
4753 llvm::Value *EmitFP8NeonFDOTCall(unsigned IID, bool ExtendLaneArg,
4754 llvm::Type *RetTy,
4756 const CallExpr *E, const char *name);
4757 llvm::Value *EmitFP8NeonFMLACall(unsigned IID, bool ExtendLaneArg,
4758 llvm::Type *RetTy,
4760 const CallExpr *E, const char *name);
4761 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4762 const llvm::ElementCount &Count);
4763 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4764 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4765 bool negateForRightShift);
4766 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4767 llvm::Type *Ty, bool usgn, const char *name);
4768 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4769 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4770 /// access builtin. Only required if it can't be inferred from the base
4771 /// pointer operand.
4772 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4773
4775 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4777 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4778 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4779 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4780 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4782 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4783 llvm::Type *ReturnType,
4785 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4786 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4787 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4788 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4789 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4791 unsigned BuiltinID);
4792 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4794 unsigned BuiltinID);
4795 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4796 llvm::ScalableVectorType *VTy);
4797 llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
4798 llvm::StructType *Ty);
4799 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4801 unsigned IntID);
4802 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4804 unsigned IntID);
4805 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4807 unsigned BuiltinID, bool IsZExtReturn);
4808 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4810 unsigned BuiltinID);
4811 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4813 unsigned BuiltinID);
4814 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4816 unsigned IntID);
4817 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4819 unsigned IntID);
4820 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4822 unsigned IntID);
4823 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4824
4825 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4827 unsigned IntID);
4828 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4830 unsigned IntID);
4831 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4833 unsigned IntID);
4834 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4836 unsigned IntID);
4837
4838 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4840 SVETypeFlags TypeFlags);
4841
4842 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4843
4844 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4845 llvm::Triple::ArchType Arch);
4846 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4847
4848 llvm::Value *BuildVector(ArrayRef<llvm::Value *> Ops);
4849 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4850 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4851 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4852 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4853 ReturnValueSlot ReturnValue);
4854
4855 // Returns a builtin function that the SPIR-V backend will expand into a spec
4856 // constant.
4857 llvm::Function *
4858 getSpecConstantFunction(const clang::QualType &SpecConstantType);
4859
4860 llvm::Value *EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4861 llvm::Value *EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4862 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4863 const CallExpr *E);
4864 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4865 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4866 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4867 const CallExpr *E);
4868 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4869 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4870 ReturnValueSlot ReturnValue);
4871
4872 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4873 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4874 llvm::Value *EmitRISCVCpuInit();
4875 llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
4876 llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);
4877
4878 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4879 const CallExpr *E);
4880 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4881 llvm::AtomicOrdering &AO,
4882 llvm::SyncScope::ID &SSID);
4883
4884 enum class MSVCIntrin;
4885 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4886
4887 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4888
4889 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
4890 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
4891 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4892 llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
4893 llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
4894 llvm::Value *
4895 EmitObjCCollectionLiteral(const Expr *E,
4896 const ObjCMethodDecl *MethodWithObjects);
4897 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
4898 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
4899 ReturnValueSlot Return = ReturnValueSlot());
4900
4901 /// Retrieves the default cleanup kind for an ARC cleanup.
4902 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4903 CleanupKind getARCCleanupKind() {
4904 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions ? NormalAndEHCleanup
4905 : NormalCleanup;
4906 }
4907
4908 // ARC primitives.
4909 void EmitARCInitWeak(Address addr, llvm::Value *value);
4910 void EmitARCDestroyWeak(Address addr);
4911 llvm::Value *EmitARCLoadWeak(Address addr);
4912 llvm::Value *EmitARCLoadWeakRetained(Address addr);
4913 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4914 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4915 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4916 void EmitARCCopyWeak(Address dst, Address src);
4917 void EmitARCMoveWeak(Address dst, Address src);
4918 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4919 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4920 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4921 bool resultIgnored);
4922 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4923 bool resultIgnored);
4924 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4925 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4926 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4927 void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
4928 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4929 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4930 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4931 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4932 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4933 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4934
4935 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4936 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4937 llvm::Type *returnType);
4938 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4939
4940 std::pair<LValue, llvm::Value *>
4941 EmitARCStoreAutoreleasing(const BinaryOperator *e);
4942 std::pair<LValue, llvm::Value *> EmitARCStoreStrong(const BinaryOperator *e,
4943 bool ignored);
4944 std::pair<LValue, llvm::Value *>
4945 EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
4946
4947 llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType);
4948 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
4949 llvm::Type *returnType);
4950 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
4951
4952 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
4953 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
4954 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
4955
4956 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
4957 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
4958 bool allowUnsafeClaim);
4959 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
4960 llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
4961 llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
4962
4963 void EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values);
4964
4965 void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
4966
4967 static Destroyer destroyARCStrongImprecise;
4968 static Destroyer destroyARCStrongPrecise;
4969 static Destroyer destroyARCWeak;
4970 static Destroyer emitARCIntrinsicUse;
4971 static Destroyer destroyNonTrivialCStruct;
4972
4973 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
4974 llvm::Value *EmitObjCAutoreleasePoolPush();
4975 llvm::Value *EmitObjCMRRAutoreleasePoolPush();
4976 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
4977 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
4978
4979 /// Emits a reference binding to the passed in expression.
4980 RValue EmitReferenceBindingToExpr(const Expr *E);
4981
4982 //===--------------------------------------------------------------------===//
4983 // Expression Emission
4984 //===--------------------------------------------------------------------===//
4985
4986 // Expressions are broken into three classes: scalar, complex, aggregate.
4987
4988 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4989 /// scalar type, returning the result.
4990 llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
4991
4992 /// Emit a conversion from the specified type to the specified destination
4993 /// type, both of which are LLVM scalar types.
4994 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
4995 QualType DstTy, SourceLocation Loc);
4996
4997 /// Emit a conversion from the specified complex type to the specified
4998 /// destination type, where the destination type is an LLVM scalar type.
4999 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
5000 QualType DstTy,
5001 SourceLocation Loc);
5002
5003 /// EmitAggExpr - Emit the computation of the specified expression
5004 /// of aggregate type. The result is computed into the given slot,
5005 /// which may be null to indicate that the value is not needed.
5006 void EmitAggExpr(const Expr *E, AggValueSlot AS);
5007
5008 /// EmitAggExprToLValue - Emit the computation of the specified expression of
5009 /// aggregate type into a temporary LValue.
5010 LValue EmitAggExprToLValue(const Expr *E);
5011
5012 enum ExprValueKind { EVK_RValue, EVK_NonRValue };
5013
5014 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
5015 /// destination address.
5016 void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
5017 ExprValueKind SrcKind);
5018
5019 /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
5020 /// to at most \arg DstSize bytes.
5021 void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
5022 bool DstIsVolatile);
5023
5024 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
5025 /// make sure it survives garbage collection until this point.
5026 void EmitExtendGCLifetime(llvm::Value *object);
5027
5028 /// EmitComplexExpr - Emit the computation of the specified expression of
5029 /// complex type, returning the result.
5030 ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
5031 bool IgnoreImag = false);
5032
5033 /// EmitComplexExprIntoLValue - Emit the given expression of complex
5034 /// type and place its result into the specified l-value.
5035 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
5036
5037 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
5038 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
5039
5040 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
5041 ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
5042
5043 ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
5044 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
5045 ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
5046 ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
5047 QualType PromotionType);
5048
5049 Address emitAddrOfRealComponent(Address complex, QualType complexType);
5050 Address emitAddrOfImagComponent(Address complex, QualType complexType);
5051
5052 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
5053 /// global variable that has already been created for it. If the initializer
5054 /// has a different type than GV does, this may free GV and return a different
5055 /// one. Otherwise it just returns GV.
5056 llvm::GlobalVariable *AddInitializerToStaticVarDecl(const VarDecl &D,
5057 llvm::GlobalVariable *GV);
5058
5059 // Emit an @llvm.invariant.start call for the given memory region.
5060 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
5061
5062 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
5063 /// variable with global storage.
5064 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
5065 bool PerformInit);
5066
5067 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
5068 llvm::Constant *Addr);
5069
5070 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
5071 llvm::FunctionCallee Dtor,
5072 llvm::Constant *Addr,
5073 llvm::FunctionCallee &AtExit);
5074
5075 /// Call atexit() with a function that passes the given argument to
5076 /// the given function.
5077 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
5078 llvm::Constant *addr);
5079
5080 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
5081 /// support an 'atexit()' function.
5082 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
5083 llvm::Constant *addr);
5084
5085 /// Call atexit() with function dtorStub.
5086 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
5087
5088 /// Call unatexit() with function dtorStub.
5089 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
5090
5091 /// Emit code in this function to perform a guarded variable
5092 /// initialization. Guarded initializations are used when it's not
5093 /// possible to prove that an initialization will be done exactly
5094 /// once, e.g. with a static local variable or a static data member
5095 /// of a class template.
5096 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
5097 bool PerformInit);
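// Illustrative sketch (not from this header): a guarded initialization is
// needed for code such as
//
//   Widget &get() {
//     static Widget w;  // 'Widget' is a hypothetical class with a non-trivial
//     return w;         // constructor; a guard variable makes the
//   }                   // construction run exactly once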
5098
5099 enum class GuardKind { VariableGuard, TlsGuard };
5100
5101 /// Emit a branch to select whether or not to perform guarded initialization.
5102 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
5103 llvm::BasicBlock *InitBlock,
5104 llvm::BasicBlock *NoInitBlock, GuardKind Kind,
5105 const VarDecl *D);
5106
5107 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
5108 /// variables.
5109 void
5110 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
5111 ArrayRef<llvm::Function *> CXXThreadLocals,
5112 ConstantAddress Guard = ConstantAddress::invalid());
5113
5114 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
5115 /// variables.
5116 void GenerateCXXGlobalCleanUpFunc(
5117 llvm::Function *Fn,
5118 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
5119 llvm::Constant *>>
5120 DtorsOrStermFinalizers);
5121
5122 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
5123 llvm::GlobalVariable *Addr,
5124 bool PerformInit);
5125
5126 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
5127
5128 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
5129
5130 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
5131
5132 RValue EmitAtomicExpr(AtomicExpr *E);
5133
5134 void EmitFakeUse(Address Addr);
5135
5136 //===--------------------------------------------------------------------===//
5137 // Annotations Emission
5138 //===--------------------------------------------------------------------===//
5139
5140 /// Emit an annotation call (intrinsic).
5141 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
5142 llvm::Value *AnnotatedVal,
5143 StringRef AnnotationStr,
5144 SourceLocation Location,
5145 const AnnotateAttr *Attr);
5146
5147 /// Emit local annotations for the local variable V, declared by D.
5148 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
5149
5150 /// Emit field annotations for the given field & value. Returns the
5151 /// annotation result.
5152 Address EmitFieldAnnotations(const FieldDecl *D, Address V);
5153
5154 //===--------------------------------------------------------------------===//
5155 // Internal Helpers
5156 //===--------------------------------------------------------------------===//
5157
5158 /// ContainsLabel - Return true if the statement contains a label in it. If
5159 /// this statement is not executed normally, and it does not contain a label,
5160 /// then the code can simply be removed.
5161 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
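// Illustrative sketch (not from this header): a label keeps otherwise-dead
// code alive, e.g.
//
//   if (0) {           // the condition folds to false, but...
//   target:            // ...this label can still be reached by the goto,
//     do_work();       // so the block cannot simply be dropped
//   }
//   goto target;
//
// ('do_work' is a placeholder for arbitrary statements.)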
5162
5163 /// containsBreak - Return true if the statement contains a break out of it.
5164 /// If the statement (recursively) contains a switch or loop with a break
5165 /// inside of it, this is fine.
5166 static bool containsBreak(const Stmt *S);
5167
5168 /// Determine if the given statement might introduce a declaration into the
5169 /// current scope, by being a (possibly-labelled) DeclStmt.
5170 static bool mightAddDeclToScope(const Stmt *S);
5171
5172 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5173 /// to a constant, or if it does but contains a label, return false. If it
5174 /// constant folds return true and set the boolean result in Result.
5175 bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
5176 bool AllowLabels = false);
5177
5178 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5179 /// to a constant, or if it does but contains a label, return false. If it
5180 /// constant folds return true and set the folded value.
5181 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
5182 bool AllowLabels = false);
5183
5184 /// Ignore parentheses and logical-NOT to track conditions consistently.
5185 static const Expr *stripCond(const Expr *C);
5186
5187 /// isInstrumentedCondition - Determine whether the given condition is an
5188 /// instrumentable condition (i.e. no "&&" or "||").
5189 static bool isInstrumentedCondition(const Expr *C);
5190
5191 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
5192 /// increments a profile counter based on the semantics of the given logical
5193 /// operator opcode. This is used to instrument branch condition coverage
5194 /// for logical operators.
5195 void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
5196 llvm::BasicBlock *TrueBlock,
5197 llvm::BasicBlock *FalseBlock,
5198 uint64_t TrueCount = 0,
5199 Stmt::Likelihood LH = Stmt::LH_None,
5200 const Expr *CntrIdx = nullptr);
5201
5202 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
5203 /// if statement) to the specified blocks. Based on the condition, this might
5204 /// try to simplify the codegen of the conditional based on the branch.
5205 /// TrueCount should be the number of times we expect the condition to
5206 /// evaluate to true based on PGO data.
5207 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
5208 llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
5209 Stmt::Likelihood LH = Stmt::LH_None,
5210 const Expr *ConditionalOp = nullptr,
5211 const VarDecl *ConditionalDecl = nullptr);
5212
5213 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
5214 /// nonnull, if \p LHS is marked _Nonnull.
5215 void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
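// Illustrative sketch (not from this header): with -fsanitize=nullability-assign,
// an assignment through a _Nonnull l-value is checked at runtime, e.g.
//
//   int *_Nonnull dst;
//   void set(int *src) { dst = src; }  // emits a check that src != NULL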
5216
5217 /// An enumeration which makes it easier to specify whether or not an
5218 /// operation is a subtraction.
5219 enum { NotSubtraction = false, IsSubtraction = true };
5220
5221 /// Emit pointer + index arithmetic.
5222 llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
5223 Expr *pointerOperand, llvm::Value *pointer,
5224 Expr *indexOperand, llvm::Value *index,
5225 bool isSubtraction);
5226
5227 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
5228 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
5229 /// \p SignedIndices indicates whether any of the GEP indices are signed.
5230 /// \p IsSubtraction indicates whether the expression used to form the GEP
5231 /// is a subtraction.
5232 llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
5233 ArrayRef<llvm::Value *> IdxList,
5234 bool SignedIndices, bool IsSubtraction,
5235 SourceLocation Loc,
5236 const Twine &Name = "");
5237
5238 Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
5239 llvm::Type *elementType, bool SignedIndices,
5240 bool IsSubtraction, SourceLocation Loc,
5241 CharUnits Align, const Twine &Name = "");
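// Illustrative sketch (not from this header): with -fsanitize=pointer-overflow,
// the emitted check catches pointer arithmetic that wraps the address space, e.g.
//
//   char *advance(char *p, size_t n) {
//     return p + n;  // the check fires if p + n wraps around
//   }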
5242
5243 /// Specifies which type of sanitizer check to apply when handling a
5244 /// particular builtin.
5245 enum BuiltinCheckKind {
5246 BCK_CTZPassedZero,
5247 BCK_CLZPassedZero,
5248 BCK_AssumePassedFalse,
5249 };
5250
5251 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
5252 /// enabled, a runtime check specified by \p Kind is also emitted.
5253 llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
5254
5255 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
5256 /// sanitizer is enabled, a runtime check is also emitted.
5257 llvm::Value *EmitCheckedArgForAssume(const Expr *E);
5258
5259 /// Emit a description of a type in a format suitable for passing to
5260 /// a runtime sanitizer handler.
5261 llvm::Constant *EmitCheckTypeDescriptor(QualType T);
5262
5263 /// Convert a value into a format suitable for passing to a runtime
5264 /// sanitizer handler.
5265 llvm::Value *EmitCheckValue(llvm::Value *V);
5266
5267 /// Emit a description of a source location in a format suitable for
5268 /// passing to a runtime sanitizer handler.
5269 llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
5270
5271 void EmitKCFIOperandBundle(const CGCallee &Callee,
5272 SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
5273
5274 /// Create a basic block that will either trap or call a handler function in
5275 /// the UBSan runtime with the provided arguments, and create a conditional
5276 /// branch to it.
5277 void
5278 EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
5279 Checked,
5280 SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
5281 ArrayRef<llvm::Value *> DynamicArgs,
5282 const TrapReason *TR = nullptr);
5283
5284 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
5285 /// if Cond is false.
5286 void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal,
5287 llvm::Value *Cond, llvm::ConstantInt *TypeId,
5288 llvm::Value *Ptr,
5289 ArrayRef<llvm::Constant *> StaticArgs);
5290
5291 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
5292 /// checking is enabled. Otherwise, just emit an unreachable instruction.
5293 void EmitUnreachable(SourceLocation Loc);
5294
5295 /// Create a basic block that will call the trap intrinsic, and emit a
5296 /// conditional branch to it, for the -ftrapv checks.
5297 void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
5298 bool NoMerge = false, const TrapReason *TR = nullptr);
5299
5300 /// Emit a call to trap or debugtrap and attach function attribute
5301 /// "trap-func-name" if specified.
5302 llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
5303
5304 /// Emit a stub for the cross-DSO CFI check function.
5305 void EmitCfiCheckStub();
5306
5307 /// Emit a cross-DSO CFI failure handling function.
5308 void EmitCfiCheckFail();
5309
5310 /// Create a check for a function parameter that may potentially be
5311 /// declared as non-null.
5312 void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
5313 AbstractCallee AC, unsigned ParmNum);
5314
5315 void EmitNonNullArgCheck(Address Addr, QualType ArgType,
5316 SourceLocation ArgLoc, AbstractCallee AC,
5317 unsigned ParmNum);
5318
5319 /// EmitWritebacks - Emit the writebacks for the call's writeback arguments.
5320 void EmitWritebacks(const CallArgList &Args);
5321
5322 /// EmitCallArg - Emit a single call argument.
5323 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
5324
5325 /// EmitDelegateCallArg - We are performing a delegate call; that
5326 /// is, the current function is delegating to another one. Produce
5327 /// a r-value suitable for passing the given parameter.
5328 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
5329 SourceLocation loc);
5330
5331 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
5332 /// point operation, expressed as the maximum relative error in ulp.
5333 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
5334
5335 /// Set the minimum required accuracy of the given sqrt operation
5336 /// based on CodeGenOpts.
5337 void SetSqrtFPAccuracy(llvm::Value *Val);
5338
5339 /// Set the minimum required accuracy of the given division operation based on
5340 /// CodeGenOpts.
5341 void SetDivFPAccuracy(llvm::Value *Val);
5342
5343 /// Set the codegen fast-math flags.
5344 void SetFastMathFlags(FPOptions FPFeatures);
5345
5346 // Truncate or extend a boolean vector to the requested number of elements.
5347 llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
5348 unsigned NumElementsDst,
5349 const llvm::Twine &Name = "");
5350
5351 void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
5352 SourceLocation Loc);
5353
5354private:
5355 // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
5356 // as its parent convergence instr.
5357 llvm::ConvergenceControlInst *emitConvergenceLoopToken(llvm::BasicBlock *BB);
5358
5359 // Adds a convergence_ctrl token with |ParentToken| as parent convergence
5360 // instr to the call |Input|.
5361 llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input);
5362
5363 // Find the convergence_entry instruction of |F|, or emit one if none exists.
5364 // Returns the convergence instruction.
5365 llvm::ConvergenceControlInst *
5366 getOrEmitConvergenceEntryToken(llvm::Function *F);
5367
5368private:
5369 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
5370 void EmitReturnOfRValue(RValue RV, QualType Ty);
5371
5372 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
5373
5374 llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
5375 DeferredReplacements;
5376
5377 /// Set the address of a local variable.
5378 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5379 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5380 LocalDeclMap.insert({VD, Addr});
5381 }
5382
5383 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
5384 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
5385 ///
5386 /// \param AI - The first function argument of the expansion.
5387 void ExpandTypeFromArgs(QualType Ty, LValue Dst,
5388 llvm::Function::arg_iterator &AI);
5389
5390 /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
5391 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
5392 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
5393 void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
5394 SmallVectorImpl<llvm::Value *> &IRCallArgs,
5395 unsigned &IRCallArgPos);
5396
5397 std::pair<llvm::Value *, llvm::Type *>
5398 EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
5399 std::string &ConstraintStr);
5400
5401 std::pair<llvm::Value *, llvm::Type *>
5402 EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
5403 QualType InputType, std::string &ConstraintStr,
5404 SourceLocation Loc);
5405
5406 /// Attempts to statically evaluate the object size of E. If that
5407 /// fails, emits code to figure the size of E out for us. This is
5408 /// pass_object_size aware.
5409 ///
5410 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
5411 llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
5412 llvm::IntegerType *ResType,
5413 llvm::Value *EmittedE,
5414 bool IsDynamic);
5415
5416 /// Emits the size of E, as required by __builtin_object_size. This
5417 /// function is aware of pass_object_size parameters, and will act accordingly
5418 /// if E is a parameter with the pass_object_size attribute.
5419 llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
5420 llvm::IntegerType *ResType,
5421 llvm::Value *EmittedE, bool IsDynamic);
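// Illustrative sketch (not from this header): for a pass_object_size parameter
// the size comes from the hidden argument rather than being recomputed, e.g.
//
//   size_t capacity(char *buf __attribute__((pass_object_size(0)))) {
//     return __builtin_object_size(buf, 0);  // uses the size passed by callers
//   }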
5422
5423 llvm::Value *emitCountedBySize(const Expr *E, llvm::Value *EmittedE,
5424 unsigned Type, llvm::IntegerType *ResType);
5425
5426 llvm::Value *emitCountedByMemberSize(const MemberExpr *E, const Expr *Idx,
5427 llvm::Value *EmittedE,
5428 QualType CastedArrayElementTy,
5429 unsigned Type,
5430 llvm::IntegerType *ResType);
5431
5432 llvm::Value *emitCountedByPointerSize(const ImplicitCastExpr *E,
5433 const Expr *Idx, llvm::Value *EmittedE,
5434 QualType CastedArrayElementTy,
5435 unsigned Type,
5436 llvm::IntegerType *ResType);
5437
5438 void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
5439 Address Loc);
5440
5441public:
5442 enum class EvaluationOrder {
5443 ///! No language constraints on evaluation order.
5444 Default,
5445 ///! Language semantics require left-to-right evaluation.
5446 ForceLeftToRight,
5447 ///! Language semantics require right-to-left evaluation.
5448 ForceRightToLeft
5449 };
5450
5451 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5452 // an ObjCMethodDecl.
5453 struct PrototypeWrapper {
5454 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5455
5456 PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
5457 PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
5458 };
5459
5460 void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
5461 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
5462 AbstractCallee AC = AbstractCallee(),
5463 unsigned ParamsToSkip = 0,
5464 EvaluationOrder Order = EvaluationOrder::Default);
5465
5466 /// EmitPointerWithAlignment - Given an expression with a pointer type,
5467 /// emit the value and compute our best estimate of the alignment of the
5468 /// pointee.
5469 ///
5470 /// \param BaseInfo - If non-null, this will be initialized with
5471 /// information about the source of the alignment and the may-alias
5472 /// attribute. Note that this function will conservatively fall back on
5473 /// the type when it doesn't recognize the expression and may-alias will
5474 /// be set to false.
5475 ///
5476 /// One reasonable way to use this information is when there's a language
5477 /// guarantee that the pointer must be aligned to some stricter value, and
5478 /// we're simply trying to ensure that sufficiently obvious uses of under-
5479 /// aligned objects don't get miscompiled; for example, a placement new
5480 /// into the address of a local variable. In such a case, it's quite
5481 /// reasonable to just ignore the returned alignment when it isn't from an
5482 /// explicit source.
5483 Address
5484 EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
5485 TBAAAccessInfo *TBAAInfo = nullptr,
5486 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
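// Illustrative sketch (not from this header): the "placement new into a local"
// case mentioned above looks like
//
//   void demo() {
//     int local;
//     int *p = new (&local) int(0);  // passing &local through the void*
//     *p = 1;                        // placement parameter hides its static
//   }                                // alignment, but the language guarantees it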
5487
5488 /// If \p E references a parameter with pass_object_size info or a constant
5489 /// array size modifier, emit the object size divided by the size of \p EltTy.
5490 /// Otherwise return null.
5491 llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
5492
5493 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5494
5495 struct FMVResolverOption {
5496 llvm::Function *Function;
5497 llvm::SmallVector<StringRef, 8> Features;
5498 std::optional<StringRef> Architecture;
5499
5500 FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
5501 std::optional<StringRef> Arch = std::nullopt)
5502 : Function(F), Features(Feats), Architecture(Arch) {}
5503 };
5504
5505 // Emits the body of a multiversion function's resolver. Assumes that the
5506 // options are already sorted in the proper order, with the 'default' option
5507 // last (if it exists).
5508 void EmitMultiVersionResolver(llvm::Function *Resolver,
5509 ArrayRef<FMVResolverOption> Options);
5510 void EmitX86MultiVersionResolver(llvm::Function *Resolver,
5511 ArrayRef<FMVResolverOption> Options);
5512 void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
5513 ArrayRef<FMVResolverOption> Options);
5514 void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
5515 ArrayRef<FMVResolverOption> Options);
5516
5517private:
5518 QualType getVarArgType(const Expr *Arg);
5519
5520 void EmitDeclMetadata();
5521
5522 BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
5523 const AutoVarEmission &emission);
5524
5525 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
5526
5527 llvm::Value *GetValueForARMHint(unsigned BuiltinID);
5528 llvm::Value *EmitX86CpuIs(const CallExpr *E);
5529 llvm::Value *EmitX86CpuIs(StringRef CPUStr);
5530 llvm::Value *EmitX86CpuSupports(const CallExpr *E);
5531 llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
5532 llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
5533 llvm::Value *EmitX86CpuInit();
5534 llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
5535 llvm::Value *EmitAArch64CpuInit();
5536 llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
5537 llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
5538 llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
5539};
5540
5541 inline DominatingLLVMValue::saved_type
5542 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
5543 if (!needsSaving(value))
5544 return saved_type(value, false);
5545
5546 // Otherwise, we need an alloca.
5547 auto align = CharUnits::fromQuantity(
5548 CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
5549 Address alloca =
5550 CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
5551 CGF.Builder.CreateStore(value, alloca);
5552
5553 return saved_type(alloca.emitRawPointer(CGF), true);
5554}
5555
5556 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
5557 saved_type value) {
5558 // If the value says it wasn't saved, trust that it's still dominating.
5559 if (!value.getInt())
5560 return value.getPointer();
5561
5562 // Otherwise, it should be an alloca instruction, as set up in save().
5563 auto alloca = cast<llvm::AllocaInst>(value.getPointer());
5564 return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
5565 alloca->getAlign());
5566}
5567
5568} // end namespace CodeGen
5569
5570// Map the LangOption for floating point exception behavior into
5571// the corresponding enum in the IR.
5572llvm::fp::ExceptionBehavior
5574} // end namespace clang
5575
5576#endif
Enums/classes describing ABI related information about constructors, destructors and thunks.
#define V(N, I)
Definition: ASTContext.h:3597
MatchType Type
StringRef P
static bool CanThrow(Expr *E, ASTContext &Ctx)
Definition: CFG.cpp:2777
static T * buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo, T &&generator)
Lazily build the copy and dispose helpers for a __block variable with the given information.
Definition: CGBlocks.cpp:2521
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, GlobalDecl GD, llvm::Type *Ty, const CXXRecordDecl *RD)
Definition: CGCXX.cpp:243
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:4246
@ ForDeactivation
Definition: CGCleanup.cpp:1213
CodeGenFunction::ComplexPairTy ComplexPairTy
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1376
const Decl * D
enum clang::sema::@1840::IndirectLocalPathEntry::EntryKind Kind
Expr * E
unsigned OldSize
Defines the clang::Expr interface and subclasses for C++ expressions.
const CFGBlock * Block
Definition: HTMLLogger.cpp:152
#define X(type, name)
Definition: Value.h:145
llvm::MachO::Architecture Architecture
Definition: MachO.h:27
llvm::MachO::Target Target
Definition: MachO.h:51
OffloadArch Arch
Definition: OffloadArch.cpp:10
Defines some OpenMP-specific enums and functions.
SanitizerHandler
SourceRange Range
Definition: SemaObjC.cpp:753
VarDecl * Variable
Definition: SemaObjC.cpp:752
SourceLocation Loc
Definition: SemaObjC.cpp:754
const char * Data
This file defines OpenACC AST classes for statement-level contructs.
This file defines OpenMP AST classes for executable directives and clauses.
This file defines SYCL AST classes used to represent calls to SYCL kernels.
C Language Family Type Representation.
StateNode * Previous
a trap message and trap category.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4289
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:7092
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2723
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: TypeBase.h:3738
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6816
Attr - This represents one attribute.
Definition: Attr.h:44
Represents an attribute applied to a statement.
Definition: Stmt.h:2206
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition: Expr.h:4389
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition: Expr.h:4427
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition: Expr.h:4424
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3974
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:4107
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6560
Represents a call to a CUDA kernel function.
Definition: ExprCXX.h:234
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1494
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2604
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2620
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2869
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:481
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1753
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2129
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2349
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:84
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2739
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
Represents a C++ temporary.
Definition: ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1209
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:848
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1069
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2879
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3868
This captures a statement into a function.
Definition: Stmt.h:3855
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3612
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:198
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:253
CharUnits getAlignment() const
Definition: Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:209
void setAlignment(CharUnits Value)
Definition: Address.h:196
llvm::Value * getOffset() const
Definition: Address.h:246
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:186
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:204
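A minimal sketch of reading through an Address; the loadThroughAddress helper is hypothetical and assumes it is compiled inside clang's lib/CodeGen so that CodeGenFunction.h resolves.
#include "CodeGenFunction.h"
#include <cassert>

// Load the value an Address points at, using only the accessors listed above.
static llvm::Value *loadThroughAddress(clang::CodeGen::CodeGenFunction &CGF,
                                       clang::CodeGen::Address Addr) {
  assert(Addr.isValid() && "expected a valid Address");
  // emitRawPointer() authenticates the pointer and folds in any pending
  // offset; the element type and alignment travel with the Address itself.
  return CGF.Builder.CreateAlignedLoad(Addr.getElementType(),
                                       Addr.emitRawPointer(CGF),
                                       Addr.getAlignment());
}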
An aggregate value slot.
Definition: CGValue.h:504
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A pair of helper functions for a __block variable.
Information about the layout of a __block variable.
Definition: CGBlocks.h:136
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:157
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:140
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:132
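A minimal sketch pairing the two CGBuilder helpers above; spillAndReload is an invented name, with the same lib/CodeGen assumption as before.
#include "CodeGenFunction.h"

// Spill a value into a pre-allocated slot and reload it at the slot's
// alignment. CreateStore takes the alignment from the Address implicitly;
// the reload assumes Slot carries no pending offset.
static llvm::Value *spillAndReload(clang::CodeGen::CodeGenFunction &CGF,
                                   llvm::Value *V,
                                   clang::CodeGen::Address Slot) {
  CGF.Builder.CreateStore(V, Slot);
  return CGF.Builder.CreateAlignedLoad(Slot.getElementType(),
                                       Slot.getBasePointer(),
                                       Slot.getAlignment());
}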
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
All available information about a concrete callee.
Definition: CGCall.h:63
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
CGAtomicOptionsRAII(const CGAtomicOptionsRAII &)=delete
CGAtomicOptionsRAII & operator=(const CGAtomicOptionsRAII &)=delete
API for captured statement code generation.
static bool classof(const CGCapturedStmtInfo *)
llvm::SmallDenseMap< const VarDecl *, FieldDecl * > getCaptureFields()
Get the CaptureFields.
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
FieldConstructionScope(CodeGenFunction &CGF, Address This)
A class controlling the emission of a finally block.
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
RAII for preserving necessary info during inlined region body codegen.
InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB)
void Emit(CodeGenFunction &CGF, Flags) override
Emit the cleanup.
RAII for preserving necessary info during Outlined region body codegen.
OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB)
Controls insertion of cancellation exit blocks in worksharing constructs.
OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel)
Save/restore original map of previously emitted local vars in case when we need to duplicate emission...
The class used to assign some variables some temporarily addresses.
bool apply(CodeGenFunction &CGF)
Applies new addresses to the list of the variables.
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr)
Sets the address of the variable LocalVD to be TempAddr in function CGF.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
void restoreMap()
Restore all mapped variables w/o clean up.
bool Privatize()
Privatizes local variables previously registered as private.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
OMPPrivateScope(CodeGenFunction &CGF)
Enter a new OpenMP private scope.
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
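A minimal sketch of the OMPPrivateScope protocol (addPrivate, then Privatize, with the destructor restoring the original mapping); the emitWithPrivateCopy wrapper is hypothetical.
#include "CodeGenFunction.h"

// Emit Body with VD remapped to PrivateAddr, as an OpenMP region would.
static void emitWithPrivateCopy(clang::CodeGen::CodeGenFunction &CGF,
                                const clang::VarDecl *VD,
                                clang::CodeGen::Address PrivateAddr,
                                const clang::Stmt *Body) {
  clang::CodeGen::CodeGenFunction::OMPPrivateScope PrivScope(CGF);
  (void)PrivScope.addPrivate(VD, PrivateAddr); // register the replacement
  (void)PrivScope.Privatize();                 // install the remapping
  CGF.EmitStmt(Body);                          // uses of VD now see PrivateAddr
  // ~OMPPrivateScope() restores the original addresses on scope exit.
}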
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?...
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue)
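A minimal sketch of binding an OpaqueValueExpr for nested emission; emitWithBoundOpaqueValue is an invented wrapper name.
#include "CodeGenFunction.h"

// While Mapping is alive, emitting OV inside Consumer resolves to Bound.
static void emitWithBoundOpaqueValue(clang::CodeGen::CodeGenFunction &CGF,
                                     const clang::OpaqueValueExpr *OV,
                                     clang::CodeGen::LValue Bound,
                                     const clang::Expr *Consumer) {
  clang::CodeGen::CodeGenFunction::OpaqueValueMapping Mapping(CGF, OV, Bound);
  (void)CGF.EmitAnyExpr(Consumer);
  // ~OpaqueValueMapping() clears the binding.
}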
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
ParentLoopDirectiveForScanRegion(CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan)
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
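A minimal sketch of the RunCleanupsScope RAII pattern; emitStmtWithCleanups is hypothetical.
#include "CodeGenFunction.h"

// Give S its own cleanup scope so temporaries it creates are destroyed
// before control continues past it.
static void emitStmtWithCleanups(clang::CodeGen::CodeGenFunction &CGF,
                                 const clang::Stmt *S) {
  clang::CodeGen::CodeGenFunction::RunCleanupsScope Scope(CGF);
  CGF.EmitStmt(S);
  if (Scope.requiresCleanups())
    Scope.ForceCleanup(); // otherwise ~RunCleanupsScope() emits them
}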
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
static Destroyer destroyNonTrivialCStruct
bool isBinaryLogicalOp(const Expr *E) const
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
SanitizerSet SanOpts
Sanitizers enabled for this function.
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
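A minimal sketch of declaring and calling a runtime function; the __example_trace_ptr hook and its signature are invented for illustration.
#include "CodeGenFunction.h"
#include "llvm/IR/DerivedTypes.h"

// Declare void __example_trace_ptr(ptr) through the CodeGenModule and call it.
static void emitTraceHook(clang::CodeGen::CodeGenFunction &CGF,
                          llvm::Value *Ptr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGF.Builder.getVoidTy(), {CGF.Builder.getPtrTy()}, /*isVarArg=*/false);
  llvm::FunctionCallee Hook =
      CGF.CGM.CreateRuntimeFunction(FTy, "__example_trace_ptr");
  // EmitRuntimeCall emits the call with the runtime calling convention.
  CGF.EmitRuntimeCall(Hook, {Ptr});
}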
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
AwaitSuspendWrapperInfo CurAwaitSuspendWrapper
void EmitFakeUse(Address Addr)
Definition: CGDecl.cpp:1380
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
CGCapturedStmtInfo * CapturedStmtInfo
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
void unprotectFromPeepholes(PeepholeProtection protection)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has the noconvergent attribute.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
const CodeGen::CGBlockInfo * BlockInfo
void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
@ TCK_Upcast
Checking the operand of a cast to a base object.
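A minimal sketch of requesting one of these checks before a load; checkBeforeLoad is an invented helper.
#include "CodeGenFunction.h"

// Emit a sanitizer suitability check for Addr, tagged as an ordinary load.
static void checkBeforeLoad(clang::CodeGen::CodeGenFunction &CGF,
                            const clang::Expr *E,
                            clang::CodeGen::Address Addr) {
  CGF.EmitTypeCheck(clang::CodeGen::CodeGenFunction::TCK_Load,
                    E->getExprLoc(), Addr, E->getType());
  // The actual load of Addr would follow here.
}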
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
Definition: CGCleanup.cpp:1112
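A minimal sketch of cleanup-aware control flow; branchToSharedExit is hypothetical, and the destination block would be emitted separately later.
#include "CodeGenFunction.h"

// Create a jump destination in the current EH scope and branch to it through
// any cleanups that are in the way.
static void branchToSharedExit(clang::CodeGen::CodeGenFunction &CGF) {
  clang::CodeGen::CodeGenFunction::JumpDest Exit =
      CGF.getJumpDestInCurrentScope("shared.exit");
  CGF.EmitBranchThroughCleanup(Exit);
  // Exit's block is emitted later, once all branches to it are recorded.
}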
bool InNoMergeAttributedStmt
True if the current statement has the nomerge attribute.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count)
Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *elementType, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, const Twine &Name="")
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::BasicBlock * getUnreachableBlock()
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
llvm::SmallVector< const JumpDest *, 2 > SEHTryEpilogueStack
JumpDest ReturnBlock
ReturnBlock - Unified return block.
DominatingValue< T >::saved_type saveValueInCond(T value)
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
llvm::Value * EmitSVEDupX(llvm::Value *Scalar)
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A)
llvm::DenseMap< const Decl *, Address > DeclMapTy
const TargetInfo & getTarget() const
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:424
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1293
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
static Destroyer destroyARCStrongImprecise
llvm::BasicBlock * getInvokeDest()
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
llvm::Value * EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty)
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition: CGStmt.cpp:1012
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke=nullptr, bool IsMustTail=false)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Definition: CGBlocks.cpp:2634
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:151
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
llvm::Function * generateAwaitSuspendWrapper(Twine const &CoroName, Twine const &SuspendPointName, CoroutineSuspendExpr const &S)
const TargetCodeGenInfo & getTargetHooks() const
void setBeforeOutermostConditional(llvm::Value *value, Address addr, CodeGenFunction &CGF)
void EmitLifetimeEnd(llvm::Value *Addr)
Definition: CGDecl.cpp:1369
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
bool InNoInlineAttributedStmt
True if the current statement has the noinline attribute.
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, RawAddress ActiveFlag, As... A)
int ExpectedOMPLoopDepth
Number of nested loop to be consumed by the last surrounding loop-associated directive.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
llvm::Value * LoadCXXVTT()
LoadCXXVTT - Load the VTT parameter passed to base constructors/destructors that have virtual bases.
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:264
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:61
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
CodeGenTypes & getTypes() const
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has the always_inline attribute.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr, RawAddress *Alloca=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:186
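A minimal sketch of materializing a temporary and wrapping it as an l-value; makeTempLValue is an invented helper and relies on RawAddress converting to Address.
#include "CodeGenFunction.h"

// Allocate a temporary of type T and describe it as an LValue so later
// loads/stores pick up the right alignment and TBAA conventions.
static clang::CodeGen::LValue
makeTempLValue(clang::CodeGen::CodeGenFunction &CGF, clang::QualType T) {
  clang::CodeGen::RawAddress Tmp = CGF.CreateMemTemp(T, "example.tmp");
  return CGF.MakeAddrLValue(Tmp, T);
}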
SmallVector< llvm::Value *, 8 > ObjCEHValueStack
ObjCEHValueStack - Stack of Objective-C exception values, used for rethrows.
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
const CGFunctionInfo * CurFnInfo
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1631
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
static Destroyer destroyARCStrongPrecise
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
llvm::SmallVector< VPtr, 4 > VPtrsVector
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< Address > args, const Twine &name="")
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
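A minimal sketch combining the scalar load/store helpers; copyScalar is an invented wrapper.
#include "CodeGenFunction.h"

// Copy one scalar of type Ty from Src to Dest, letting the helpers handle the
// conversions between the LLVM and in-memory representations.
static void copyScalar(clang::CodeGen::CodeGenFunction &CGF,
                       clang::CodeGen::Address Src,
                       clang::CodeGen::Address Dest, clang::QualType Ty,
                       clang::SourceLocation Loc) {
  llvm::Value *V = CGF.EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
  CGF.EmitStoreOfScalar(V, Dest, /*Volatile=*/false, Ty);
}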
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source=AlignmentSource::Type)
llvm::Type * ConvertType(const TypeDecl *T)
This class organizes the cross-function state that is used while generating LLVM code.
void setAtomicOpts(AtomicOptions AO)
Set the current Atomic options.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
const llvm::DataLayout & getDataLayout() const
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:296
DominatingValue< Address >::saved_type AggregateAddr
static saved_type save(CodeGenFunction &CGF, RValue value)
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:146
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:208
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:99
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:375
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:398
void pushCleanupTuple(CleanupKind Kind, std::tuple< As... > A)
Push a lazily-created cleanup on the stack. Tuple version.
Definition: EHScopeStack.h:300
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:375
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
CharUnits getAlignment() const
Definition: CGValue.h:343
QualType getType() const
Definition: CGValue.h:291
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
A stack of loop information corresponding to loop nesting levels.
Definition: CGLoopInfo.h:207
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
An abstract representation of an aligned address.
Definition: Address.h:42
bool isValid() const
Definition: Address.h:62
Class provides a way to call simple version of codegen for OpenMP region, or an advanced with possibl...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:379
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition: TargetInfo.h:47
The class detects jumps which bypass local variable declarations: goto L; int a; L:
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4236
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3541
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1731
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition: ExprCXX.h:5249
Represents the current source location and context used to determine the value of the source location...
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2393
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1272
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
This represents one expression.
Definition: Expr.h:112
QualType getType() const
Definition: Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6500
Represents a member of a struct/union/class.
Definition: Decl.h:3153
Represents a function declaration or definition.
Definition: Decl.h:1999
Represents a prototype with parameter type info, e.g.
Definition: TypeBase.h:5282
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:57
const Decl * getDecl() const
Definition: GlobalDecl.h:106
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2972
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7258
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2262
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:3011
Describes a C or C++ initializer list.
Definition: Expr.h:5235
Represents the declaration of a label.
Definition: Decl.h:523
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2157
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:227
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:434
Represents a point when we exit a loop.
Definition: ProgramPoint.h:721
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4914
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2801
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3300
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: TypeBase.h:3669
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents '#pragma omp atomic' directive.
Definition: StmtOpenMP.h:2947
This represents '#pragma omp barrier' directive.
Definition: StmtOpenMP.h:2625
This represents '#pragma omp cancel' directive.
Definition: StmtOpenMP.h:3655
This represents '#pragma omp cancellation point' directive.
Definition: StmtOpenMP.h:3597
Representation of an OpenMP canonical loop.
Definition: StmtOpenMP.h:142
This represents '#pragma omp critical' directive.
Definition: StmtOpenMP.h:2076
This represents '#pragma omp depobj' directive.
Definition: StmtOpenMP.h:2841
This represents '#pragma omp distribute' directive.
Definition: StmtOpenMP.h:4425
This represents '#pragma omp distribute parallel for' composite directive.
Definition: StmtOpenMP.h:4547
This represents '#pragma omp distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:4643
This represents '#pragma omp distribute simd' composite directive.
Definition: StmtOpenMP.h:4708
This represents '#pragma omp error' directive.
Definition: StmtOpenMP.h:6514
This is a basic class for representing a single OpenMP executable directive.
Definition: StmtOpenMP.h:266
This represents '#pragma omp flush' directive.
Definition: StmtOpenMP.h:2789
This represents '#pragma omp for' directive.
Definition: StmtOpenMP.h:1634
This represents '#pragma omp for simd' directive.
Definition: StmtOpenMP.h:1724
This represents '#pragma omp loop' directive.
Definition: StmtOpenMP.h:6185
Represents the '#pragma omp interchange' loop transformation directive.
Definition: StmtOpenMP.h:5851
This represents '#pragma omp interop' directive.
Definition: StmtOpenMP.h:5977
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1004
This represents '#pragma omp masked' directive.
Definition: StmtOpenMP.h:6095
This represents '#pragma omp masked taskloop' directive.
Definition: StmtOpenMP.h:3930
This represents '#pragma omp masked taskloop simd' directive.
Definition: StmtOpenMP.h:4071
This represents '#pragma omp master' directive.
Definition: StmtOpenMP.h:2028
This represents '#pragma omp master taskloop' directive.
Definition: StmtOpenMP.h:3854
This represents '#pragma omp master taskloop simd' directive.
Definition: StmtOpenMP.h:4006
This represents '#pragma omp metadirective' directive.
Definition: StmtOpenMP.h:6146
This represents '#pragma omp ordered' directive.
Definition: StmtOpenMP.h:2893
This represents '#pragma omp parallel' directive.
Definition: StmtOpenMP.h:611
This represents '#pragma omp parallel for' directive.
Definition: StmtOpenMP.h:2147
This represents '#pragma omp parallel for simd' directive.
Definition: StmtOpenMP.h:2244
This represents '#pragma omp parallel masked' directive.
Definition: StmtOpenMP.h:2372
This represents '#pragma omp parallel masked taskloop' directive.
Definition: StmtOpenMP.h:4215
This represents '#pragma omp parallel masked taskloop simd' directive.
Definition: StmtOpenMP.h:4360
This represents '#pragma omp parallel master' directive.
Definition: StmtOpenMP.h:2309
This represents '#pragma omp parallel master taskloop' directive.
Definition: StmtOpenMP.h:4137
This represents '#pragma omp parallel master taskloop simd' directive.
Definition: StmtOpenMP.h:4293
This represents '#pragma omp parallel sections' directive.
Definition: StmtOpenMP.h:2436
Represents the '#pragma omp reverse' loop transformation directive.
Definition: StmtOpenMP.h:5779
This represents '#pragma omp scan' directive.
Definition: StmtOpenMP.h:5924
This represents '#pragma omp scope' directive.
Definition: StmtOpenMP.h:1925
This represents '#pragma omp section' directive.
Definition: StmtOpenMP.h:1864
This represents '#pragma omp sections' directive.
Definition: StmtOpenMP.h:1787
This represents '#pragma omp simd' directive.
Definition: StmtOpenMP.h:1571
This represents '#pragma omp single' directive.
Definition: StmtOpenMP.h:1977
This represents the '#pragma omp stripe' loop transformation directive.
Definition: StmtOpenMP.h:5625
This represents '#pragma omp target data' directive.
Definition: StmtOpenMP.h:3206
This represents '#pragma omp target' directive.
Definition: StmtOpenMP.h:3152
This represents '#pragma omp target enter data' directive.
Definition: StmtOpenMP.h:3260
This represents '#pragma omp target exit data' directive.
Definition: StmtOpenMP.h:3315
This represents '#pragma omp target parallel' directive.
Definition: StmtOpenMP.h:3369
This represents '#pragma omp target parallel for' directive.
Definition: StmtOpenMP.h:3449
This represents '#pragma omp target parallel for simd' directive.
Definition: StmtOpenMP.h:4774
This represents '#pragma omp target parallel loop' directive.
Definition: StmtOpenMP.h:6452
This represents '#pragma omp target simd' directive.
Definition: StmtOpenMP.h:4841
This represents '#pragma omp target teams' directive.
Definition: StmtOpenMP.h:5199
This represents '#pragma omp target teams distribute' combined directive.
Definition: StmtOpenMP.h:5255
This represents '#pragma omp target teams distribute parallel for' combined directive.
Definition: StmtOpenMP.h:5322
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
Definition: StmtOpenMP.h:5420
This represents '#pragma omp target teams distribute simd' combined directive.
Definition: StmtOpenMP.h:5490
This represents '#pragma omp target teams loop' directive.
Definition: StmtOpenMP.h:6312
This represents '#pragma omp target update' directive.
Definition: StmtOpenMP.h:4491
This represents '#pragma omp task' directive.
Definition: StmtOpenMP.h:2517
This represents '#pragma omp taskloop' directive.
Definition: StmtOpenMP.h:3715
This represents '#pragma omp taskloop simd' directive.
Definition: StmtOpenMP.h:3788
This represents '#pragma omp taskgroup' directive.
Definition: StmtOpenMP.h:2722
This represents '#pragma omp taskwait' directive.
Definition: StmtOpenMP.h:2671
This represents '#pragma omp taskyield' directive.
Definition: StmtOpenMP.h:2579
This represents '#pragma omp teams' directive.
Definition: StmtOpenMP.h:3544
This represents '#pragma omp teams distribute' directive.
Definition: StmtOpenMP.h:4906
This represents '#pragma omp teams distribute parallel for' composite directive.
Definition: StmtOpenMP.h:5106
This represents '#pragma omp teams distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:5040
This represents '#pragma omp teams distribute simd' combined directive.
Definition: StmtOpenMP.h:4972
This represents '#pragma omp teams loop' directive.
Definition: StmtOpenMP.h:6247
This represents the '#pragma omp tile' loop transformation directive.
Definition: StmtOpenMP.h:5548
This represents the '#pragma omp unroll' loop transformation directive.
Definition: StmtOpenMP.h:5705
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
ObjCArrayLiteral - used for Objective-C array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:192
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:128
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:948
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:308
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:409
ObjCImplementationDecl - Represents a class definition - this is where method definitions are specifi...
Definition: DeclObjC.h:2597
Represents an ObjC class declaration.
Definition: DeclObjC.h:1154
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1498
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:548
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:940
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2805
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:504
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:454
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:52
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1180
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1230
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Definition: StmtOpenACC.h:132
This class represents a 'loop' construct.
Definition: StmtOpenACC.h:190
Represents a parameter to a function.
Definition: Decl.h:1789
Pointer-authentication qualifiers.
Definition: TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: TypeBase.h:3346
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:2007
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6692
A (possibly-)qualified type.
Definition: TypeBase.h:937
The collection of all-type qualifiers we support.
Definition: TypeBase.h:331
Represents a struct/union/class.
Definition: Decl.h:4305
Flags to identify the types for overloaded SVE builtins.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
A trivial tuple used to represent a source range.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4531
Stmt - This represents one statement.
Definition: Stmt.h:85
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1426
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1801
Exposes information about the current target.
Definition: TargetInfo.h:226
Represents a declaration of a type.
Definition: Decl.h:3506
The base class of the type hierarchy.
Definition: TypeBase.h:1833
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.h:41
bool isReferenceType() const
Definition: TypeBase.h:8604
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2246
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4893
Represents the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:711
QualType getType() const
Definition: Decl.h:722
Represents a variable declaration or definition.
Definition: Decl.h:925
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2257
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
Definition: Decl.h:1261
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: TypeBase.h:3982
Expr * getSizeExpr() const
Definition: TypeBase.h:3996
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2700
#define bool
Definition: gpuintrin.h:32
Defines the clang::TargetInfo interface.
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
std::variant< struct RequiresDecl, struct HeaderDecl, struct UmbrellaDirDecl, struct ModuleDecl, struct ExcludeDecl, struct ExportDecl, struct ExportAsDecl, struct ExternModuleDecl, struct UseDecl, struct LinkDecl, struct ConfigMacrosDecl, struct ConflictDecl > Decl
All declarations that can appear in a module declaration.
Definition: ModuleMapFile.h:36
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:25
BinaryOperatorKind
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ CR_Default
Definition: CapturedStmt.h:17
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
ExprValueKind
The categorization of expression values, currently following the C++11 scheme.
Definition: Specifiers.h:132
const FunctionProtoType * T
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
unsigned fine_grained_memory
Definition: LangOptions.h:1051
unsigned ignore_denormal_mode
Definition: LangOptions.h:1052
Structure with information about how a bitfield should be accessed.
llvm::SmallVector< llvm::AllocaInst * > Take()
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
FMVResolverOption(llvm::Function *F, ArrayRef< StringRef > Feats, std::optional< StringRef > Arch=std::nullopt)
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
Header for data within LifetimeExtendedCleanupStack.
unsigned Size
The size of the following cleanup object.
unsigned IsConditional
Whether this is a conditional cleanup.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
OMPBuilderCBHelpers & operator=(const OMPBuilderCBHelpers &)=delete
OMPBuilderCBHelpers(const OMPBuilderCBHelpers &)=delete
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems)
llvm::PointerUnion< const FunctionProtoType *, const ObjCMethodDecl * > P
Struct with all information about dynamic [sub]class needed to set vptr.
This structure provides a set of types that are commonly used during IR emission.
Helper class with most of the code for saving a value for a conditional expression cleanup.
llvm::PointerIntPair< llvm::Value *, 1, bool > saved_type
static llvm::Value * restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
static type restore(CodeGenFunction &CGF, saved_type value)
static type restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, type value)
static saved_type save(CodeGenFunction &CGF, type value)
static type restore(CodeGenFunction &CGF, saved_type value)
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition: Thunk.h:157