CodeGenFunction.h
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGLoopInfo.h"
18#include "CGValue.h"
19#include "CodeGenModule.h"
20#include "EHScopeStack.h"
21#include "SanitizerHandler.h"
22#include "VarBypassDetector.h"
23#include "clang/AST/CharUnits.h"
25#include "clang/AST/ExprCXX.h"
26#include "clang/AST/ExprObjC.h"
30#include "clang/AST/StmtSYCL.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class ConvergenceControlInst;
51class LLVMContext;
52class MDNode;
53class SwitchInst;
54class Twine;
55class Value;
56class CanonicalLoopInfo;
57} // namespace llvm
58
59namespace clang {
60class ASTContext;
61class CXXDestructorDecl;
62class CXXForRangeStmt;
63class CXXTryStmt;
64class Decl;
65class LabelDecl;
66class FunctionDecl;
67class FunctionProtoType;
68class LabelStmt;
69class ObjCContainerDecl;
70class ObjCInterfaceDecl;
71class ObjCIvarDecl;
72class ObjCMethodDecl;
73class ObjCImplementationDecl;
74class ObjCPropertyImplDecl;
75class TargetInfo;
76class VarDecl;
77class ObjCForCollectionStmt;
78class ObjCAtTryStmt;
79class ObjCAtThrowStmt;
80class ObjCAtSynchronizedStmt;
81class ObjCAutoreleasePoolStmt;
82class OMPUseDevicePtrClause;
83class OMPUseDeviceAddrClause;
84class SVETypeFlags;
85class OMPExecutableDirective;
86
87namespace analyze_os_log {
88class OSLogBufferLayout;
89}
90
91namespace CodeGen {
92class CodeGenTypes;
93class CodeGenPGO;
94class CGCallee;
95class CGFunctionInfo;
96class CGBlockInfo;
97class CGCXXABI;
98class BlockByrefHelpers;
99class BlockByrefInfo;
100class BlockFieldFlags;
101class RegionCodeGenTy;
102class TargetCodeGenInfo;
103struct OMPTaskDataTy;
104struct CGCoroData;
105
106// clang-format off
107/// The kind of evaluation to perform on values of a particular
108/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
109/// CGExprAgg?
110///
111/// TODO: should vectors maybe be split out into their own thing?
112enum TypeEvaluationKind {
113 TEK_Scalar,
114 TEK_Complex,
115 TEK_Aggregate
116};
117// clang-format on
118
119/// Helper class with most of the code for saving a value for a
120/// conditional expression cleanup.
121struct DominatingLLVMValue {
122 typedef llvm::PointerIntPair<llvm::Value *, 1, bool> saved_type;
123
124 /// Answer whether the given value needs extra work to be saved.
125 static bool needsSaving(llvm::Value *value) {
126 if (!value)
127 return false;
128
129 // If it's not an instruction, we don't need to save.
130 if (!isa<llvm::Instruction>(value))
131 return false;
132
133 // If it's an instruction in the entry block, we don't need to save.
134 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
135 return (block != &block->getParent()->getEntryBlock());
136 }
137
138 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
139 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
140};
141
142/// A partial specialization of DominatingValue for llvm::Values that
143/// might be llvm::Instructions.
144template <class T> struct DominatingPointer<T, true> : DominatingLLVMValue {
145 typedef T *type;
146 static type restore(CodeGenFunction &CGF, saved_type value) {
147 return static_cast<T *>(DominatingLLVMValue::restore(CGF, value));
148 }
149};
150
151/// A specialization of DominatingValue for Address.
152template <> struct DominatingValue<Address> {
153 typedef Address type;
154
155 struct saved_type {
156 DominatingLLVMValue::saved_type BasePtr;
157 llvm::Type *ElementType;
158 CharUnits Alignment;
159 DominatingLLVMValue::saved_type Offset;
160 llvm::PointerType *EffectiveType;
161 };
162
163 static bool needsSaving(type value) {
164 if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
165 DominatingLLVMValue::needsSaving(value.getOffset()))
166 return true;
167 return false;
168 }
169 static saved_type save(CodeGenFunction &CGF, type value) {
170 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
171 value.getElementType(), value.getAlignment(),
172 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
173 }
174 static type restore(CodeGenFunction &CGF, saved_type value) {
175 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
176 value.ElementType, value.Alignment, CGPointerAuthInfo(),
177 DominatingLLVMValue::restore(CGF, value.Offset));
178 }
179};
180
181/// A specialization of DominatingValue for RValue.
182template <> struct DominatingValue<RValue> {
183 typedef RValue type;
184 class saved_type {
185 enum Kind {
186 ScalarLiteral,
187 ScalarAddress,
188 AggregateLiteral,
189 AggregateAddress,
190 ComplexAddress
191 };
192 union {
193 struct {
194 DominatingLLVMValue::saved_type first, second;
195 } Vals;
196 DominatingValue<Address>::saved_type AggregateAddr;
197 };
198 LLVM_PREFERRED_TYPE(Kind)
199 unsigned K : 3;
200
201 saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
202 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
203
204 saved_type(DominatingLLVMValue::saved_type Val1,
205 DominatingLLVMValue::saved_type Val2)
206 : Vals{Val1, Val2}, K(ComplexAddress) {}
207
208 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
209 : AggregateAddr(AggregateAddr), K(K) {}
210
211 public:
212 static bool needsSaving(RValue value);
213 static saved_type save(CodeGenFunction &CGF, RValue value);
214 RValue restore(CodeGenFunction &CGF);
215
216 // implementations in CGCleanup.cpp
217 };
218
219 static bool needsSaving(type value) { return saved_type::needsSaving(value); }
220 static saved_type save(CodeGenFunction &CGF, type value) {
221 return saved_type::save(CGF, value);
222 }
223 static type restore(CodeGenFunction &CGF, saved_type value) {
224 return value.restore(CGF);
225 }
226};
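// A minimal sketch (illustrative, with hypothetical names) of how the
// DominatingValue<> machinery is used; 'CGF' is a CodeGenFunction and
// 'TempAddr' an Address computed inside a conditionally evaluated expression:
//
//   DominatingValue<Address>::saved_type Saved =
//       DominatingValue<Address>::save(CGF, TempAddr);
//   // ... later, at a point dominated by the save:
//   Address Reloaded = DominatingValue<Address>::restore(CGF, Saved);
//
// pushFullExprCleanup() below wraps exactly this pattern so that cleanup
// arguments survive conditional evaluation.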
227
228/// A scoped helper to set the current source atom group for
229/// CGDebugInfo::addInstToCurrentSourceAtom. A source atom is a source construct
230/// that is "interesting" for debug stepping purposes. We use an atom group
231/// number to track the instruction(s) that implement the functionality for the
232/// atom, plus backup instructions/source locations.
234 uint64_t OriginalAtom = 0;
235 CGDebugInfo *DI = nullptr;
236
237 ApplyAtomGroup(const ApplyAtomGroup &) = delete;
238 void operator=(const ApplyAtomGroup &) = delete;
239
240public:
243};
244
245/// CodeGenFunction - This class organizes the per-function state that is used
246/// while generating LLVM code.
248 CodeGenFunction(const CodeGenFunction &) = delete;
249 void operator=(const CodeGenFunction &) = delete;
250
251 friend class CGCXXABI;
252
253public:
254 /// A jump destination is an abstract label, branching to which may
255 /// require a jump out through normal cleanups.
256 struct JumpDest {
257 JumpDest() : Block(nullptr), Index(0) {}
258 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
259 unsigned Index)
260 : Block(Block), ScopeDepth(Depth), Index(Index) {}
261
262 bool isValid() const { return Block != nullptr; }
263 llvm::BasicBlock *getBlock() const { return Block; }
264 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
265 unsigned getDestIndex() const { return Index; }
266
267 // This should be used cautiously.
269 ScopeDepth = depth;
270 }
271
272 private:
273 llvm::BasicBlock *Block;
275 unsigned Index;
276 };
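// A minimal sketch (illustrative, hypothetical locals) of typical JumpDest
// use; the helpers referenced here are declared later in this class:
//
//   CodeGenFunction::JumpDest Done = CGF.getJumpDestInCurrentScope("done");
//   // ... emit code that may push cleanups onto EHStack ...
//   CGF.EmitBranchThroughCleanup(Done); // the branch runs intervening cleanups
//   CGF.EmitBlock(Done.getBlock());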
277
278 CodeGenModule &CGM; // Per-module state.
279 const TargetInfo &Target;
280
281 // For EH/SEH outlined funclets, this field points to the parent's CGF.
282 CodeGenFunction *ParentCGF = nullptr;
283
284 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
287
288 // Stores variables for which we can't generate correct lifetime markers
289 // because of jumps.
291
292 /// List of recently emitted OMPCanonicalLoops.
293 ///
294 /// Since OMPCanonicalLoops are nested inside other statements (in particular
295 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
296 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
297 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
298 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
299 /// this stack when done. Entering a new loop requires clearing this list; it
300 /// either means we start parsing a new loop nest (in which case the previous
301 /// loop nest goes out of scope) or we start a second loop at the same level,
302 /// in which case it would be ambiguous into which of the two (or more) loops
303 /// the loop nest would extend.
305
306 /// Stack to track the Logical Operator recursion nest for MC/DC.
308
309 /// Stack to track the controlled convergence tokens.
311
312 /// Number of nested loops to be consumed by the last surrounding
313 /// loop-associated directive.
315
316 // CodeGen lambda for loops and support for ordered clause
317 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
318 JumpDest)>
320 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
321 const unsigned, const bool)>
323
324 // Codegen lambda for loop bounds in worksharing loop constructs
325 typedef llvm::function_ref<std::pair<LValue, LValue>(
328
329 // Codegen lambda for loop bounds in dispatch-based loop implementation
330 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
332 Address UB)>
334
335 /// CGBuilder insert helper. This function is called after an
336 /// instruction is created using Builder.
337 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
338 llvm::BasicBlock::iterator InsertPt) const;
339
340 /// CurFuncDecl - Holds the Decl for the current outermost
341 /// non-closure context.
342 const Decl *CurFuncDecl = nullptr;
343 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
344 const Decl *CurCodeDecl = nullptr;
345 const CGFunctionInfo *CurFnInfo = nullptr;
347 llvm::Function *CurFn = nullptr;
348
349 /// Save Parameter Decl for coroutine.
351
352 // Holds coroutine data if the current function is a coroutine. We use a
353 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
354 // in this header.
355 struct CGCoroInfo {
356 std::unique_ptr<CGCoroData> Data;
357 bool InSuspendBlock = false;
358 CGCoroInfo();
359 ~CGCoroInfo();
360 };
361 CGCoroInfo CurCoro;
362
363 bool isCoroutine() const { return CurCoro.Data != nullptr; }
364
365 bool inSuspendBlock() const {
366 return isCoroutine() && CurCoro.InSuspendBlock;
367 }
368
369 // Holds FramePtr for await_suspend wrapper generation,
370 // so that the __builtin_coro_frame call can be lowered
371 // directly to the value of its second argument.
373 llvm::Value *FramePtr = nullptr;
374 };
376
377 // Generates a wrapper function for `llvm.coro.await.suspend.*` intrinsics.
378 // It encapsulates SuspendExpr in a function, to separate its body
379 // from the main coroutine and avoid miscompilations. The intrinsic
380 // is lowered to a call to this function in the CoroSplit pass.
381 // The function signature is:
382 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
383 // where <type> is one of (void, i1, ptr).
384 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
385 Twine const &SuspendPointName,
386 CoroutineSuspendExpr const &S);
387
388 /// CurGD - The GlobalDecl for the current function being compiled.
390
391 /// PrologueCleanupDepth - The cleanup depth enclosing all the
392 /// cleanups associated with the parameters.
394
395 /// ReturnBlock - Unified return block.
397
398 /// ReturnValue - The temporary alloca to hold the return
399 /// value. This is invalid iff the function has no return value.
401
402 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
403 /// This is invalid if sret is not in use.
405
406 /// If a return statement is being visited, this holds the return statement's
407 /// result expression.
408 const Expr *RetExpr = nullptr;
409
410 /// Return true if a label was seen in the current scope.
412 if (CurLexicalScope)
413 return CurLexicalScope->hasLabels();
414 return !LabelMap.empty();
415 }
416
417 /// AllocaInsertPoint - This is an instruction in the entry block before which
418 /// we prefer to insert allocas.
419 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
420
421private:
422 /// PostAllocaInsertPt - This is a place in the prologue where code can be
423 /// inserted that will be dominated by all the static allocas. This helps
424 /// achieve two things:
425 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
426 /// 2. All other prologue code (which is dominated by the static allocas)
427 /// appears in source order immediately after all static allocas.
428 ///
429 /// PostAllocaInsertPt will be lazily created when it is *really* required.
430 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
431
432public:
433 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
434 /// immediately after AllocaInsertPt.
435 llvm::Instruction *getPostAllocaInsertPoint() {
436 if (!PostAllocaInsertPt) {
437 assert(AllocaInsertPt &&
438 "Expected static alloca insertion point at function prologue");
439 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
440 "EBB should be entry block of the current code gen function");
441 PostAllocaInsertPt = AllocaInsertPt->clone();
442 PostAllocaInsertPt->setName("postallocapt");
443 PostAllocaInsertPt->insertAfter(AllocaInsertPt->getIterator());
444 }
445
446 return PostAllocaInsertPt;
447 }
448
449 /// API for captured statement code generation.
450 class CGCapturedStmtInfo {
451 public:
453 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
456 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
457
459 S.getCapturedRecordDecl()->field_begin();
460 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
461 E = S.capture_end();
462 I != E; ++I, ++Field) {
463 if (I->capturesThis())
464 CXXThisFieldDecl = *Field;
465 else if (I->capturesVariable())
466 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
467 else if (I->capturesVariableByCopy())
468 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
469 }
470 }
471
472 virtual ~CGCapturedStmtInfo();
473
474 CapturedRegionKind getKind() const { return Kind; }
475
476 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
477 // Retrieve the value of the context parameter.
478 virtual llvm::Value *getContextValue() const { return ThisValue; }
479
480 /// Lookup the captured field decl for a variable.
481 virtual const FieldDecl *lookup(const VarDecl *VD) const {
482 return CaptureFields.lookup(VD->getCanonicalDecl());
483 }
484
485 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
486 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
487
488 static bool classof(const CGCapturedStmtInfo *) { return true; }
489
490 /// Emit the captured statement body.
491 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
493 CGF.EmitStmt(S);
494 }
495
496 /// Get the name of the capture helper.
497 virtual StringRef getHelperName() const { return "__captured_stmt"; }
498
499 /// Get the CaptureFields
500 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
501 return CaptureFields;
502 }
503
504 private:
505 /// The kind of captured statement being generated.
507
508 /// Keep the map between VarDecl and FieldDecl.
509 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
510
511 /// The base address of the captured record, passed in as the first
512 /// argument of the parallel region function.
513 llvm::Value *ThisValue;
514
515 /// Captured 'this' type.
516 FieldDecl *CXXThisFieldDecl;
517 };
519
520 /// RAII for correct setting/restoring of CapturedStmtInfo.
522 private:
523 CodeGenFunction &CGF;
524 CGCapturedStmtInfo *PrevCapturedStmtInfo;
525
526 public:
528 CGCapturedStmtInfo *NewCapturedStmtInfo)
529 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
530 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
531 }
532 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
533 };
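// A minimal sketch (illustrative): emitting a captured statement body under a
// custom CGCapturedStmtInfo. 'CS' is a hypothetical CapturedStmt and
// CR_Default is assumed from clang/Basic/CapturedStmt.h:
//
//   CGCapturedStmtInfo Info(CS, CR_Default);
//   CGCapturedStmtRAII InfoRAII(CGF, &Info);  // installs Info on CGF
//   Info.EmitBody(CGF, CS.getCapturedStmt()); // lookup() resolves captures
//   // the previous CapturedStmtInfo is restored when InfoRAII is destroyed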
534
535 /// An abstract representation of regular/ObjC call/message targets.
537 /// The function declaration of the callee.
538 const Decl *CalleeDecl;
539
540 public:
541 AbstractCallee() : CalleeDecl(nullptr) {}
542 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
543 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
544 bool hasFunctionDecl() const {
545 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
546 }
547 const Decl *getDecl() const { return CalleeDecl; }
548 unsigned getNumParams() const {
549 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
550 return FD->getNumParams();
551 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
552 }
553 const ParmVarDecl *getParamDecl(unsigned I) const {
554 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
555 return FD->getParamDecl(I);
556 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
557 }
558 };
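// A minimal sketch (illustrative): AbstractCallee lets call-emission code
// query parameters uniformly for C/C++ functions and Objective-C methods.
// 'FD' is a hypothetical const FunctionDecl *:
//
//   AbstractCallee Callee(FD);
//   for (unsigned I = 0, N = Callee.getNumParams(); I != N; ++I)
//     (void)Callee.getParamDecl(I); // e.g. inspect pass_object_size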
559
560 /// Sanitizers enabled for this function.
562
563 /// True if CodeGen currently emits code implementing sanitizer checks.
564 bool IsSanitizerScope = false;
565
566 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
567 class SanitizerScope {
568 CodeGenFunction *CGF;
569
570 public:
571 SanitizerScope(CodeGenFunction *CGF);
572 ~SanitizerScope();
573 };
574
575 /// In C++, whether we are code generating a thunk. This controls whether we
576 /// should emit cleanups.
577 bool CurFuncIsThunk = false;
578
579 /// In ARC, whether we should autorelease the return value.
580 bool AutoreleaseResult = false;
581
582 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
583 /// potentially set the return value.
584 bool SawAsmBlock = false;
585
587
588 /// True if the current function is an outlined SEH helper. This can be a
589 /// finally block or filter expression.
591
592 /// True if CodeGen currently emits code inside a preserved access index
593 /// region.
595
596 /// True if the current statement has nomerge attribute.
598
599 /// True if the current statement has noinline attribute.
601
602 /// True if the current statement has always_inline attribute.
604
605 /// True if the current statement has noconvergent attribute.
607
608 /// HLSL Branch attribute.
609 HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr =
610 HLSLControlFlowHintAttr::SpellingNotCalculated;
611
612 // The CallExpr within the current statement that the musttail attribute
613 // applies to. nullptr if there is no 'musttail' on the current statement.
614 const CallExpr *MustTailCall = nullptr;
615
616 /// Returns true if a function must make progress, which means the
617 /// mustprogress attribute can be added.
619 if (CGM.getCodeGenOpts().getFiniteLoops() ==
621 return false;
622
623 // C++11 and later guarantees that a thread eventually will do one of the
624 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
625 // - terminate,
626 // - make a call to a library I/O function,
627 // - perform an access through a volatile glvalue, or
628 // - perform a synchronization operation or an atomic operation.
629 //
630 // Hence each function is 'mustprogress' in C++11 or later.
631 return getLangOpts().CPlusPlus11;
632 }
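// For example, a side-effect-free 'while (1) { }' in a C++11 function lets
// LLVM assume forward progress (via the mustprogress attribute) and possibly
// delete the loop, whereas in C the same loop must be preserved.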
633
634 /// Returns true if a loop must make progress, which means the mustprogress
635 /// attribute can be added. The first parameter is the loop's controlling
636 /// expression; \p HasEmptyBody indicates whether the loop body is known to be empty.
637 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
638
640 llvm::Value *BlockPointer = nullptr;
641
642 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
644
645 /// A mapping from NRVO variables to the flags used to indicate
646 /// when the NRVO has been applied to this variable.
647 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
648
651
652 // A stack of cleanups which were added to EHStack but have to be deactivated
653 // later before being popped or emitted. These are usually deactivated on
654 // exiting a `CleanupDeactivationScope` scope. For instance, after a
655 // full-expr.
656 //
657 // These are especially useful for correctly emitting cleanups while
658 // encountering branches out of an expression (through stmt-exprs or
659 // coroutine suspensions).
662 llvm::Instruction *DominatingIP;
663 };
665
666 // Enters a new scope for capturing cleanups which are deferred to be
667 // deactivated, all of which will be deactivated once the scope is exited.
676
678 assert(!Deactivated && "Deactivating already deactivated scope");
680 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
681 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
682 Stack[I - 1].DominatingIP);
683 Stack[I - 1].DominatingIP->eraseFromParent();
684 }
685 Stack.resize(OldDeactivateCleanupStackSize);
686 Deactivated = true;
687 }
688
690 if (Deactivated)
691 return;
693 }
694 };
695
697
698 llvm::Instruction *CurrentFuncletPad = nullptr;
699
701 bool isRedundantBeforeReturn() override { return true; }
702
703 llvm::Value *Addr;
704
705 public:
706 CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
707
708 void Emit(CodeGenFunction &CGF, Flags flags) override {
710 }
711 };
712
713 // We are using objects of this 'cleanup' class to emit fake.use calls
714 // for -fextend-variable-liveness. They are placed at the end of a variable's
715 // scope analogous to lifetime markers.
716 class FakeUse final : public EHScopeStack::Cleanup {
717 Address Addr;
718
719 public:
720 FakeUse(Address addr) : Addr(addr) {}
721
722 void Emit(CodeGenFunction &CGF, Flags flags) override {
723 CGF.EmitFakeUse(Addr);
724 }
725 };
726
727 /// Header for data within LifetimeExtendedCleanupStack.
728 struct alignas(uint64_t) LifetimeExtendedCleanupHeader {
729 /// The size of the following cleanup object.
730 unsigned Size;
731 /// The kind of cleanup to push.
732 LLVM_PREFERRED_TYPE(CleanupKind)
734 /// Whether this is a conditional cleanup.
735 LLVM_PREFERRED_TYPE(bool)
736 unsigned IsConditional : 1;
737
738 size_t getSize() const { return Size; }
739 CleanupKind getKind() const { return (CleanupKind)Kind; }
740 bool isConditional() const { return IsConditional; }
741 };
742
743 /// i32s containing the indexes of the cleanup destinations.
744 RawAddress NormalCleanupDest = RawAddress::invalid();
745
746 unsigned NextCleanupDestIndex = 1;
747
748 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
749 llvm::BasicBlock *EHResumeBlock = nullptr;
750
751 /// The exception slot. All landing pads write the current exception pointer
752 /// into this alloca.
753 llvm::Value *ExceptionSlot = nullptr;
754
755 /// The selector slot. Under the MandatoryCleanup model, all landing pads
756 /// write the current selector value into this alloca.
757 llvm::AllocaInst *EHSelectorSlot = nullptr;
758
759 /// A stack of exception code slots. Entering an __except block pushes a slot
760 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
761 /// a value from the top of the stack.
763
764 /// Value returned by __exception_info intrinsic.
765 llvm::Value *SEHInfo = nullptr;
766
767 /// Emits a landing pad for the current EH stack.
768 llvm::BasicBlock *EmitLandingPad();
769
770 llvm::BasicBlock *getInvokeDestImpl();
771
772 /// Parent loop-based directive for scan directive.
773 const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
774 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
775 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
776 llvm::BasicBlock *OMPScanExitBlock = nullptr;
777 llvm::BasicBlock *OMPScanDispatch = nullptr;
778 bool OMPFirstScanLoop = false;
779
780 /// Manages parent directive for scan directives.
782 CodeGenFunction &CGF;
783 const OMPExecutableDirective *ParentLoopDirectiveForScan;
784
785 public:
787 CodeGenFunction &CGF,
788 const OMPExecutableDirective &ParentLoopDirectiveForScan)
789 : CGF(CGF),
790 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
791 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
792 }
794 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
795 }
796 };
797
798 template <class T>
800 return DominatingValue<T>::save(*this, value);
801 }
802
804 public:
805 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
808
809 private:
810 void ConstructorHelper(FPOptions FPFeatures);
811 CodeGenFunction &CGF;
812 FPOptions OldFPFeatures;
813 llvm::fp::ExceptionBehavior OldExcept;
814 llvm::RoundingMode OldRounding;
815 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
816 };
818
820 public:
822 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
823 CGM.setAtomicOpts(AO);
824 }
825 CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
826 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
827 if (!AA)
828 return;
829 AtomicOptions AO = SavedAtomicOpts;
830 for (auto Option : AA->atomicOptions()) {
831 switch (Option) {
832 case AtomicAttr::remote_memory:
833 AO.remote_memory = true;
834 break;
835 case AtomicAttr::no_remote_memory:
836 AO.remote_memory = false;
837 break;
838 case AtomicAttr::fine_grained_memory:
839 AO.fine_grained_memory = true;
840 break;
841 case AtomicAttr::no_fine_grained_memory:
842 AO.fine_grained_memory = false;
843 break;
844 case AtomicAttr::ignore_denormal_mode:
845 AO.ignore_denormal_mode = true;
846 break;
847 case AtomicAttr::no_ignore_denormal_mode:
848 AO.ignore_denormal_mode = false;
849 break;
850 }
851 }
852 CGM.setAtomicOpts(AO);
853 }
854
857 ~CGAtomicOptionsRAII() { CGM.setAtomicOpts(SavedAtomicOpts); }
858
859 private:
860 CodeGenModule &CGM;
861 AtomicOptions SavedAtomicOpts;
862 };
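// A minimal usage sketch (illustrative); 'AA' is a hypothetical
// const AtomicAttr * taken from an attributed statement:
//
//   {
//     CGAtomicOptionsRAII AtomicOpts(CGF.CGM, AA); // applies the attribute
//     // ... emit the atomic operation under the adjusted options ...
//   } // previous AtomicOptions are restored here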
863
864public:
865 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
866 /// rethrows.
868
869 /// A class controlling the emission of a finally block.
871 /// Where the catchall's edge through the cleanup should go.
872 JumpDest RethrowDest;
873
874 /// A function to call to enter the catch.
875 llvm::FunctionCallee BeginCatchFn;
876
877 /// An i1 variable indicating whether or not the @finally is
878 /// running for an exception.
879 llvm::AllocaInst *ForEHVar = nullptr;
880
881 /// An i8* variable into which the exception pointer to rethrow
882 /// has been saved.
883 llvm::AllocaInst *SavedExnVar = nullptr;
884
885 public:
886 void enter(CodeGenFunction &CGF, const Stmt *Finally,
887 llvm::FunctionCallee beginCatchFn,
888 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
889 void exit(CodeGenFunction &CGF);
890 };
891
892 /// Returns true inside SEH __try blocks.
893 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
894
895 /// Returns true while emitting a cleanuppad.
896 bool isCleanupPadScope() const {
897 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
898 }
899
900 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
901 /// current full-expression. Safe against the possibility that
902 /// we're currently inside a conditionally-evaluated expression.
903 template <class T, class... As>
904 void pushFullExprCleanup(CleanupKind kind, As... A) {
905 // If we're not in a conditional branch, or if none of the
906 // arguments requires saving, then use the unconditional cleanup.
907 if (!isInConditionalBranch())
908 return EHStack.pushCleanup<T>(kind, A...);
909
910 // Stash values in a tuple so we can guarantee the order of saves.
911 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
912 SavedTuple Saved{saveValueInCond(A)...};
913
914 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
915 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
916 initFullExprCleanup();
917 }
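// A minimal sketch (illustrative); 'DestroyTemp' is a hypothetical
// EHScopeStack::Cleanup subclass whose constructor takes an Address, and the
// CleanupKind shown is illustrative:
//
//   CGF.pushFullExprCleanup<DestroyTemp>(NormalAndEHCleanup, TempAddr);
//
// Outside a conditional branch this is equivalent to
// EHStack.pushCleanup<DestroyTemp>(...); inside one, TempAddr is saved via
// DominatingValue<Address> and the cleanup reloads it when it runs.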
918
919 /// Queue a cleanup to be pushed after finishing the current full-expression,
920 /// potentially with an active flag.
921 template <class T, class... As>
923 if (!isInConditionalBranch())
924 return pushCleanupAfterFullExprWithActiveFlag<T>(
925 Kind, RawAddress::invalid(), A...);
926
927 RawAddress ActiveFlag = createCleanupActiveFlag();
928 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
929 "cleanup active flag should never need saving");
930
931 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
932 SavedTuple Saved{saveValueInCond(A)...};
933
934 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
935 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag,
936 Saved);
937 }
938
939 template <class T, class... As>
941 RawAddress ActiveFlag, As... A) {
942 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
943 ActiveFlag.isValid()};
944
945 size_t OldSize = LifetimeExtendedCleanupStack.size();
946 LifetimeExtendedCleanupStack.resize(
947 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
948 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
949
950 static_assert((alignof(LifetimeExtendedCleanupHeader) == alignof(T)) &&
951 (alignof(T) == alignof(RawAddress)),
952 "Cleanup will be allocated on misaligned address");
953 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
954 new (Buffer) LifetimeExtendedCleanupHeader(Header);
955 new (Buffer + sizeof(Header)) T(A...);
956 if (Header.IsConditional)
957 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
958 }
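// Layout of one entry appended to LifetimeExtendedCleanupStack by the
// function above:
//
//   [ LifetimeExtendedCleanupHeader | T (the cleanup object) | RawAddress ]
//     (the trailing RawAddress is present only when Header.IsConditional)
//
// PopCleanupBlocks() later consumes these entries and pushes the cleanups
// onto EHStack in the same order.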
959
960 // Push a cleanup onto EHStack and deactivate it later. It is usually
961 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
962 // full expression).
963 template <class T, class... As>
965 // Placeholder dominating IP for this cleanup.
966 llvm::Instruction *DominatingIP =
967 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
968 EHStack.pushCleanup<T>(Kind, A...);
969 DeferredDeactivationCleanupStack.push_back(
970 {EHStack.stable_begin(), DominatingIP});
971 }
972
973 /// Set up the last cleanup that was pushed as a conditional
974 /// full-expression cleanup.
976 initFullExprCleanupWithFlag(createCleanupActiveFlag());
977 }
978
979 void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
980 RawAddress createCleanupActiveFlag();
981
982 /// PushDestructorCleanup - Push a cleanup to call the
983 /// complete-object destructor of an object of the given type at the
984 /// given address. Does nothing if T is not a C++ class type with a
985 /// non-trivial destructor.
986 void PushDestructorCleanup(QualType T, Address Addr);
987
988 /// PushDestructorCleanup - Push a cleanup to call the
989 /// complete-object variant of the given destructor on the object at
990 /// the given address.
991 void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
992 Address Addr);
993
994 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
995 /// process all branch fixups.
996 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
997 bool ForDeactivation = false);
998
999 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
1000 /// The block cannot be reactivated. Pops it if it's the top of the
1001 /// stack.
1002 ///
1003 /// \param DominatingIP - An instruction which is known to
1004 /// dominate the current IP (if set) and which lies along
1005 /// all paths of execution between the current IP and the
1006 /// the point at which the cleanup comes into scope.
1007 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
1008 llvm::Instruction *DominatingIP);
1009
1010 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
1011 /// Cannot be used to resurrect a deactivated cleanup.
1012 ///
1013 /// \param DominatingIP - An instruction which is known to
1014 /// dominate the current IP (if set) and which lies along
1015 /// all paths of execution between the current IP and the
1016 /// the point at which the cleanup comes into scope.
1017 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
1018 llvm::Instruction *DominatingIP);
1019
1020 /// Enters a new scope for capturing cleanups, all of which
1021 /// will be executed once the scope is exited.
1023 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
1024 size_t LifetimeExtendedCleanupStackSize;
1025 CleanupDeactivationScope DeactivateCleanups;
1026 bool OldDidCallStackSave;
1027
1028 protected:
1030
1031 private:
1032 RunCleanupsScope(const RunCleanupsScope &) = delete;
1033 void operator=(const RunCleanupsScope &) = delete;
1034
1035 protected:
1037
1038 public:
1039 /// Enter a new cleanup scope.
1041 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
1042 CleanupStackDepth = CGF.EHStack.stable_begin();
1043 LifetimeExtendedCleanupStackSize =
1045 OldDidCallStackSave = CGF.DidCallStackSave;
1046 CGF.DidCallStackSave = false;
1047 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
1048 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
1049 }
1050
1051 /// Exit this cleanup scope, emitting any accumulated cleanups.
1053 if (PerformCleanup)
1054 ForceCleanup();
1055 }
1056
1057 /// Determine whether this scope requires any cleanups.
1058 bool requiresCleanups() const {
1059 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1060 }
1061
1062 /// Force the emission of cleanups now, instead of waiting
1063 /// until this object is destroyed.
1064 /// \param ValuesToReload - A list of values that need to be available at
1065 /// the insertion point after cleanup emission. If cleanup emission created
1066 /// a shared cleanup block, these value pointers will be rewritten.
1067 /// Otherwise, they will not be modified.
1068 void
1069 ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
1070 assert(PerformCleanup && "Already forced cleanup");
1071 CGF.DidCallStackSave = OldDidCallStackSave;
1072 DeactivateCleanups.ForceDeactivate();
1073 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1074 ValuesToReload);
1075 PerformCleanup = false;
1076 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1077 }
1078 };
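// A minimal usage sketch (illustrative): cleanups pushed while the scope is
// alive are emitted when it is destroyed or when ForceCleanup() is called:
//
//   {
//     CodeGenFunction::RunCleanupsScope Scope(CGF);
//     // ... emit statements; temporaries and destructors push cleanups ...
//   } // ~RunCleanupsScope() pops and emits the accumulated cleanups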
1079
1080 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1081 EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
1082 EHScopeStack::stable_end();
1083
1087 LexicalScope *ParentScope;
1088
1089 LexicalScope(const LexicalScope &) = delete;
1090 void operator=(const LexicalScope &) = delete;
1091
1092 public:
1093 /// Enter a new cleanup scope.
1095
1096 void addLabel(const LabelDecl *label) {
1097 assert(PerformCleanup && "adding label to dead scope?");
1098 Labels.push_back(label);
1099 }
1100
1101 /// Exit this cleanup scope, emitting any accumulated
1102 /// cleanups.
1103 ~LexicalScope();
1104
1105 /// Force the emission of cleanups now, instead of waiting
1106 /// until this object is destroyed.
1108 CGF.CurLexicalScope = ParentScope;
1109 RunCleanupsScope::ForceCleanup();
1110
1111 if (!Labels.empty())
1112 rescopeLabels();
1113 }
1114
1115 bool hasLabels() const { return !Labels.empty(); }
1116
1117 void rescopeLabels();
1118 };
1119
1120 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1121
1122 /// A helper class used to temporarily assign some variables new addresses.
1124 DeclMapTy SavedLocals;
1125 DeclMapTy SavedTempAddresses;
1126 OMPMapVars(const OMPMapVars &) = delete;
1127 void operator=(const OMPMapVars &) = delete;
1128
1129 public:
1130 explicit OMPMapVars() = default;
1131 ~OMPMapVars() {
1132 assert(SavedLocals.empty() && "Did not restore original addresses.");
1133 };
1134
1135 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1136 /// function \p CGF.
1137 /// \return true if at least one variable was set already, false otherwise.
1138 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1139 Address TempAddr) {
1140 LocalVD = LocalVD->getCanonicalDecl();
1141 // Only save it once.
1142 if (SavedLocals.count(LocalVD))
1143 return false;
1144
1145 // Copy the existing local entry to SavedLocals.
1146 auto it = CGF.LocalDeclMap.find(LocalVD);
1147 if (it != CGF.LocalDeclMap.end())
1148 SavedLocals.try_emplace(LocalVD, it->second);
1149 else
1150 SavedLocals.try_emplace(LocalVD, Address::invalid());
1151
1152 // Generate the private entry.
1153 QualType VarTy = LocalVD->getType();
1154 if (VarTy->isReferenceType()) {
1155 Address Temp = CGF.CreateMemTemp(VarTy);
1156 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1157 TempAddr = Temp;
1158 }
1159 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1160
1161 return true;
1162 }
1163
1164 /// Applies new addresses to the list of the variables.
1165 /// \return true if at least one variable is using new address, false
1166 /// otherwise.
1168 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1169 SavedTempAddresses.clear();
1170 return !SavedLocals.empty();
1171 }
1172
1173 /// Restores original addresses of the variables.
1175 if (!SavedLocals.empty()) {
1176 copyInto(SavedLocals, CGF.LocalDeclMap);
1177 SavedLocals.clear();
1178 }
1179 }
1180
1181 private:
1182 /// Copy all the entries in the source map over the corresponding
1183 /// entries in the destination, which must exist.
1184 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1185 for (auto &[Decl, Addr] : Src) {
1186 if (!Addr.isValid())
1187 Dest.erase(Decl);
1188 else
1189 Dest.insert_or_assign(Decl, Addr);
1190 }
1191 }
1192 };
1193
1194 /// The scope used to remap some variables as private in the OpenMP loop body
1195 /// (or other captured region emitted without outlining), and to restore old
1196 /// vars back on exit.
1198 OMPMapVars MappedVars;
1199 OMPPrivateScope(const OMPPrivateScope &) = delete;
1200 void operator=(const OMPPrivateScope &) = delete;
1201
1202 public:
1203 /// Enter a new OpenMP private scope.
1205
1206 /// Registers the \p LocalVD variable as private, with \p Addr as the
1207 /// address of the corresponding private variable (the generated private
1208 /// copy).
1209 /// \return true if the variable is registered as private, false if it has
1210 /// been privatized already.
1211 bool addPrivate(const VarDecl *LocalVD, Address Addr) {
1212 assert(PerformCleanup && "adding private to dead scope");
1213 return MappedVars.setVarAddr(CGF, LocalVD, Addr);
1214 }
1215
1216 /// Privatizes local variables previously registered as private.
1217 /// Registration is separate from the actual privatization to allow
1218 /// initializers to use values of the original variables, not the private ones.
1219 /// This is important, for example, if the private variable is a class
1220 /// variable initialized by a constructor that references other private
1221 /// variables. But at initialization original variables must be used, not
1222 /// private copies.
1223 /// \return true if at least one variable was privatized, false otherwise.
1224 bool Privatize() { return MappedVars.apply(CGF); }
1225
1227 RunCleanupsScope::ForceCleanup();
1228 restoreMap();
1229 }
1230
1231 /// Exit scope - all the mapped variables are restored.
1233 if (PerformCleanup)
1234 ForceCleanup();
1235 }
1236
1237 /// Checks if the global variable is captured in current function.
1238 bool isGlobalVarCaptured(const VarDecl *VD) const {
1239 VD = VD->getCanonicalDecl();
1240 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1241 }
1242
1243 /// Restore all mapped variables without cleanup. This is useful when we want
1244 /// to reference the original variables but don't want the cleanup, because
1245 /// that could emit lifetime-end markers too early, causing backend issue #56913.
1246 void restoreMap() { MappedVars.restore(CGF); }
1247 };
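// A minimal usage sketch (illustrative); 'VD' and 'PrivAddr' are hypothetical:
//
//   CodeGenFunction::OMPPrivateScope PrivScope(CGF);
//   PrivScope.addPrivate(VD, PrivAddr); // register the private copy
//   (void)PrivScope.Privatize();        // remap VD to PrivAddr for emission
//   // ... emit the region body using the private copy ...
//   PrivScope.restoreMap();             // or rely on the destructor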
1248
1249 /// Save/restore the original map of previously emitted local vars for cases
1250 /// when we need to duplicate emission of the same code several times in the
1251 /// same function for OpenMP code.
1253 CodeGenFunction &CGF;
1254 DeclMapTy SavedMap;
1255
1256 public:
1258 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1259 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1260 };
1261
1262 /// Takes the old cleanup stack size and emits the cleanup blocks
1263 /// that have been added.
1264 void
1265 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
1266 std::initializer_list<llvm::Value **> ValuesToReload = {});
1267
1268 /// Takes the old cleanup stack size and emits the cleanup blocks
1269 /// that have been added, then adds all lifetime-extended cleanups from
1270 /// the given position to the stack.
1271 void
1272 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
1273 size_t OldLifetimeExtendedStackSize,
1274 std::initializer_list<llvm::Value **> ValuesToReload = {});
1275
1276 void ResolveBranchFixups(llvm::BasicBlock *Target);
1277
1278 /// The given basic block lies in the current EH scope, but may be a
1279 /// target of a potentially scope-crossing jump; get a stable handle
1280 /// to which we can perform this jump later.
1282 return JumpDest(Target, EHStack.getInnermostNormalCleanup(),
1283 NextCleanupDestIndex++);
1284 }
1285
1286 /// The given basic block lies in the current EH scope, but may be a
1287 /// target of a potentially scope-crossing jump; get a stable handle
1288 /// to which we can perform this jump later.
1289 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1290 return getJumpDestInCurrentScope(createBasicBlock(Name));
1291 }
1292
1293 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1294 /// block through the normal cleanup handling code (if any) and then
1295 /// on to \arg Dest.
1296 void EmitBranchThroughCleanup(JumpDest Dest);
1297
1298 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1299 /// specified destination obviously has no cleanups to run. 'false' is always
1300 /// a conservatively correct answer for this method.
1301 bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
1302
1303 /// popCatchScope - Pops the catch scope at the top of the EHScope
1304 /// stack, emitting any required code (other than the catch handlers
1305 /// themselves).
1306 void popCatchScope();
1307
1308 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1309 llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
1310 llvm::BasicBlock *
1311 getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
1312
1313 /// An object to manage conditionally-evaluated expressions.
1315 llvm::BasicBlock *StartBB;
1316
1317 public:
1319 : StartBB(CGF.Builder.GetInsertBlock()) {}
1320
1322 assert(CGF.OutermostConditional != this);
1323 if (!CGF.OutermostConditional)
1324 CGF.OutermostConditional = this;
1325 }
1326
1328 assert(CGF.OutermostConditional != nullptr);
1329 if (CGF.OutermostConditional == this)
1330 CGF.OutermostConditional = nullptr;
1331 }
1332
1333 /// Returns a block which will be executed prior to each
1334 /// evaluation of the conditional code.
1335 llvm::BasicBlock *getStartingBlock() const { return StartBB; }
1336 };
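// A minimal sketch (illustrative) of bracketing a conditionally evaluated arm;
// the begin()/end() member names are assumed from the elided declarations
// above:
//
//   ConditionalEvaluation CondEval(CGF);
//   CondEval.begin(CGF);
//   // ... emit e.g. the RHS of '&&' or one arm of '?:'; cleanups pushed
//   //     here become conditional (see pushFullExprCleanup) ...
//   CondEval.end(CGF);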
1337
1338 /// isInConditionalBranch - Return true if we're currently emitting
1339 /// one branch or the other of a conditional expression.
1340 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1341
1342 void setBeforeOutermostConditional(llvm::Value *value, Address addr,
1343 CodeGenFunction &CGF) {
1344 assert(isInConditionalBranch());
1345 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1346 auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
1347 block->back().getIterator());
1348 store->setAlignment(addr.getAlignment().getAsAlign());
1349 }
1350
1351 /// An RAII object to record that we're evaluating a statement
1352 /// expression.
1354 CodeGenFunction &CGF;
1355
1356 /// We have to save the outermost conditional: cleanups in a
1357 /// statement expression aren't conditional just because the
1358 /// StmtExpr is.
1359 ConditionalEvaluation *SavedOutermostConditional;
1360
1361 public:
1363 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1364 CGF.OutermostConditional = nullptr;
1365 }
1366
1368 CGF.OutermostConditional = SavedOutermostConditional;
1369 CGF.EnsureInsertPoint();
1370 }
1371 };
1372
1373 /// An object which temporarily prevents a value from being
1374 /// destroyed by aggressive peephole optimizations that assume that
1375 /// all uses of a value have been realized in the IR.
1377 llvm::Instruction *Inst = nullptr;
1378 friend class CodeGenFunction;
1379
1380 public:
1382 };
1383
1384 /// A non-RAII class containing all the information about a bound
1385 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1386 /// this which makes individual mappings very simple; using this
1387 /// class directly is useful when you have a variable number of
1388 /// opaque values or don't want the RAII functionality for some
1389 /// reason.
1391 const OpaqueValueExpr *OpaqueValue;
1392 bool BoundLValue;
1394
1395 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
1396 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1397
1398 public:
1399 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1400
1401 static bool shouldBindAsLValue(const Expr *expr) {
1402 // gl-values should be bound as l-values for obvious reasons.
1403 // Records should be bound as l-values because IR generation
1404 // always keeps them in memory. Expressions of function type
1405 // act exactly like l-values but are formally required to be
1406 // r-values in C.
1407 return expr->isGLValue() || expr->getType()->isFunctionType() ||
1408 hasAggregateEvaluationKind(expr->getType());
1409 }
1410
1412 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) {
1413 if (shouldBindAsLValue(ov))
1414 return bind(CGF, ov, CGF.EmitLValue(e));
1415 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1416 }
1417
1419 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) {
1420 assert(shouldBindAsLValue(ov));
1421 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1422 return OpaqueValueMappingData(ov, true);
1423 }
1424
1426 bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) {
1427 assert(!shouldBindAsLValue(ov));
1428 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1429
1430 OpaqueValueMappingData data(ov, false);
1431
1432 // Work around an extremely aggressive peephole optimization in
1433 // EmitScalarConversion which assumes that all other uses of a
1434 // value are extant.
1435 data.Protection = CGF.protectFromPeepholes(rv);
1436
1437 return data;
1438 }
1439
1440 bool isValid() const { return OpaqueValue != nullptr; }
1441 void clear() { OpaqueValue = nullptr; }
1442
1444 assert(OpaqueValue && "no data to unbind!");
1445
1446 if (BoundLValue) {
1447 CGF.OpaqueLValues.erase(OpaqueValue);
1448 } else {
1449 CGF.OpaqueRValues.erase(OpaqueValue);
1450 CGF.unprotectFromPeepholes(Protection);
1451 }
1452 }
1453 };
1454
1455 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1457 CodeGenFunction &CGF;
1459
1460 public:
1461 static bool shouldBindAsLValue(const Expr *expr) {
1462 return OpaqueValueMappingData::shouldBindAsLValue(expr);
1463 }
1464
1465 /// Build the opaque value mapping for the given conditional
1466 /// operator if it's the GNU ?: extension. This is a common
1467 /// enough pattern that the convenience operator is really
1468 /// helpful.
1469 ///
1472 : CGF(CGF) {
1473 if (isa<ConditionalOperator>(op))
1474 // Leave Data empty.
1475 return;
1476
1477 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1478 Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1479 e->getCommon());
1480 }
1481
1482 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1483 /// expression is set to the expression the OVE represents.
1485 : CGF(CGF) {
1486 if (OV) {
1487 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1488 "for OVE with no source expression");
1489 Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1490 }
1491 }
1492
1494 LValue lvalue)
1495 : CGF(CGF),
1496 Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {}
1497
1499 RValue rvalue)
1500 : CGF(CGF),
1501 Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {}
1502
1503 void pop() {
1504 Data.unbind(CGF);
1505 Data.clear();
1506 }
1507
1509 if (Data.isValid())
1510 Data.unbind(CGF);
1511 }
1512 };
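// A minimal usage sketch (illustrative); 'OVE' and 'LV' are hypothetical:
//
//   CodeGenFunction::OpaqueValueMapping Mapping(CGF, OVE, LV);
//   // ... emitting code that refers to OVE now finds LV ...
//   // the binding is removed in ~OpaqueValueMapping(), or earlier via pop()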
1513
1514private:
1515 CGDebugInfo *DebugInfo;
1516 /// Used to create unique names for artificial VLA size debug info variables.
1517 unsigned VLAExprCounter = 0;
1518 bool DisableDebugInfo = false;
1519
1520 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1521 /// calling llvm.stacksave for multiple VLAs in the same scope.
1522 bool DidCallStackSave = false;
1523
1524 /// IndirectBranch - The first time an indirect goto is seen we create a block
1525 /// with an indirect branch. Every time we see the address of a label taken,
1526 /// we add the label to the indirect goto. Every subsequent indirect goto is
1527 /// codegen'd as a jump to the IndirectBranch's basic block.
1528 llvm::IndirectBrInst *IndirectBranch = nullptr;
1529
1530 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1531 /// decls.
1532 DeclMapTy LocalDeclMap;
1533
1534 // Keep track of the cleanups for callee-destructed parameters pushed to the
1535 // cleanup stack so that they can be deactivated later.
1536 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1537 CalleeDestructedParamCleanups;
1538
1539 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1540 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1541 /// parameter.
1542 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1543 SizeArguments;
1544
1545 /// Track escaped local variables with auto storage. Used during SEH
1546 /// outlining to produce a call to llvm.localescape.
1547 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1548
1549 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1550 llvm::DenseMap<const LabelDecl *, JumpDest> LabelMap;
1551
1552 // BreakContinueStack - This keeps track of where break and continue
1553 // statements should jump to.
1554 struct BreakContinue {
1555 BreakContinue(const Stmt &LoopOrSwitch, JumpDest Break, JumpDest Continue)
1556 : LoopOrSwitch(&LoopOrSwitch), BreakBlock(Break),
1557 ContinueBlock(Continue) {}
1558
1559 const Stmt *LoopOrSwitch;
1560 JumpDest BreakBlock;
1561 JumpDest ContinueBlock;
1562 };
1563 SmallVector<BreakContinue, 8> BreakContinueStack;
1564
1565 /// Handles cancellation exit points in OpenMP-related constructs.
1566 class OpenMPCancelExitStack {
1567 /// Tracks cancellation exit point and join point for cancel-related exit
1568 /// and normal exit.
1569 struct CancelExit {
1570 CancelExit() = default;
1571 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1572 JumpDest ContBlock)
1573 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1574 OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
1575 /// true if the exit block has been emitted already by the special
1576 /// emitExit() call, false if the default codegen is used.
1577 bool HasBeenEmitted = false;
1578 JumpDest ExitBlock;
1579 JumpDest ContBlock;
1580 };
1581
1582 SmallVector<CancelExit, 8> Stack;
1583
1584 public:
1585 OpenMPCancelExitStack() : Stack(1) {}
1586 ~OpenMPCancelExitStack() = default;
1587 /// Fetches the exit block for the current OpenMP construct.
1588 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1589 /// Emits exit block with special codegen procedure specific for the related
1590 /// OpenMP construct + emits code for normal construct cleanup.
1591 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1592 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1593 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1594 assert(CGF.getOMPCancelDestination(Kind).isValid());
1595 assert(CGF.HaveInsertPoint());
1596 assert(!Stack.back().HasBeenEmitted);
1597 auto IP = CGF.Builder.saveAndClearIP();
1598 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1599 CodeGen(CGF);
1600 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1601 CGF.Builder.restoreIP(IP);
1602 Stack.back().HasBeenEmitted = true;
1603 }
1604 CodeGen(CGF);
1605 }
1606 /// Enter the cancel supporting \a Kind construct.
1607 /// \param Kind OpenMP directive that supports cancel constructs.
1608 /// \param HasCancel true, if the construct has inner cancel directive,
1609 /// false otherwise.
1610 void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1611 Stack.push_back({Kind,
1612 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1613 : JumpDest(),
1614 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1615 : JumpDest()});
1616 }
1617 /// Emits default exit point for the cancel construct (if the special one
1618 /// has not been used) + join point for cancel/normal exits.
1619 void exit(CodeGenFunction &CGF) {
1620 if (getExitBlock().isValid()) {
1621 assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1622 bool HaveIP = CGF.HaveInsertPoint();
1623 if (!Stack.back().HasBeenEmitted) {
1624 if (HaveIP)
1625 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1626 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1627 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1628 }
1629 CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1630 if (!HaveIP) {
1631 CGF.Builder.CreateUnreachable();
1632 CGF.Builder.ClearInsertionPoint();
1633 }
1634 }
1635 Stack.pop_back();
1636 }
1637 };
1638 OpenMPCancelExitStack OMPCancelStack;
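// A minimal sketch (illustrative) of the intended protocol, used from
// CodeGenFunction's own OpenMP emission code; 'Kind' would be e.g.
// llvm::omp::OMPD_parallel:
//
//   OMPCancelStack.enter(*this, Kind, /*HasCancel=*/true);
//   // ... emit the region body; a 'cancel' branches to getExitBlock() ...
//   OMPCancelStack.exit(*this); // emits the default exit and the join point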
1639
1640 /// Lower the Likelihood knowledge about \p Cond via the llvm.expect intrinsic.
1641 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1642 Stmt::Likelihood LH);
1643
1644 std::unique_ptr<CodeGenPGO> PGO;
1645
1646 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1647 Address MCDCCondBitmapAddr = Address::invalid();
1648
1649 /// Calculate branch weights appropriate for PGO data
1650 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1651 uint64_t FalseCount) const;
1652 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1653 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1654 uint64_t LoopCount) const;
1655
1656public:
1657 std::pair<bool, bool> getIsCounterPair(const Stmt *S) const;
1658 void markStmtAsUsed(bool Skipped, const Stmt *S);
1659 void markStmtMaybeUsed(const Stmt *S);
1660
1661 /// Increment the profiler's counter for the given statement by \p StepV.
1662 /// If \p StepV is null, the default increment is 1.
1663 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr);
1664
1666 return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
1667 CGM.getCodeGenOpts().MCDCCoverage &&
1668 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1669 }
1670
1671 /// Allocate a temp value on the stack that MCDC can use to track condition
1672 /// results.
1673 void maybeCreateMCDCCondBitmap();
1674
1675 bool isBinaryLogicalOp(const Expr *E) const {
1676 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1677 return (BOp && BOp->isLogicalOp());
1678 }
1679
1680 /// Zero-init the MCDC temp value.
1681 void maybeResetMCDCCondBitmap(const Expr *E);
1682
1683 /// Update the MCDC test vector bitmap after the boolean expression \p E has
1684 /// been evaluated.
1685 void maybeUpdateMCDCTestVectorBitmap(const Expr *E);
1686
1687 /// Update the MCDC temp value with the condition's evaluated result.
1688 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val);
1689
1690 /// Get the profiler's count for the given statement.
1691 uint64_t getProfileCount(const Stmt *S);
1692
1693 /// Set the profiler's current count.
1694 void setCurrentProfileCount(uint64_t Count);
1695
1696 /// Get the profiler's current count. This is generally the count for the most
1697 /// recently incremented counter.
1698 uint64_t getCurrentProfileCount();
1699
1700 /// See CGDebugInfo::addInstToCurrentSourceAtom.
1701 void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction,
1702 llvm::Value *Backup);
1703
1704 /// See CGDebugInfo::addInstToSpecificSourceAtom.
1705 void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction,
1706 llvm::Value *Backup, uint64_t Atom);
1707
1708 /// Add \p KeyInstruction and an optional \p Backup instruction to a new atom
1709 /// group (See ApplyAtomGroup for more info).
1710 void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
1711 llvm::Value *Backup);
1712
1713private:
1714 /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
1715 /// if the current context is not in a switch.
1716 llvm::SwitchInst *SwitchInsn = nullptr;
1717 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1718 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1719
1720 /// The likelihood attributes of the SwitchCase.
1721 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1722
1723 /// CaseRangeBlock - This block holds the condition check for the last case
1724 /// statement range in the current switch instruction.
1725 llvm::BasicBlock *CaseRangeBlock = nullptr;
1726
1727 /// OpaqueLValues - Keeps track of the current set of opaque value
1728 /// expressions.
1729 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1730 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1731
1732 // VLASizeMap - This keeps track of the associated size for each VLA type.
1733 // We track this by the size expression rather than the type itself because
1734 // in certain situations, like a const qualifier applied to a VLA typedef,
1735 // multiple VLA types can share the same size expression.
1736 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1737 // enter/leave scopes.
1738 llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;
1739
1740 /// A block containing a single 'unreachable' instruction. Created
1741 /// lazily by getUnreachableBlock().
1742 llvm::BasicBlock *UnreachableBlock = nullptr;
1743
1744 /// Count of the number of return expressions in the function.
1745 unsigned NumReturnExprs = 0;
1746
1747 /// Count of the number of simple (constant) return expressions in the function.
1748 unsigned NumSimpleReturnExprs = 0;
1749
1750 /// The last regular (non-return) debug location (breakpoint) in the function.
1751 SourceLocation LastStopPoint;
1752
1753public:
1754 /// Source location information about the default argument or member
1755 /// initializer expression we're evaluating, if any.
1759
1760 /// A scope within which we are constructing the fields of an object which
1761 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1762 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1764 public:
1766 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1767 CGF.CXXDefaultInitExprThis = This;
1768 }
1770 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1771 }
1772
1773 private:
1774 CodeGenFunction &CGF;
1775 Address OldCXXDefaultInitExprThis;
1776 };
1777
1778 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1779 /// is overridden to be the object under construction.
1781 public:
1783 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1784 OldCXXThisAlignment(CGF.CXXThisAlignment),
1785 SourceLocScope(E, CGF.CurSourceLocExprScope) {
1786 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1787 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1788 }
1790 CGF.CXXThisValue = OldCXXThisValue;
1791 CGF.CXXThisAlignment = OldCXXThisAlignment;
1792 }
1793
1794 public:
1796 llvm::Value *OldCXXThisValue;
1799 };
1800
1803 : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
1804 };
1805
1806 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1807 /// current loop index is overridden.
1809 public:
1810 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1811 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1812 CGF.ArrayInitIndex = Index;
1813 }
1814 ~ArrayInitLoopExprScope() { CGF.ArrayInitIndex = OldArrayInitIndex; }
1815
1816 private:
1817 CodeGenFunction &CGF;
1818 llvm::Value *OldArrayInitIndex;
1819 };
1820
1822 public:
1824 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1825 OldCurCodeDecl(CGF.CurCodeDecl),
1826 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1827 OldCXXABIThisValue(CGF.CXXABIThisValue),
1828 OldCXXThisValue(CGF.CXXThisValue),
1829 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1830 OldCXXThisAlignment(CGF.CXXThisAlignment),
1831 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1832 OldCXXInheritedCtorInitExprArgs(
1833 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1834 CGF.CurGD = GD;
1835 CGF.CurFuncDecl = CGF.CurCodeDecl =
1836 cast<CXXConstructorDecl>(GD.getDecl());
1837 CGF.CXXABIThisDecl = nullptr;
1838 CGF.CXXABIThisValue = nullptr;
1839 CGF.CXXThisValue = nullptr;
1840 CGF.CXXABIThisAlignment = CharUnits();
1841 CGF.CXXThisAlignment = CharUnits();
1842 CGF.ReturnValue = Address::invalid();
1843 CGF.FnRetTy = QualType();
1844 CGF.CXXInheritedCtorInitExprArgs.clear();
1845 }
1847 CGF.CurGD = OldCurGD;
1848 CGF.CurFuncDecl = OldCurFuncDecl;
1849 CGF.CurCodeDecl = OldCurCodeDecl;
1850 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1851 CGF.CXXABIThisValue = OldCXXABIThisValue;
1852 CGF.CXXThisValue = OldCXXThisValue;
1853 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1854 CGF.CXXThisAlignment = OldCXXThisAlignment;
1855 CGF.ReturnValue = OldReturnValue;
1856 CGF.FnRetTy = OldFnRetTy;
1857 CGF.CXXInheritedCtorInitExprArgs =
1858 std::move(OldCXXInheritedCtorInitExprArgs);
1859 }
1860
1861 private:
1862 CodeGenFunction &CGF;
1863 GlobalDecl OldCurGD;
1864 const Decl *OldCurFuncDecl;
1865 const Decl *OldCurCodeDecl;
1866 ImplicitParamDecl *OldCXXABIThisDecl;
1867 llvm::Value *OldCXXABIThisValue;
1868 llvm::Value *OldCXXThisValue;
1869 CharUnits OldCXXABIThisAlignment;
1870 CharUnits OldCXXThisAlignment;
1871 Address OldReturnValue;
1872 QualType OldFnRetTy;
1873 CallArgList OldCXXInheritedCtorInitExprArgs;
1874 };
1875
1876 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
1877 // region body and finalization codegen callbacks. This class will also
1878 // contain privatization functions used by the privatization callbacks.
1879 //
1880 // TODO: this is a temporary class for things that are being moved out of
1881 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1882 // utility functions for use with the OMPBuilder. Once the move to use the
1883 // OMPBuilder is done, everything here will either become part of
1884 // CodeGenFunction directly, or a new helper class that will contain
1885 // functions used by both this and the OMPBuilder.
1886
1888
1892
1893 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1894
1895 /// Cleanup action for allocate support.
1897
1898 private:
1899 llvm::CallInst *RTLFnCI;
1900
1901 public:
1902 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1903 RLFnCI->removeFromParent();
1904 }
1905
1906 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1907 if (!CGF.HaveInsertPoint())
1908 return;
1909 CGF.Builder.Insert(RTLFnCI);
1910 }
1911 };
1912
1913 /// Returns the address of the threadprivate variable for the current
1914 /// thread. This also creates any necessary OMP runtime calls.
1915 ///
1916 /// \param VD VarDecl for the threadprivate variable.
1917 /// \param VDAddr Address of the VarDecl.
1918 /// \param Loc The location where the threadprivate variable was encountered.
1919 static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
1920 const VarDecl *VD, Address VDAddr,
1922
1923 /// Gets the OpenMP-specific address of the local variable \p VD.
1924 static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
1925 const VarDecl *VD);
1926 /// Get the platform-specific name separator.
1927 /// \param Parts Different parts of the final name that need separation.
1928 /// \param FirstSeparator First separator used between the initial two
1929 /// parts of the name.
1930 /// \param Separator Separator used between all of the remaining consecutive
1931 /// parts of the name.
1932 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1933 StringRef FirstSeparator = ".",
1934 StringRef Separator = ".");
1935 /// Emit the Finalization for an OMP region
1936 /// \param CGF The Codegen function this belongs to
1937 /// \param IP Insertion point for generating the finalization code.
1939 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1940 assert(IP.getBlock()->end() != IP.getPoint() &&
1941 "OpenMP IR Builder should cause terminated block!");
1942
1943 llvm::BasicBlock *IPBB = IP.getBlock();
1944 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1945 assert(DestBB && "Finalization block should have one successor!");
1946
1947 // erase and replace with cleanup branch.
1948 IPBB->getTerminator()->eraseFromParent();
1949 CGF.Builder.SetInsertPoint(IPBB);
1951 CGF.EmitBranchThroughCleanup(Dest);
1952 }
1953
1954 /// Emit the body of an OMP region
1955 /// \param CGF The Codegen function this belongs to
1956 /// \param RegionBodyStmt The body statement for the OpenMP region being
1957 /// generated
1958 /// \param AllocaIP Where to insert alloca instructions
1959 /// \param CodeGenIP Where to insert the region code
1960 /// \param RegionName Name to be used for new blocks
1961 static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
1962 const Stmt *RegionBodyStmt,
1963 InsertPointTy AllocaIP,
1964 InsertPointTy CodeGenIP,
1965 Twine RegionName);
1966
1967 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1968 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1970 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1971 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1972 CodeGenIPBBTI->eraseFromParent();
1973
1974 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1975
1976 if (Fn->doesNotThrow())
1977 CGF.EmitNounwindRuntimeCall(Fn, Args);
1978 else
1979 CGF.EmitRuntimeCall(Fn, Args);
1980
1981 if (CGF.Builder.saveIP().isSet())
1982 CGF.Builder.CreateBr(&FiniBB);
1983 }
1984
1985 /// Emit the body of an OMP region that will be outlined in
1986 /// OpenMPIRBuilder::finalize().
1987 /// \param CGF The Codegen function this belongs to
1988 /// \param RegionBodyStmt The body statement for the OpenMP region being
1989 /// generated
1990 /// \param AllocaIP Where to insert alloca instructions
1991 /// \param CodeGenIP Where to insert the region code
1992 /// \param RegionName Name to be used for new blocks
1993 static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
1994 const Stmt *RegionBodyStmt,
1995 InsertPointTy AllocaIP,
1996 InsertPointTy CodeGenIP,
1997 Twine RegionName);
1998
1999 /// RAII for preserving necessary info during Outlined region body codegen.
2001
2002 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2003 CodeGenFunction::JumpDest OldReturnBlock;
2004 CodeGenFunction &CGF;
2005
2006 public:
2008 llvm::BasicBlock &RetBB)
2009 : CGF(cgf) {
2010 assert(AllocaIP.isSet() &&
2011 "Must specify Insertion point for allocas of outlined function");
2012 OldAllocaIP = CGF.AllocaInsertPt;
2013 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2014
2015 OldReturnBlock = CGF.ReturnBlock;
2016 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2017 }
2018
2020 CGF.AllocaInsertPt = OldAllocaIP;
2021 CGF.ReturnBlock = OldReturnBlock;
2022 }
2023 };
2024
2025 /// RAII for preserving necessary info during inlined region body codegen.
2027
2028 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2029 CodeGenFunction &CGF;
2030
2031 public:
2033 llvm::BasicBlock &FiniBB)
2034 : CGF(cgf) {
2035 // The alloca insertion block should be in the entry block of the containing
2036 // function, so this expects either an empty AllocaIP (in which case the old
2037 // alloca insertion point is reused) or a new AllocaIP in the same block as
2038 // the old one.
2039 assert((!AllocaIP.isSet() ||
2040 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2041 "Insertion point should be in the entry block of containing "
2042 "function!");
2043 OldAllocaIP = CGF.AllocaInsertPt;
2044 if (AllocaIP.isSet())
2045 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2046
2047 // TODO: Remove the call, after making sure the counter is not used by
2048 // the EHStack.
2049 // Since this is an inlined region, it should not modify the
2050 // ReturnBlock, and should reuse the one for the enclosing outlined
2051 // region. So, the JumpDest returned by the call below is discarded.
2052 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2053 }
2054
2056 };
2057 };
2058
2059private:
2060 /// CXXABIThisDecl - When generating code for a C++ member function,
2061 /// this will hold the implicit 'this' declaration.
2062 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2063 llvm::Value *CXXABIThisValue = nullptr;
2064 llvm::Value *CXXThisValue = nullptr;
2065 CharUnits CXXABIThisAlignment;
2066 CharUnits CXXThisAlignment;
2067
2068 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2069 /// this expression.
2070 Address CXXDefaultInitExprThis = Address::invalid();
2071
2072 /// The current array initialization index when evaluating an
2073 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2074 llvm::Value *ArrayInitIndex = nullptr;
2075
2076 /// The values of function arguments to use when evaluating
2077 /// CXXInheritedCtorInitExprs within this context.
2078 CallArgList CXXInheritedCtorInitExprArgs;
2079
2080 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2081 /// destructor, this will hold the implicit argument (e.g. VTT).
2082 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2083 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2084
2085 /// OutermostConditional - Points to the outermost active
2086 /// conditional control. This is used so that we know if a
2087 /// temporary should be destroyed conditionally.
2088 ConditionalEvaluation *OutermostConditional = nullptr;
2089
2090 /// The current lexical scope.
2091 LexicalScope *CurLexicalScope = nullptr;
2092
2093 /// The current source location that should be used for exception
2094 /// handling code.
2095 SourceLocation CurEHLocation;
2096
2097 /// BlockByrefInfos - For each __block variable, contains
2098 /// information about the layout of the variable.
2099 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2100
2101 /// Used by -fsanitize=nullability-return to determine whether the return
2102 /// value can be checked.
2103 llvm::Value *RetValNullabilityPrecondition = nullptr;
2104
2105 /// Check if -fsanitize=nullability-return instrumentation is required for
2106 /// this function.
2107 bool requiresReturnValueNullabilityCheck() const {
2108 return RetValNullabilityPrecondition;
2109 }
2110
2111 /// Used to store precise source locations for return statements by the
2112 /// runtime return value checks.
2113 Address ReturnLocation = Address::invalid();
2114
2115 /// Check if the return value of this function requires sanitization.
2116 bool requiresReturnValueCheck() const;
2117
2118 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2119 bool hasInAllocaArg(const CXXMethodDecl *MD);
2120
2121 llvm::BasicBlock *TerminateLandingPad = nullptr;
2122 llvm::BasicBlock *TerminateHandler = nullptr;
2124
2125 /// Terminate funclets keyed by parent funclet pad.
2126 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2127
2128 /// Largest vector width used in this function. Will be used to create a
2129 /// function attribute.
2130 unsigned LargestVectorWidth = 0;
2131
2132 /// True if we need to emit lifetime markers. This is initially set in
2133 /// the constructor, but could be overwritten to true if this is a coroutine.
2134 bool ShouldEmitLifetimeMarkers;
2135
2136 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2137 /// the function metadata.
2138 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2139
2140public:
2141 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext = false);
2142 ~CodeGenFunction();
2143
2144 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2145 ASTContext &getContext() const { return CGM.getContext(); }
2147 if (DisableDebugInfo)
2148 return nullptr;
2149 return DebugInfo;
2150 }
2151 void disableDebugInfo() { DisableDebugInfo = true; }
2152 void enableDebugInfo() { DisableDebugInfo = false; }
2153
2155 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2156 }
2157
2158 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2159
2160 /// Returns a pointer to the function's exception object and selector slot,
2161 /// which is assigned in every landing pad.
2162 Address getExceptionSlot();
2163 Address getEHSelectorSlot();
2164
2165 /// Returns the contents of the function's exception object and selector
2166 /// slots.
2167 llvm::Value *getExceptionFromSlot();
2168 llvm::Value *getSelectorFromSlot();
2169
2170 RawAddress getNormalCleanupDestSlot();
2171
2172 llvm::BasicBlock *getUnreachableBlock() {
2173 if (!UnreachableBlock) {
2174 UnreachableBlock = createBasicBlock("unreachable");
2175 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2176 }
2177 return UnreachableBlock;
2178 }
2179
2180 llvm::BasicBlock *getInvokeDest() {
2181 if (!EHStack.requiresLandingPad())
2182 return nullptr;
2183 return getInvokeDestImpl();
2184 }
2185
2186 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2187
2188 const TargetInfo &getTarget() const { return Target; }
2189 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2191 return CGM.getTargetCodeGenInfo();
2192 }
2193
2194 //===--------------------------------------------------------------------===//
2195 // Cleanups
2196 //===--------------------------------------------------------------------===//
2197
2198 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2199
2200 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2201 Address arrayEndPointer,
2202 QualType elementType,
2203 CharUnits elementAlignment,
2204 Destroyer *destroyer);
2205 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2206 llvm::Value *arrayEnd,
2207 QualType elementType,
2208 CharUnits elementAlignment,
2209 Destroyer *destroyer);
2210
2211 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
2212 QualType type);
2213 void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr,
2214 QualType type);
2215 void pushDestroy(CleanupKind kind, Address addr, QualType type,
2216 Destroyer *destroyer, bool useEHCleanupForArray);
2217 void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
2218 Address addr, QualType type);
2219 void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
2220 QualType type, Destroyer *destroyer,
2221 bool useEHCleanupForArray);
2222 void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
2223 QualType type, Destroyer *destroyer,
2224 bool useEHCleanupForArray);
2225 void pushLifetimeExtendedDestroy(QualType::DestructionKind dtorKind,
2226 Address addr, QualType type);
2227 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2228 llvm::Value *CompletePtr,
2229 QualType ElementType);
2230 void pushStackRestore(CleanupKind kind, Address SPMem);
2231 void pushKmpcAllocFree(CleanupKind Kind,
2232 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2233 void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
2234 bool useEHCleanupForArray);
2235 llvm::Function *generateDestroyHelper(Address addr, QualType type,
2236 Destroyer *destroyer,
2237 bool useEHCleanupForArray,
2238 const VarDecl *VD);
2239 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2240 QualType elementType, CharUnits elementAlign,
2241 Destroyer *destroyer, bool checkZeroLength,
2242 bool useEHCleanup);
2243
2244 Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
2245
2246 /// Determines whether an EH cleanup is required to destroy a type
2247 /// with the given destruction kind.
2249 switch (kind) {
2250 case QualType::DK_none:
2251 return false;
2252 case QualType::DK_cxx_destructor:
2253 case QualType::DK_objc_weak_lifetime:
2254 case QualType::DK_nontrivial_c_struct:
2255 return getLangOpts().Exceptions;
2256 case QualType::DK_objc_strong_lifetime:
2257 return getLangOpts().Exceptions &&
2258 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2259 }
2260 llvm_unreachable("bad destruction kind");
2261 }
2262
2264 return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
2265 }
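  // Illustrative sketch (not part of the original header): a caller that needs
  // to destroy a local of type Ty at address Addr (both hypothetical) might
  // combine the helpers above roughly as follows.
  //
  //   QualType::DestructionKind DK = Ty.isDestructedType();
  //   CleanupKind CK = needsEHCleanup(DK) ? NormalAndEHCleanup : NormalCleanup;
  //   pushDestroy(CK, Addr, Ty, getDestroyer(DK), /*useEHCleanupForArray=*/true);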
2266
2267 //===--------------------------------------------------------------------===//
2268 // Objective-C
2269 //===--------------------------------------------------------------------===//
2270
2271 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
2272
2273 void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
2274
2275 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2276 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
2277 const ObjCPropertyImplDecl *PID);
2278 void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
2279 const ObjCPropertyImplDecl *propImpl,
2280 const ObjCMethodDecl *GetterMethodDecl,
2281 llvm::Constant *AtomicHelperFn);
2282
2283 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
2284 ObjCMethodDecl *MD, bool ctor);
2285
2286 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2287 /// for the given property.
2288 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
2289 const ObjCPropertyImplDecl *PID);
2290 void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
2291 const ObjCPropertyImplDecl *propImpl,
2292 llvm::Constant *AtomicHelperFn);
2293
2294 //===--------------------------------------------------------------------===//
2295 // Block Bits
2296 //===--------------------------------------------------------------------===//
2297
2298 /// Emit block literal.
2299 /// \return an LLVM value which is a pointer to a struct which contains
2300 /// information about the block, including the block invoke function, the
2301 /// captured variables, etc.
2302 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2303
2304 llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info,
2305 const DeclMapTy &ldm,
2306 bool IsLambdaConversionToBlock,
2307 bool BuildGlobalBlock);
2308
2309 /// Check if \p T is a C++ class that has a destructor that can throw.
2310 static bool cxxDestructorCanThrow(QualType T);
2311
2312 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2313 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2314 llvm::Constant *
2315 GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2316 llvm::Constant *
2317 GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2318 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2319
2320 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2321 bool CanThrow);
2322
2323 class AutoVarEmission;
2324
2325 void emitByrefStructureInit(const AutoVarEmission &emission);
2326
2327 /// Enter a cleanup to destroy a __block variable. Note that this
2328 /// cleanup should be a no-op if the variable hasn't left the stack
2329 /// yet; if a cleanup is required for the variable itself, that needs
2330 /// to be done externally.
2331 ///
2332 /// \param Kind Cleanup kind.
2333 ///
2334 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2335 /// structure that will be passed to _Block_object_dispose. When
2336 /// \p LoadBlockVarAddr is true, the address of the field of the block
2337 /// structure that holds the address of the __block structure.
2338 ///
2339 /// \param Flags The flag that will be passed to _Block_object_dispose.
2340 ///
2341 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2342 /// \p Addr to get the address of the __block structure.
2343 void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
2344 bool LoadBlockVarAddr, bool CanThrow);
2345
2346 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2347 llvm::Value *ptr);
2348
2349 Address LoadBlockStruct();
2350 Address GetAddrOfBlockDecl(const VarDecl *var);
2351
2352 /// emitBlockByrefAddress - Computes the location of the
2353 /// data in a variable which is declared as __block.
2354 Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
2355 bool followForward = true);
2356 Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info,
2357 bool followForward, const llvm::Twine &name);
2358
2359 const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
2360
2361 QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
2362
2363 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2364 const CGFunctionInfo &FnInfo);
2365
2366 /// Annotate the function with an attribute that disables TSan checking at
2367 /// runtime.
2368 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2369
2370 /// Emit code for the start of a function.
2371 /// \param Loc The location to be associated with the function.
2372 /// \param StartLoc The location of the function body.
2373 void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn,
2374 const CGFunctionInfo &FnInfo, const FunctionArgList &Args,
2376 SourceLocation StartLoc = SourceLocation());
2377
2378 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
2379
2380 void EmitConstructorBody(FunctionArgList &Args);
2381 void EmitDestructorBody(FunctionArgList &Args);
2382 void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
2383 void EmitFunctionBody(const Stmt *Body);
2384 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2385
2386 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2387 CallArgList &CallArgs,
2388 const CGFunctionInfo *CallOpFnInfo = nullptr,
2389 llvm::Constant *CallOpFn = nullptr);
2390 void EmitLambdaBlockInvokeBody();
2391 void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
2392 void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
2393 CallArgList &CallArgs);
2394 void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
2395 const CGFunctionInfo **ImplFnInfo,
2396 llvm::Function **ImplFn);
2397 void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
2399 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2400 }
2401 void EmitAsanPrologueOrEpilogue(bool Prologue);
2402
2403 /// Emit the unified return block, trying to avoid its emission when
2404 /// possible.
2405 /// \return The debug location of the user written return statement if the
2406 /// return block is avoided.
2407 llvm::DebugLoc EmitReturnBlock();
2408
2409 /// FinishFunction - Complete IR generation of the current function. It is
2410 /// legal to call this function even if there is no current insertion point.
2411 void FinishFunction(SourceLocation EndLoc = SourceLocation());
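  // Illustrative sketch (not part of the original header): GenerateCode drives
  // IR emission for a function body through roughly this sequence; the
  // arguments shown are hypothetical.
  //
  //   StartFunction(GD, RetTy, Fn, FnInfo, Args, Loc, StartLoc);
  //   EmitFunctionBody(Body);
  //   FinishFunction(EndLoc);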
2412
2413 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2414 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2415
2416 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2417 const ThunkInfo *Thunk, bool IsUnprototyped);
2418
2419 void FinishThunk();
2420
2421 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2422 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2423 llvm::FunctionCallee Callee);
2424
2425 /// Generate a thunk for the given method.
2426 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2427 GlobalDecl GD, const ThunkInfo &Thunk,
2428 bool IsUnprototyped);
2429
2430 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2431 const CGFunctionInfo &FnInfo,
2432 GlobalDecl GD, const ThunkInfo &Thunk);
2433
2434 void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
2435 FunctionArgList &Args);
2436
2437 void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
2438
2439 /// Struct with all information about dynamic [sub]class needed to set vptr.
2440 struct VPtr {
2445 };
2446
2447 /// Initialize the vtable pointer of the given subobject.
2448 void InitializeVTablePointer(const VPtr &vptr);
2449
2451
2453 VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
2454
2455 void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
2456 CharUnits OffsetFromNearestVBase,
2457 bool BaseIsNonVirtualPrimaryBase,
2458 const CXXRecordDecl *VTableClass,
2459 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2460
2461 void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
2462
2463 // VTableAuthMode - whether we guarantee that loading the
2464 // vtable will trap on authentication failure,
2465 // even if the resulting vtable pointer is unused.
2466 enum class VTableAuthMode {
2467 Authenticate,
2468 MustTrap,
2469 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2470 };
2471 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2472 /// to by This.
2473 llvm::Value *
2474 GetVTablePtr(Address This, llvm::Type *VTableTy,
2475 const CXXRecordDecl *VTableClass,
2476 VTableAuthMode AuthMode = VTableAuthMode::Authenticate);
2477
2486 };
2487
2488 /// Derived is the presumed address of an object of type T after a
2489 /// cast. If T is a polymorphic class type, emit a check that the virtual
2490 /// table for Derived belongs to a class derived from T.
2491 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2492 CFITypeCheckKind TCK, SourceLocation Loc);
2493
2494 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2495 /// If vptr CFI is enabled, emit a check that VTable is valid.
2496 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2497 CFITypeCheckKind TCK, SourceLocation Loc);
2498
2499 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2500 /// RD using llvm.type.test.
2501 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2502 CFITypeCheckKind TCK, SourceLocation Loc);
2503
2504 /// If whole-program virtual table optimization is enabled, emit an assumption
2505 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2506 /// enabled, emit a check that VTable is a member of RD's type identifier.
2507 void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
2508 llvm::Value *VTable, SourceLocation Loc);
2509
2510 /// Returns whether we should perform a type checked load when loading a
2511 /// virtual function for virtual calls to members of RD. This is generally
2512 /// true when both vcall CFI and whole-program-vtables are enabled.
2513 bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
2514
2515 /// Emit a type checked load from the given vtable.
2516 llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
2517 llvm::Value *VTable,
2518 llvm::Type *VTableTy,
2519 uint64_t VTableByteOffset);
2520
2521 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2522 /// given phase of destruction for a destructor. The end result
2523 /// should call destructors on members and base classes in reverse
2524 /// order of their construction.
2525 void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
2526
2527 /// ShouldInstrumentFunction - Return true if the current function should be
2528 /// instrumented with __cyg_profile_func_* calls
2529 bool ShouldInstrumentFunction();
2530
2531 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2532 /// should not be instrumented with sanitizers.
2533 bool ShouldSkipSanitizerInstrumentation();
2534
2535 /// ShouldXRayInstrument - Return true if the current function should be
2536 /// instrumented with XRay nop sleds.
2537 bool ShouldXRayInstrumentFunction() const;
2538
2539 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2540 /// XRay custom event handling calls.
2541 bool AlwaysEmitXRayCustomEvents() const;
2542
2543 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2544 /// XRay typed event handling calls.
2545 bool AlwaysEmitXRayTypedEvents() const;
2546
2547 /// Return a type hash constant for a function instrumented by
2548 /// -fsanitize=function.
2549 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2550
2551 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2552 /// arguments for the given function. This is also responsible for naming the
2553 /// LLVM function arguments.
2554 void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn,
2555 const FunctionArgList &Args);
2556
2557 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2558 /// given temporary. Specify the source location atom group (Key Instructions
2559 /// debug info feature) for the `ret` using \p RetKeyInstructionsSourceAtom.
2560 /// If it's 0, the `ret` will get added to a new source atom group.
2561 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2562 SourceLocation EndLoc,
2563 uint64_t RetKeyInstructionsSourceAtom);
2564
2565 /// Emit a test that checks if the return value \p RV is nonnull.
2566 void EmitReturnValueCheck(llvm::Value *RV);
2567
2568 /// EmitStartEHSpec - Emit the start of the exception spec.
2569 void EmitStartEHSpec(const Decl *D);
2570
2571 /// EmitEndEHSpec - Emit the end of the exception spec.
2572 void EmitEndEHSpec(const Decl *D);
2573
2574 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2575 llvm::BasicBlock *getTerminateLandingPad();
2576
2577 /// getTerminateFunclet - Return a cleanup funclet that just calls
2578 /// terminate.
2579 llvm::BasicBlock *getTerminateFunclet();
2580
2581 /// getTerminateHandler - Return a handler (not a landing pad, just
2582 /// a catch handler) that just calls terminate. This is used when
2583 /// a terminate scope encloses a try.
2584 llvm::BasicBlock *getTerminateHandler();
2585
2586 llvm::Type *ConvertTypeForMem(QualType T);
2587 llvm::Type *ConvertType(QualType T);
2588 llvm::Type *convertTypeForLoadStore(QualType ASTTy,
2589 llvm::Type *LLVMTy = nullptr);
2590 llvm::Type *ConvertType(const TypeDecl *T) {
2591 return ConvertType(getContext().getTypeDeclType(T));
2592 }
2593
2594 /// LoadObjCSelf - Load the value of self. This function is only valid while
2595 /// generating code for an Objective-C method.
2596 llvm::Value *LoadObjCSelf();
2597
2598 /// TypeOfSelfObject - Return type of object that this self represents.
2599 QualType TypeOfSelfObject();
2600
2601 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2602 static TypeEvaluationKind getEvaluationKind(QualType T);
2603
2605 return getEvaluationKind(T) == TEK_Scalar;
2606 }
2607
2609 return getEvaluationKind(T) == TEK_Aggregate;
2610 }
2611
2612 /// createBasicBlock - Create an LLVM basic block.
2613 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2614 llvm::Function *parent = nullptr,
2615 llvm::BasicBlock *before = nullptr) {
2616 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2617 }
2618
2619 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2620 /// label maps to.
2621 JumpDest getJumpDestForLabel(const LabelDecl *S);
2622
2623 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2624 /// another basic block, simplify it. This assumes that no other code could
2625 /// potentially reference the basic block.
2626 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2627
2628 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2629 /// adding a fall-through branch from the current insert block if
2630 /// necessary. It is legal to call this function even if there is no current
2631 /// insertion point.
2632 ///
2633 /// IsFinished - If true, indicates that the caller has finished emitting
2634 /// branches to the given block and does not expect to emit code into it. This
2635 /// means the block can be ignored if it is unreachable.
2636 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished = false);
2637
2638 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2639 /// near its uses, and leave the insertion point in it.
2640 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2641
2642 /// EmitBranch - Emit a branch to the specified basic block from the current
2643 /// insert block, taking care to avoid creation of branches from dummy
2644 /// blocks. It is legal to call this function even if there is no current
2645 /// insertion point.
2646 ///
2647 /// This function clears the current insertion point. The caller should follow
2648 /// calls to this function with calls to Emit*Block prior to generating new
2649 /// code.
2650 void EmitBranch(llvm::BasicBlock *Block);
2651
2652 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2653 /// indicates that the current code being emitted is unreachable.
2654 bool HaveInsertPoint() const { return Builder.GetInsertBlock() != nullptr; }
2655
2656 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2657 /// emitted IR has a place to go. Note that by definition, if this function
2658 /// creates a block then that block is unreachable; callers may do better to
2659 /// detect when no insertion point is defined and simply skip IR generation.
2661 if (!HaveInsertPoint())
2662 EmitBlock(createBasicBlock());
2663 }
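  // Illustrative sketch (not part of the original header): a common pattern in
  // statement emission pairs EmitBranch and EmitBlock; the block name here is
  // hypothetical.
  //
  //   llvm::BasicBlock *ContBlock = createBasicBlock("cont");
  //   EmitBranch(ContBlock);   // terminate the current block, clear the insert point
  //   // ... emit other blocks ...
  //   EmitBlock(ContBlock);    // add ContBlock and make it the new insert point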
2664
2665 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2666 /// specified stmt yet.
2667 void ErrorUnsupported(const Stmt *S, const char *Type);
2668
2669 //===--------------------------------------------------------------------===//
2670 // Helpers
2671 //===--------------------------------------------------------------------===//
2672
2674 llvm::BasicBlock *LHSBlock,
2675 llvm::BasicBlock *RHSBlock,
2676 llvm::BasicBlock *MergeBlock,
2677 QualType MergedType) {
2678 Builder.SetInsertPoint(MergeBlock);
2679 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2680 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2681 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2682 LHS.replaceBasePointer(PtrPhi);
2683 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2684 return LHS;
2685 }
2686
2687 /// Construct an address with the natural alignment of T. If a pointer to T
2688 /// is expected to be signed, the pointer passed to this function must have
2689 /// been signed, and the returned Address will have the pointer authentication
2690 /// information needed to authenticate the signed pointer.
2692 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2693 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2694 TBAAAccessInfo *TBAAInfo = nullptr,
2695 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2696 if (Alignment.isZero())
2697 Alignment =
2698 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2699 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2700 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2701 IsKnownNonNull);
2702 }
2703
2705 AlignmentSource Source = AlignmentSource::Type) {
2706 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2707 CGM.getTBAAAccessInfo(T));
2708 }
2709
2711 TBAAAccessInfo TBAAInfo) {
2712 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2713 }
2714
2715 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2716 AlignmentSource Source = AlignmentSource::Type) {
2717 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2718 LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2719 }
2720
2721 /// Same as MakeAddrLValue above except that the pointer is known to be
2722 /// unsigned.
2723 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2724 AlignmentSource Source = AlignmentSource::Type) {
2725 Address Addr(V, ConvertTypeForMem(T), Alignment);
2726 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2727 CGM.getTBAAAccessInfo(T));
2728 }
2729
2730 LValue
2732 AlignmentSource Source = AlignmentSource::Type) {
2733 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2734 TBAAAccessInfo());
2735 }
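  // Illustrative sketch (not part of the original header): a raw pointer of a
  // known AST type is typically wrapped into an Address with its natural
  // alignment and then into an LValue; Ptr and PointeeTy are hypothetical.
  //
  //   Address Addr = makeNaturalAddressForPointer(Ptr, PointeeTy);
  //   LValue LV = MakeAddrLValue(Addr, PointeeTy);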
2736
2737 /// Given a value of type T* that may not point to a complete object, construct
2738 /// an l-value with the natural pointee alignment of T.
2739 LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2740
2741 LValue
2742 MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
2743 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2744
2745 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2746 /// to be unsigned.
2747 LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);
2748
2749 LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);
2750
2751 Address EmitLoadOfReference(LValue RefLVal,
2752 LValueBaseInfo *PointeeBaseInfo = nullptr,
2753 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2754 LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2755 LValue
2757 AlignmentSource Source = AlignmentSource::Type) {
2758 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2759 CGM.getTBAAAccessInfo(RefTy));
2760 return EmitLoadOfReferenceLValue(RefLVal);
2761 }
2762
2763 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2764 /// Note that \p PtrTy is the type of the loaded pointer, not the address
2765 /// it is loaded from.
2766 Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2767 LValueBaseInfo *BaseInfo = nullptr,
2768 TBAAAccessInfo *TBAAInfo = nullptr);
2769 LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2770
2771private:
2772 struct AllocaTracker {
2773 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2774 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2775
2776 private:
2778 };
2779 AllocaTracker *Allocas = nullptr;
2780
2781 /// CGDecl helper.
2782 void emitStoresForConstant(const VarDecl &D, Address Loc, bool isVolatile,
2783 llvm::Constant *constant, bool IsAutoInit);
2784 /// CGDecl helper.
2785 void emitStoresForZeroInit(const VarDecl &D, Address Loc, bool isVolatile);
2786 /// CGDecl helper.
2787 void emitStoresForPatternInit(const VarDecl &D, Address Loc, bool isVolatile);
2788 /// CGDecl helper.
2789 void emitStoresForInitAfterBZero(llvm::Constant *Init, Address Loc,
2790 bool isVolatile, bool IsAutoInit);
2791
2792public:
2793 // Captures all the allocas created during the scope of its RAII object.
2796 : CGF(CGF), OldTracker(CGF.Allocas) {
2797 CGF.Allocas = &Tracker;
2798 }
2799 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2800
2801 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2802
2803 private:
2804 CodeGenFunction &CGF;
2805 AllocaTracker *OldTracker;
2806 AllocaTracker Tracker;
2807 };
2808
2809private:
2810 /// If \p Alloca is not in the same address space as \p DestLangAS, insert an
2811 /// address space cast and return a new RawAddress based on this value.
2812 RawAddress MaybeCastStackAddressSpace(RawAddress Alloca, LangAS DestLangAS,
2813 llvm::Value *ArraySize = nullptr);
2814
2815public:
2816 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2817 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2818 /// insertion point of the builder. The caller is responsible for setting an
2819 /// appropriate alignment on
2820 /// the alloca.
2821 ///
2822 /// \p ArraySize is the number of array elements to be allocated if it
2823 /// is not nullptr.
2824 ///
2825 /// LangAS::Default is the address space of pointers to local variables and
2826 /// temporaries, as exposed in the source language. In certain
2827 /// configurations, this is not the same as the alloca address space, and a
2828 /// cast is needed to lift the pointer from the alloca AS into
2829 /// LangAS::Default. This can happen when the target uses a restricted
2830 /// address space for the stack but the source language requires
2831 /// LangAS::Default to be a generic address space. The latter condition is
2832 /// common for most programming languages; OpenCL is an exception in that
2833 /// LangAS::Default is the private address space, which naturally maps
2834 /// to the stack.
2835 ///
2836 /// Because the address of a temporary is often exposed to the program in
2837 /// various ways, this function will perform the cast. The original alloca
2838 /// instruction is returned through \p Alloca if it is not nullptr.
2839 ///
2840 /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2841 /// more efficient if the caller knows that the address will not be exposed.
2842 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2843 llvm::Value *ArraySize = nullptr);
2844
2845 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2846 /// block. The alloca is cast to the address space of \p UseAddrSpace if
2847 /// necessary.
2848 RawAddress CreateTempAlloca(llvm::Type *Ty, LangAS UseAddrSpace,
2849 CharUnits align, const Twine &Name = "tmp",
2850 llvm::Value *ArraySize = nullptr,
2851 RawAddress *Alloca = nullptr);
2852
2853 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2854 /// block. The alloca is cast to the default address space if necessary.
2855 ///
2856 /// FIXME: This version should be removed, and the context should provide the
2857 /// address space to use instead of the default.
2859 const Twine &Name = "tmp",
2860 llvm::Value *ArraySize = nullptr,
2861 RawAddress *Alloca = nullptr) {
2862 return CreateTempAlloca(Ty, LangAS::Default, align, Name, ArraySize,
2863 Alloca);
2864 }
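  // Illustrative sketch (not part of the original header): creating a temporary
  // for an AST type Ty (hypothetical) with its natural alignment, then storing
  // an initial value InitVal (hypothetical) into it.
  //
  //   RawAddress Tmp = CreateTempAlloca(ConvertType(Ty),
  //                                     getContext().getTypeAlignInChars(Ty),
  //                                     "tmp.example");
  //   Builder.CreateStore(InitVal, Tmp);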
2865
2866 RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2867 const Twine &Name = "tmp",
2868 llvm::Value *ArraySize = nullptr);
2869
2870 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
2871 /// default ABI alignment of the given LLVM type.
2872 ///
2873 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2874 /// any given AST type that happens to have been lowered to the
2875 /// given IR type. This should only ever be used for function-local,
2876 /// IR-driven manipulations like saving and restoring a value. Do
2877 /// not hand this address off to arbitrary IRGen routines, and especially
2878 /// do not pass it as an argument to a function that might expect a
2879 /// properly ABI-aligned value.
2880 RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2881 const Twine &Name = "tmp");
2882
2883 /// CreateIRTemp - Create a temporary IR object of the given type, with
2884 /// appropriate alignment. This routine should only be used when a temporary
2885 /// value needs to be stored into an alloca (for example, to avoid explicit
2886 /// PHI construction), but the type is the IR type, not the type appropriate
2887 /// for storing in memory.
2888 ///
2889 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2890 /// ConvertType instead of ConvertTypeForMem.
2891 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2892
2893 /// CreateMemTemp - Create a temporary memory object of the given type, with
2894 /// appropriate alignment, and cast it to the default address space. Returns
2895 /// the original alloca instruction through \p Alloca if it is not nullptr.
2896 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2897 RawAddress *Alloca = nullptr);
2898 RawAddress CreateMemTemp(QualType T, CharUnits Align,
2899 const Twine &Name = "tmp",
2900 RawAddress *Alloca = nullptr);
2901
2902 /// CreateMemTemp - Create a temporary memory object of the given type, with
2903 /// appropriate alignment, without casting it to the default address space.
2904 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2905 RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
2906 const Twine &Name = "tmp");
2907
2908 /// CreateAggTemp - Create a temporary memory object for the given
2909 /// aggregate type.
2910 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2911 RawAddress *Alloca = nullptr) {
2912 return AggValueSlot::forAddr(
2913 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2914 AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
2915 AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
2916 }
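  // Illustrative sketch (not part of the original header): an aggregate
  // expression E (hypothetical) can be evaluated into such a slot using the
  // aggregate-expression emitter assumed to be declared elsewhere in this class.
  //
  //   AggValueSlot Slot = CreateAggTemp(E->getType(), "agg.example");
  //   EmitAggExpr(E, Slot);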
2917
2918 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2919 /// expression and compare the result against zero, returning an Int1Ty value.
2920 llvm::Value *EvaluateExprAsBool(const Expr *E);
2921
2922 /// Retrieve the implicit cast expression of the rhs in a binary operator
2923 /// expression by passing pointers to Value and QualType
2924 /// This is used for implicit bitfield conversion checks, which
2925 /// must compare with the value before potential truncation.
2926 llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
2927 llvm::Value **Previous,
2928 QualType *SrcType);
2929
2930 /// Emit a check for an [implicit] conversion of a bitfield. It is not UB,
2931 /// so we use the value after conversion.
2932 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2933 llvm::Value *Dst, QualType DstType,
2934 const CGBitFieldInfo &Info,
2936
2937 /// EmitIgnoredExpr - Emit an expression in a context which ignores the
2938 /// result.
2939 void EmitIgnoredExpr(const Expr *E);
2940
2941 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2942 /// any type. The result is returned as an RValue struct. If this is an
2943 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2944 /// the result should be returned.
2945 ///
2946 /// \param ignoreResult True if the resulting value isn't used.
2947 RValue EmitAnyExpr(const Expr *E,
2948 AggValueSlot aggSlot = AggValueSlot::ignored(),
2949 bool ignoreResult = false);
2950
2951 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2952 // or the value of the expression, depending on how va_list is defined.
2953 Address EmitVAListRef(const Expr *E);
2954
2955 /// Emit a "reference" to a __builtin_ms_va_list; this is
2956 /// always the value of the expression, because a __builtin_ms_va_list is a
2957 /// pointer to a char.
2958 Address EmitMSVAListRef(const Expr *E);
2959
2960 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
2961 /// always be accessible even if no aggregate location is provided.
2962 RValue EmitAnyExprToTemp(const Expr *E);
2963
2964 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2965 /// arbitrary expression into the given memory location.
2966 void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
2967 bool IsInitializer);
2968
2969 void EmitAnyExprToExn(const Expr *E, Address Addr);
2970
2971 /// EmitInitializationToLValue - Emit an initializer to an LValue.
2972 void EmitInitializationToLValue(
2973 const Expr *E, LValue LV,
2974 AggValueSlot::IsZeroed_t IsZeroed = AggValueSlot::IsNotZeroed);
2975
2976 /// EmitExprAsInit - Emits the code necessary to initialize a
2977 /// location in memory with the given initializer.
2978 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2979 bool capturedByInit);
2980
2981 /// hasVolatileMember - returns true if the aggregate type has a volatile
2982 /// member.
2984 if (const auto *RD = T->getAsRecordDecl())
2985 return RD->hasVolatileMember();
2986 return false;
2987 }
2988
2989 /// Determine whether a return value slot may overlap some other object.
2991 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2992 // class subobjects. These cases may need to be revisited depending on the
2993 // resolution of the relevant core issue.
2994 return AggValueSlot::DoesNotOverlap;
2995 }
2996
2997 /// Determine whether a field initialization may overlap some other object.
2998 AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
2999
3000 /// Determine whether a base class initialization may overlap some other
3001 /// object.
3002 AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
3003 const CXXRecordDecl *BaseRD,
3004 bool IsVirtual);
3005
3006 /// Emit an aggregate assignment.
3008 ApplyAtomGroup Grp(getDebugInfo());
3009 bool IsVolatile = hasVolatileMember(EltTy);
3010 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
3011 }
3012
3014 AggValueSlot::Overlap_t MayOverlap) {
3015 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
3016 }
3017
3018 /// EmitAggregateCopy - Emit an aggregate copy.
3019 ///
3020 /// \param isVolatile \c true iff either the source or the destination is
3021 /// volatile.
3022 /// \param MayOverlap Whether the tail padding of the destination might be
3023 /// occupied by some other object. More efficient code can often be
3024 /// generated if not.
3025 void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
3026 AggValueSlot::Overlap_t MayOverlap,
3027 bool isVolatile = false);
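  // Illustrative sketch (not part of the original header): copying one record
  // lvalue into another when the destination's tail padding is known not to be
  // reused; the addresses and the type RecTy are hypothetical.
  //
  //   LValue DestLV = MakeAddrLValue(DestAddr, RecTy);
  //   LValue SrcLV = MakeAddrLValue(SrcAddr, RecTy);
  //   EmitAggregateCopy(DestLV, SrcLV, RecTy, AggValueSlot::DoesNotOverlap);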
3028
3029 /// GetAddrOfLocalVar - Return the address of a local variable.
3031 auto it = LocalDeclMap.find(VD);
3032 assert(it != LocalDeclMap.end() &&
3033 "Invalid argument to GetAddrOfLocalVar(), no decl!");
3034 return it->second;
3035 }
3036
3037 /// Given an opaque value expression, return its LValue mapping if it exists,
3038 /// otherwise create one.
3039 LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
3040
3041 /// Given an opaque value expression, return its RValue mapping if it exists,
3042 /// otherwise create one.
3043 RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
3044
3045 /// isOpaqueValueEmitted - Return true if the opaque value expression has
3046 /// already been emitted.
3047 bool isOpaqueValueEmitted(const OpaqueValueExpr *E);
3048
3049 /// Get the index of the current ArrayInitLoopExpr, if any.
3050 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3051
3052 /// getAccessedFieldNo - Given an encoded value and a result number, return
3053 /// the input field number being accessed.
3054 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3055
3056 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3057 llvm::BasicBlock *GetIndirectGotoBlock();
3058
3059 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3060 static bool IsWrappedCXXThis(const Expr *E);
3061
3062 /// EmitNullInitialization - Generate code to set a value of the given type to
3063 /// null. If the type contains data member pointers, they will be initialized
3064 /// to -1 in accordance with the Itanium C++ ABI.
3065 void EmitNullInitialization(Address DestPtr, QualType Ty);
3066
3067 /// Emits a call to an LLVM variable-argument intrinsic, either
3068 /// \c llvm.va_start or \c llvm.va_end.
3069 /// \param ArgValue A reference to the \c va_list as emitted by either
3070 /// \c EmitVAListRef or \c EmitMSVAListRef.
3071 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3072 /// calls \c llvm.va_end.
3073 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3074
3075 /// Generate code to get an argument from the passed in pointer
3076 /// and update it accordingly.
3077 /// \param VE The \c VAArgExpr for which to generate code.
3078 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3079 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3080 /// \returns A pointer to the argument.
3081 // FIXME: We should be able to get rid of this method and use the va_arg
3082 // instruction in LLVM instead once it works well enough.
3083 RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
3084 AggValueSlot Slot = AggValueSlot::ignored());
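  // Illustrative sketch (not part of the original header): emitting a va_arg
  // read from a VAArgExpr VE (hypothetical).
  //
  //   Address VAList = EmitVAListRef(VE->getSubExpr());
  //   RValue Arg = EmitVAArg(VE, VAList);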
3085
3086 /// emitArrayLength - Compute the length of an array, even if it's a
3087 /// VLA, and drill down to the base element type.
3088 llvm::Value *emitArrayLength(const ArrayType *arrayType, QualType &baseType,
3089 Address &addr);
3090
3091 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3092 /// the given variably-modified type and store them in the VLASizeMap.
3093 ///
3094 /// This function can be called with a null (unreachable) insert point.
3095 void EmitVariablyModifiedType(QualType Ty);
3096
3098 llvm::Value *NumElts;
3100
3101 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3102 };
3103
3104 /// Return the number of elements for a single dimension
3105 /// for the given array type.
3106 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
3107 VlaSizePair getVLAElements1D(QualType vla);
3108
3109 /// Returns an LLVM value that corresponds to the size,
3110 /// in non-variably-sized elements, of a variable length array type,
3111 /// plus the largest non-variably-sized element type. Assumes that
3112 /// the type has already been emitted with EmitVariablyModifiedType.
3113 VlaSizePair getVLASize(const VariableArrayType *vla);
3114 VlaSizePair getVLASize(QualType vla);
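  // Illustrative sketch (not part of the original header): for a variably
  // modified type VlaTy (hypothetical), the sizes must be captured before they
  // can be queried.
  //
  //   EmitVariablyModifiedType(VlaTy);      // populate VLASizeMap
  //   auto Size = getVLASize(VlaTy);        // number of elements + fixed element type
  //   llvm::Value *NumElts = Size.NumElts;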
3115
3116 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3117 /// generating code for a C++ member function.
3118 llvm::Value *LoadCXXThis() {
3119 assert(CXXThisValue && "no 'this' value for this function");
3120 return CXXThisValue;
3121 }
3122 Address LoadCXXThisAddress();
3123
3124 /// LoadCXXVTT - Load the VTT parameter passed to base constructors/destructors
3125 /// that have virtual bases.
3126 // FIXME: Every place that calls LoadCXXVTT is something
3127 // that needs to be abstracted properly.
3128 llvm::Value *LoadCXXVTT() {
3129 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3130 return CXXStructorImplicitParamValue;
3131 }
3132
3133 /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
3134 /// complete class into a pointer to the given direct base.
3135 Address GetAddressOfDirectBaseInCompleteClass(Address Value,
3136 const CXXRecordDecl *Derived,
3137 const CXXRecordDecl *Base,
3138 bool BaseIsVirtual);
3139
3140 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3141
3142 /// GetAddressOfBaseClass - This function adds the necessary delta to the
3143 /// 'this' pointer and returns the address of the base class.
3144 Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived,
3145 CastExpr::path_const_iterator PathBegin,
3146 CastExpr::path_const_iterator PathEnd,
3147 bool NullCheckValue, SourceLocation Loc);
3148
3149 Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived,
3150 CastExpr::path_const_iterator PathBegin,
3151 CastExpr::path_const_iterator PathEnd,
3152 bool NullCheckValue);
3153
3154 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3155 /// base constructor/destructor with virtual bases.
3156 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3157 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3158 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3159 bool Delegating);
3160
3161 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
3162 CXXCtorType CtorType,
3163 const FunctionArgList &Args,
3164 SourceLocation Loc);
3165 // It's important not to confuse this and the previous function. Delegating
3166 // constructors are the C++11 feature. The constructor delegate optimization
3167 // is used to reduce duplication in the base and complete constructors where
3168 // they are substantially the same.
3169 void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3170 const FunctionArgList &Args);
3171
3172 /// Emit a call to an inheriting constructor (that is, one that invokes a
3173 /// constructor inherited from a base class) by inlining its definition. This
3174 /// is necessary if the ABI does not support forwarding the arguments to the
3175 /// base class constructor (because they're variadic or similar).
3176 void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3177 CXXCtorType CtorType,
3178 bool ForVirtualBase,
3179 bool Delegating,
3180 CallArgList &Args);
3181
3182 /// Emit a call to a constructor inherited from a base class, passing the
3183 /// current constructor's arguments along unmodified (without even making
3184 /// a copy).
3185 void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
3186 bool ForVirtualBase, Address This,
3187 bool InheritedFromVBase,
3188 const CXXInheritedCtorInitExpr *E);
3189
3190 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3191 bool ForVirtualBase, bool Delegating,
3192 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3193
3194 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3195 bool ForVirtualBase, bool Delegating,
3196 Address This, CallArgList &Args,
3197 AggValueSlot::Overlap_t Overlap,
3198 SourceLocation Loc, bool NewPointerIsChecked,
3199 llvm::CallBase **CallOrInvoke = nullptr);
3200
3201 /// Emit assumption loads for all bases. Must be called only on the
3202 /// most-derived class, and only when the object is not under construction.
3203 void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
3204
3205 /// Emit assumption that vptr load == global vtable.
3206 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3207
3208 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This,
3209 Address Src, const CXXConstructExpr *E);
3210
3211 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3212 const ArrayType *ArrayTy, Address ArrayPtr,
3213 const CXXConstructExpr *E,
3214 bool NewPointerIsChecked,
3215 bool ZeroInitialization = false);
3216
3217 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3218 llvm::Value *NumElements, Address ArrayPtr,
3219 const CXXConstructExpr *E,
3220 bool NewPointerIsChecked,
3221 bool ZeroInitialization = false);
3222
3223 static Destroyer destroyCXXObject;
3224
3225 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
3226 bool ForVirtualBase, bool Delegating, Address This,
3227 QualType ThisTy);
3228
3229 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
3230 llvm::Type *ElementTy, Address NewPtr,
3231 llvm::Value *NumElements,
3232 llvm::Value *AllocSizeWithoutCookie);
3233
3234 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3235 Address Ptr);
3236
3237 void EmitSehCppScopeBegin();
3238 void EmitSehCppScopeEnd();
3239 void EmitSehTryScopeBegin();
3240 void EmitSehTryScopeEnd();
3241
3242 bool EmitLifetimeStart(llvm::Value *Addr);
3243 void EmitLifetimeEnd(llvm::Value *Addr);
3244
3245 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3246 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
3247
3248 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3249 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3250 CharUnits CookieSize = CharUnits());
3251
3252 RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
3253 const CallExpr *TheCallExpr, bool IsDelete);
3254
3255 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3256 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3257 Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
3258
3259 /// Situations in which we might emit a check for the suitability of a
3260 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3261 /// compiler-rt.
3262 enum TypeCheckKind {
3263 /// Checking the operand of a load. Must be suitably sized and aligned.
3264 TCK_Load,
3265 /// Checking the destination of a store. Must be suitably sized and aligned.
3266 TCK_Store,
3267 /// Checking the bound value in a reference binding. Must be suitably sized
3268 /// and aligned, but is not required to refer to an object (until the
3269 /// reference is used), per core issue 453.
3270 TCK_ReferenceBinding,
3271 /// Checking the object expression in a non-static data member access. Must
3272 /// be an object within its lifetime.
3273 TCK_MemberAccess,
3274 /// Checking the 'this' pointer for a call to a non-static member function.
3275 /// Must be an object within its lifetime.
3276 TCK_MemberCall,
3277 /// Checking the 'this' pointer for a constructor call.
3278 TCK_ConstructorCall,
3279 /// Checking the operand of a static_cast to a derived pointer type. Must be
3280 /// null or an object within its lifetime.
3281 TCK_DowncastPointer,
3282 /// Checking the operand of a static_cast to a derived reference type. Must
3283 /// be an object within its lifetime.
3284 TCK_DowncastReference,
3285 /// Checking the operand of a cast to a base object. Must be suitably sized
3286 /// and aligned.
3287 TCK_Upcast,
3288 /// Checking the operand of a cast to a virtual base object. Must be an
3289 /// object within its lifetime.
3290 TCK_UpcastToVirtualBase,
3291 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3292 TCK_NonnullAssign,
3293 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3294 /// null or an object within its lifetime.
3295 TCK_DynamicOperation
3296 };
3297
3298 /// Determine whether the pointer type check \p TCK permits null pointers.
3299 static bool isNullPointerAllowed(TypeCheckKind TCK);
3300
3301 /// Determine whether the pointer type check \p TCK requires a vptr check.
3302 static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
3303
3304 /// Whether any type-checking sanitizers are enabled. If \c false,
3305 /// calls to EmitTypeCheck can be skipped.
3306 bool sanitizePerformTypeCheck() const;
3307
3308 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
3309 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3310 llvm::Value *ArraySize = nullptr) {
3311 if (!sanitizePerformTypeCheck())
3312 return;
3313 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3314 SkippedChecks, ArraySize);
3315 }
3316
3317 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
3318 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3319 SanitizerSet SkippedChecks = SanitizerSet(),
3320 llvm::Value *ArraySize = nullptr) {
3321 if (!sanitizePerformTypeCheck())
3322 return;
3323 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3324 SkippedChecks, ArraySize);
3325 }
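/// Illustrative usage sketch (CGF, Addr, Ty and Loc are assumed to be in
/// scope): a typical load path guards the access with a type check first.
/// \code
///   CGF.EmitTypeCheck(CodeGenFunction::TCK_Load, Loc, Addr, Ty);
///   llvm::Value *V = CGF.EmitLoadOfScalar(Addr, /*Volatile=*/false, Ty, Loc);
/// \endcode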
3326
3327 /// Emit a check that \p V is the address of storage of the
3328 /// appropriate size and alignment for an object of type \p Type
3329 /// (or if ArraySize is provided, for an array of that bound).
3330 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
3331 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3332 SanitizerSet SkippedChecks = SanitizerSet(),
3333 llvm::Value *ArraySize = nullptr);
3334
3335 /// Emit a check that \p Base points into an array object, which
3336 /// we can access at index \p Index. \p Accessed should be \c false if
3337 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3338 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3339 QualType IndexType, bool Accessed);
3340 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3341 llvm::Value *Index, QualType IndexType,
3342 QualType IndexedType, bool Accessed);
3343
3344 /// Returns debug info, with additional annotation if
3345 /// CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo[Ordinal] is enabled for
3346 /// any of the ordinals.
3347 llvm::DILocation *
3348 SanitizerAnnotateDebugInfo(ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
3349 SanitizerHandler Handler);
3350
3351 llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
3352 const FieldDecl *CountDecl);
3353
3354 /// Build an expression accessing the "counted_by" field.
3355 llvm::Value *EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD,
3356 const FieldDecl *CountDecl);
3357
3358 // Emit bounds checking for flexible array and pointer members with the
3359 // counted_by attribute.
3360 void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx,
3361 Address Addr, QualType IdxTy,
3362 QualType ArrayTy, bool Accessed,
3363 bool FlexibleArray);
3364
3365 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3366 bool isInc, bool isPre);
3367 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
3368 bool isInc, bool isPre);
3369
3370 /// Converts Location to a DebugLoc, if debug information is enabled.
3371 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3372
3373 /// Get the record field index as represented in debug info.
3374 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3375
3376 //===--------------------------------------------------------------------===//
3377 // Declaration Emission
3378 //===--------------------------------------------------------------------===//
3379
3380 /// EmitDecl - Emit a declaration.
3381 ///
3382 /// This function can be called with a null (unreachable) insert point.
3383 void EmitDecl(const Decl &D, bool EvaluateConditionDecl = false);
3384
3385 /// EmitVarDecl - Emit a local variable declaration.
3386 ///
3387 /// This function can be called with a null (unreachable) insert point.
3388 void EmitVarDecl(const VarDecl &D);
3389
3390 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3391 bool capturedByInit);
3392
3393 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
3394 llvm::Value *Address);
3395
3396 /// Determine whether the given initializer is trivial in the sense
3397 /// that it requires no code to be generated.
3398 bool isTrivialInitializer(const Expr *Init);
3399
3400 /// EmitAutoVarDecl - Emit an auto variable declaration.
3401 ///
3402 /// This function can be called with a null (unreachable) insert point.
3403 void EmitAutoVarDecl(const VarDecl &D);
3404
3405 class AutoVarEmission {
3406 friend class CodeGenFunction;
3407
3408 const VarDecl *Variable;
3409
3410 /// The address of the alloca for languages with explicit address space
3411 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3412 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3413 /// as a global constant.
3414 Address Addr;
3415
3416 llvm::Value *NRVOFlag;
3417
3418 /// True if the variable is a __block variable that is captured by an
3419 /// escaping block.
3420 bool IsEscapingByRef;
3421
3422 /// True if the variable is of aggregate type and has a constant
3423 /// initializer.
3424 bool IsConstantAggregate;
3425
3426 /// True if lifetime markers should be used.
3427 bool UseLifetimeMarkers;
3428
3429 /// Address with original alloca instruction. Invalid if the variable was
3430 /// emitted as a global constant.
3431 RawAddress AllocaAddr;
3432
3433 struct Invalid {};
3434 AutoVarEmission(Invalid)
3435 : Variable(nullptr), Addr(Address::invalid()),
3436 AllocaAddr(RawAddress::invalid()) {}
3437
3438 AutoVarEmission(const VarDecl &variable)
3439 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3440 IsEscapingByRef(false), IsConstantAggregate(false),
3441 UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
3442
3443 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3444
3445 public:
3446 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3447
3448 bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
3449
3450 /// Returns the raw, allocated address, which is not necessarily
3451 /// the address of the object itself. It is cast to the default
3452 /// address space for address space agnostic languages.
3453 Address getAllocatedAddress() const { return Addr; }
3454
3455 /// Returns the address for the original alloca instruction.
3456 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3457
3458 /// Returns the address of the object within this declaration.
3459 /// Note that this does not chase the forwarding pointer for
3460 /// __block decls.
3461 Address getObjectAddress(CodeGenFunction &CGF) const {
3462 if (!IsEscapingByRef)
3463 return Addr;
3464
3465 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3466 }
3467 };
3468 AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
3469 void EmitAutoVarInit(const AutoVarEmission &emission);
3470 void EmitAutoVarCleanups(const AutoVarEmission &emission);
3471 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
3472 QualType::DestructionKind dtorKind);
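/// Illustrative usage sketch (a local VarDecl D is assumed): EmitAutoVarDecl
/// effectively performs these three steps; callers that need to interleave
/// other work (e.g. OpenMP privatization) drive them individually.
/// \code
///   auto Emission = CGF.EmitAutoVarAlloca(D);
///   CGF.EmitAutoVarInit(Emission);
///   CGF.EmitAutoVarCleanups(Emission);
/// \endcode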
3473
3474 void MaybeEmitDeferredVarDeclInit(const VarDecl *var);
3475
3476 /// Emits the alloca and debug information for the size expressions for each
3477 /// dimension of an array. It registers the association of its (1-dimensional)
3478 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3479 /// reference this node when creating the DISubrange object to describe the
3480 /// array types.
3481 void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D,
3482 bool EmitDebugInfo);
3483
3484 void EmitStaticVarDecl(const VarDecl &D,
3485 llvm::GlobalValue::LinkageTypes Linkage);
3486
3487 class ParamValue {
3488 union {
3489 Address Addr;
3490 llvm::Value *Value;
3491 };
3492
3493 bool IsIndirect;
3494
3495 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3496 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3497
3498 public:
3499 static ParamValue forDirect(llvm::Value *value) {
3500 return ParamValue(value);
3501 }
3502 static ParamValue forIndirect(Address addr) {
3503 assert(!addr.getAlignment().isZero());
3504 return ParamValue(addr);
3505 }
3506
3507 bool isIndirect() const { return IsIndirect; }
3508 llvm::Value *getAnyValue() const {
3509 if (!isIndirect())
3510 return Value;
3511 assert(!Addr.hasOffset() && "unexpected offset");
3512 return Addr.getBasePointer();
3513 }
3514
3515 llvm::Value *getDirectValue() const {
3516 assert(!isIndirect());
3517 return Value;
3518 }
3519
3520 Address getIndirectAddress() const {
3521 assert(isIndirect());
3522 return Addr;
3523 }
3524 };
3525
3526 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3527 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
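/// Illustrative usage sketch (assumed to run inside a CodeGenFunction member,
/// e.g. the function prologue, with Param, AI, ParamAddr and ArgNo in scope):
/// \code
///   EmitParmDecl(*Param, ParamValue::forDirect(AI), ArgNo);          // SSA value
///   EmitParmDecl(*Param, ParamValue::forIndirect(ParamAddr), ArgNo); // passed in memory
/// \endcode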
3528
3529 /// protectFromPeepholes - Protect a value that we're intending to
3530 /// store to the side, but which will probably be used later, from
3531 /// aggressive peepholing optimizations that might delete it.
3532 ///
3533 /// Pass the result to unprotectFromPeepholes to declare that
3534 /// protection is no longer required.
3535 ///
3536 /// There's no particular reason why this shouldn't apply to
3537 /// l-values, it's just that no existing peepholes work on pointers.
3538 PeepholeProtection protectFromPeepholes(RValue rvalue);
3539 void unprotectFromPeepholes(PeepholeProtection protection);
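/// Illustrative usage sketch (CGF and an RValue RV are assumed): the two calls
/// bracket the region in which the saved value must survive peepholes.
/// \code
///   auto Protection = CGF.protectFromPeepholes(RV);
///   // ... emit intervening code that must not delete RV's instructions ...
///   CGF.unprotectFromPeepholes(Protection);
/// \endcode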
3540
3541 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3542 SourceLocation Loc,
3543 SourceLocation AssumptionLoc,
3544 llvm::Value *Alignment,
3545 llvm::Value *OffsetValue,
3546 llvm::Value *TheCheck,
3547 llvm::Instruction *Assumption);
3548
3549 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3550 SourceLocation Loc, SourceLocation AssumptionLoc,
3551 llvm::Value *Alignment,
3552 llvm::Value *OffsetValue = nullptr);
3553
3554 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3555 SourceLocation AssumptionLoc,
3556 llvm::Value *Alignment,
3557 llvm::Value *OffsetValue = nullptr);
3558
3559 //===--------------------------------------------------------------------===//
3560 // Statement Emission
3561 //===--------------------------------------------------------------------===//
3562
3563 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3564 void EmitStopPoint(const Stmt *S);
3565
3566 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3567 /// this function even if there is no current insertion point.
3568 ///
3569 /// This function may clear the current insertion point; callers should use
3570 /// EnsureInsertPoint if they wish to subsequently generate code without first
3571 /// calling EmitBlock, EmitBranch, or EmitStmt.
3572 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3573
3574 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3575 /// necessarily require an insertion point or debug information; typically
3576 /// because the statement amounts to a jump or a container of other
3577 /// statements.
3578 ///
3579 /// \return True if the statement was handled.
3580 bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);
3581
3582 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3583 AggValueSlot AVS = AggValueSlot::ignored());
3584 Address
3585 EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast = false,
3586 AggValueSlot AVS = AggValueSlot::ignored());
3587
3588 /// EmitLabel - Emit the block for the given label. It is legal to call this
3589 /// function even if there is no current insertion point.
3590 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3591
3592 void EmitLabelStmt(const LabelStmt &S);
3593 void EmitAttributedStmt(const AttributedStmt &S);
3594 void EmitGotoStmt(const GotoStmt &S);
3595 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
3596 void EmitIfStmt(const IfStmt &S);
3597
3598 void EmitWhileStmt(const WhileStmt &S, ArrayRef<const Attr *> Attrs = {});
3599 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3600 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3601 void EmitReturnStmt(const ReturnStmt &S);
3602 void EmitDeclStmt(const DeclStmt &S);
3603 void EmitBreakStmt(const BreakStmt &S);
3604 void EmitContinueStmt(const ContinueStmt &S);
3605 void EmitSwitchStmt(const SwitchStmt &S);
3606 void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
3607 void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3608 void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3609 void EmitAsmStmt(const AsmStmt &S);
3610
3611 const BreakContinue *GetDestForLoopControlStmt(const LoopControlStmt &S);
3612
3613 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
3614 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
3615 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
3616 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
3617 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
3618
3619 void EmitCoroutineBody(const CoroutineBodyStmt &S);
3620 void EmitCoreturnStmt(const CoreturnStmt &S);
3621 RValue EmitCoawaitExpr(const CoawaitExpr &E,
3622 AggValueSlot aggSlot = AggValueSlot::ignored(),
3623 bool ignoreResult = false);
3624 LValue EmitCoawaitLValue(const CoawaitExpr *E);
3625 RValue EmitCoyieldExpr(const CoyieldExpr &E,
3626 AggValueSlot aggSlot = AggValueSlot::ignored(),
3627 bool ignoreResult = false);
3628 LValue EmitCoyieldLValue(const CoyieldExpr *E);
3629 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3630
3631 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3632 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3633
3634 void EmitCXXTryStmt(const CXXTryStmt &S);
3635 void EmitSEHTryStmt(const SEHTryStmt &S);
3636 void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
3637 void EnterSEHTryStmt(const SEHTryStmt &S);
3638 void ExitSEHTryStmt(const SEHTryStmt &S);
3639 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3640 llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
3641
3642 void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc);
3643 void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
3644 const Stmt *OutlinedStmt);
3645
3646 llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
3647 const SEHExceptStmt &Except);
3648
3649 llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
3650 const SEHFinallyStmt &Finally);
3651
3652 void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
3653 llvm::Value *ParentFP, llvm::Value *EntryEBP);
3654 llvm::Value *EmitSEHExceptionCode();
3655 llvm::Value *EmitSEHExceptionInfo();
3656 llvm::Value *EmitSEHAbnormalTermination();
3657
3658 /// Emit simple code for OpenMP directives in Simd-only mode.
3659 void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
3660
3661 /// Scan the outlined statement for captures from the parent function. For
3662 /// each capture, mark the capture as escaped and emit a call to
3663 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3664 void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
3665 bool IsFilter);
3666
3667 /// Recovers the address of a local in a parent function. ParentVar is the
3668 /// address of the variable used in the immediate parent function. It can
3669 /// either be an alloca or a call to llvm.localrecover if there are nested
3670 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3671 /// frame.
3672 Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
3673 Address ParentVar, llvm::Value *ParentFP);
3674
3675 void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
3676 ArrayRef<const Attr *> Attrs = {});
3677
3678 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3679 class OMPCancelStackRAII {
3680 CodeGenFunction &CGF;
3681
3682 public:
3683 OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
3684 bool HasCancel)
3685 : CGF(CGF) {
3686 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3687 }
3688 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3689 };
3690
3691 /// Returns calculated size of the specified type.
3692 llvm::Value *getTypeSize(QualType Ty);
3693 LValue InitCapturedStruct(const CapturedStmt &S);
3694 llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
3695 llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
3696 Address GenerateCapturedStmtArgument(const CapturedStmt &S);
3697 llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
3698 SourceLocation Loc);
3699 void GenerateOpenMPCapturedVars(const CapturedStmt &S,
3700 SmallVectorImpl<llvm::Value *> &CapturedVars);
3701 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3702 SourceLocation Loc);
3703 /// Perform element by element copying of arrays with type \a
3704 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3705 /// generated by \a CopyGen.
3706 ///
3707 /// \param DestAddr Address of the destination array.
3708 /// \param SrcAddr Address of the source array.
3709 /// \param OriginalType Type of destination and source arrays.
3710 /// \param CopyGen Copying procedure that copies value of single array element
3711 /// to another single array element.
3712 void EmitOMPAggregateAssign(
3713 Address DestAddr, Address SrcAddr, QualType OriginalType,
3714 const llvm::function_ref<void(Address, Address)> CopyGen);
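/// Illustrative usage sketch (CGF, DestAddr, SrcAddr, an array QualType ArrayTy
/// and a scalar element QualType ElemTy are assumed): the per-element copy is
/// supplied as a callback.
/// \code
///   CGF.EmitOMPAggregateAssign(DestAddr, SrcAddr, ArrayTy,
///       [&CGF, ElemTy](Address Dst, Address Src) {
///         llvm::Value *V = CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Src, ElemTy),
///                                               SourceLocation());
///         CGF.EmitStoreOfScalar(V, CGF.MakeAddrLValue(Dst, ElemTy));
///       });
/// \endcode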
3715 /// Emit proper copying of data from one variable to another.
3716 ///
3717 /// \param OriginalType Original type of the copied variables.
3718 /// \param DestAddr Destination address.
3719 /// \param SrcAddr Source address.
3720 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3721 /// type of the base array element).
3722 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3723 /// the base array element).
3724 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3725 /// DestVD.
3726 void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr,
3727 const VarDecl *DestVD, const VarDecl *SrcVD,
3728 const Expr *Copy);
3729 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3730 /// \a X = \a E \a BO \a E.
3731 ///
3732 /// \param X Value to be updated.
3733 /// \param E Update value.
3734 /// \param BO Binary operation for update operation.
3735 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3736 /// expression, false otherwise.
3737 /// \param AO Atomic ordering of the generated atomic instructions.
3738 /// \param CommonGen Code generator for complex expressions that cannot be
3739 /// expressed through atomicrmw instruction.
3740 /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3741 /// generated, <false, RValue::get(nullptr)> otherwise.
3742 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3743 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3744 llvm::AtomicOrdering AO, SourceLocation Loc,
3745 const llvm::function_ref<RValue(RValue)> CommonGen);
3746 bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3747 OMPPrivateScope &PrivateScope);
3748 void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3749 OMPPrivateScope &PrivateScope);
3750 void EmitOMPUseDevicePtrClause(
3751 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3752 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3753 CaptureDeviceAddrMap);
3754 void EmitOMPUseDeviceAddrClause(
3755 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3756 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3757 CaptureDeviceAddrMap);
3758 /// Emit code for the copyin clause in the \a D directive. The following code
3759 /// is generated at the start of outlined functions for directives:
3760 /// \code
3761 /// threadprivate_var1 = master_threadprivate_var1;
3762 /// operator=(threadprivate_var2, master_threadprivate_var2);
3763 /// ...
3764 /// __kmpc_barrier(&loc, global_tid);
3765 /// \endcode
3766 ///
3767 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3768 /// \returns true if at least one copyin variable is found, false otherwise.
3769 bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3770 /// Emit initial code for lastprivate variables. If some variable is
3771 /// not also firstprivate, then the default initialization is used. Otherwise
3772 /// initialization of this variable is performed by the
3773 /// EmitOMPFirstprivateClause method.
3774 ///
3775 /// \param D Directive that may have 'lastprivate' clauses.
3776 /// \param PrivateScope Private scope for capturing lastprivate variables for
3777 /// proper codegen in internal captured statement.
3778 ///
3779 /// \returns true if there is at least one lastprivate variable, false
3780 /// otherwise.
3781 bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3782 OMPPrivateScope &PrivateScope);
3783 /// Emit final copying of lastprivate values to original variables at
3784 /// the end of the worksharing or simd directive.
3785 ///
3786 /// \param D Directive that has at least one 'lastprivate' clause.
3787 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3788 /// it is the last iteration of the loop code in associated directive, or to
3789 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3790 void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3791 bool NoFinals,
3792 llvm::Value *IsLastIterCond = nullptr);
3793 /// Emit initial code for linear clauses.
3794 void EmitOMPLinearClause(const OMPLoopDirective &D,
3795 CodeGenFunction::OMPPrivateScope &PrivateScope);
3796 /// Emit final code for linear clauses.
3797 /// \param CondGen Optional conditional code for final part of codegen for
3798 /// linear clause.
3799 void EmitOMPLinearClauseFinal(
3800 const OMPLoopDirective &D,
3801 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3802 /// Emit initial code for reduction variables. Creates reduction copies
3803 /// and initializes them with the values according to OpenMP standard.
3804 ///
3805 /// \param D Directive (possibly) with the 'reduction' clause.
3806 /// \param PrivateScope Private scope for capturing reduction variables for
3807 /// proper codegen in internal captured statement.
3808 ///
3809 void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3810 OMPPrivateScope &PrivateScope,
3811 bool ForInscan = false);
3812 /// Emit final update of reduction values to original variables at
3813 /// the end of the directive.
3814 ///
3815 /// \param D Directive that has at least one 'reduction' clause.
3816 /// \param ReductionKind The kind of reduction to perform.
3817 void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3818 const OpenMPDirectiveKind ReductionKind);
3819 /// Emit initial code for linear variables. Creates private copies
3820 /// and initializes them with the values according to OpenMP standard.
3821 ///
3822 /// \param D Directive (possibly) with the 'linear' clause.
3823 /// \return true if at least one linear variable is found that should be
3824 /// initialized with the value of the original variable, false otherwise.
3825 bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3826
3827 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3828 llvm::Function * /*OutlinedFn*/,
3829 const OMPTaskDataTy & /*Data*/)>
3830 TaskGenTy;
3831 void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3832 const OpenMPDirectiveKind CapturedRegion,
3833 const RegionCodeGenTy &BodyGen,
3834 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3835 struct OMPTargetDataInfo {
3836 Address BasePointersArray = Address::invalid();
3837 Address PointersArray = Address::invalid();
3838 Address SizesArray = Address::invalid();
3839 Address MappersArray = Address::invalid();
3840 unsigned NumberOfTargetItems = 0;
3841 explicit OMPTargetDataInfo() = default;
3842 OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3843 Address SizesArray, Address MappersArray,
3844 unsigned NumberOfTargetItems)
3845 : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3846 SizesArray(SizesArray), MappersArray(MappersArray),
3847 NumberOfTargetItems(NumberOfTargetItems) {}
3848 };
3849 void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3850 const RegionCodeGenTy &BodyGen,
3851 OMPTargetDataInfo &InputInfo);
3852 void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data,
3853 CodeGenFunction &CGF, const CapturedStmt *CS,
3854 OMPPrivateScope &Scope);
3855 void EmitOMPMetaDirective(const OMPMetaDirective &S);
3856 void EmitOMPParallelDirective(const OMPParallelDirective &S);
3857 void EmitOMPSimdDirective(const OMPSimdDirective &S);
3858 void EmitOMPTileDirective(const OMPTileDirective &S);
3859 void EmitOMPStripeDirective(const OMPStripeDirective &S);
3860 void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
3861 void EmitOMPReverseDirective(const OMPReverseDirective &S);
3862 void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
3863 void EmitOMPForDirective(const OMPForDirective &S);
3864 void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3865 void EmitOMPScopeDirective(const OMPScopeDirective &S);
3866 void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3867 void EmitOMPSectionDirective(const OMPSectionDirective &S);
3868 void EmitOMPSingleDirective(const OMPSingleDirective &S);
3869 void EmitOMPMasterDirective(const OMPMasterDirective &S);
3870 void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
3871 void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3872 void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3873 void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3874 void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3875 void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
3876 void EmitOMPTaskDirective(const OMPTaskDirective &S);
3877 void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3878 void EmitOMPErrorDirective(const OMPErrorDirective &S);
3879 void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3880 void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3881 void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3882 void EmitOMPFlushDirective(const OMPFlushDirective &S);
3883 void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
3884 void EmitOMPScanDirective(const OMPScanDirective &S);
3885 void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3886 void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3887 void EmitOMPTargetDirective(const OMPTargetDirective &S);
3888 void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3889 void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3890 void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3891 void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3892 void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3893 void
3894 EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3895 void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3896 void
3897 EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3898 void EmitOMPCancelDirective(const OMPCancelDirective &S);
3899 void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3900 void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3901 void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3902 void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
3903 void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S);
3904 void
3905 EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
3906 void
3907 EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S);
3908 void EmitOMPParallelMasterTaskLoopDirective(
3910 void EmitOMPParallelMaskedTaskLoopDirective(
3912 void EmitOMPParallelMasterTaskLoopSimdDirective(
3914 void EmitOMPParallelMaskedTaskLoopSimdDirective(
3916 void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3917 void EmitOMPDistributeParallelForDirective(
3919 void EmitOMPDistributeParallelForSimdDirective(
3921 void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3922 void EmitOMPTargetParallelForSimdDirective(
3924 void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3925 void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3926 void
3927 EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3928 void EmitOMPTeamsDistributeParallelForSimdDirective(
3930 void EmitOMPTeamsDistributeParallelForDirective(
3932 void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3933 void EmitOMPTargetTeamsDistributeDirective(
3935 void EmitOMPTargetTeamsDistributeParallelForDirective(
3937 void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3939 void EmitOMPTargetTeamsDistributeSimdDirective(
3941 void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
3942 void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
3943 void EmitOMPTargetParallelGenericLoopDirective(
3945 void EmitOMPTargetTeamsGenericLoopDirective(
3947 void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
3948 void EmitOMPInteropDirective(const OMPInteropDirective &S);
3949 void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
3950 void EmitOMPAssumeDirective(const OMPAssumeDirective &S);
3951
3952 /// Emit device code for the target directive.
3953 static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3954 StringRef ParentName,
3955 const OMPTargetDirective &S);
3956 static void
3957 EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3959 /// Emit device code for the target parallel for directive.
3960 static void EmitOMPTargetParallelForDeviceFunction(
3961 CodeGenModule &CGM, StringRef ParentName,
3963 /// Emit device code for the target parallel for simd directive.
3964 static void EmitOMPTargetParallelForSimdDeviceFunction(
3965 CodeGenModule &CGM, StringRef ParentName,
3967 /// Emit device code for the target teams directive.
3968 static void
3969 EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3970 const OMPTargetTeamsDirective &S);
3971 /// Emit device code for the target teams distribute directive.
3972 static void EmitOMPTargetTeamsDistributeDeviceFunction(
3973 CodeGenModule &CGM, StringRef ParentName,
3975 /// Emit device code for the target teams distribute simd directive.
3976 static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3977 CodeGenModule &CGM, StringRef ParentName,
3979 /// Emit device code for the target simd directive.
3980 static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3981 StringRef ParentName,
3982 const OMPTargetSimdDirective &S);
3983 /// Emit device code for the target teams distribute parallel for simd
3984 /// directive.
3985 static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3986 CodeGenModule &CGM, StringRef ParentName,
3988
3989 /// Emit device code for the target teams loop directive.
3990 static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
3991 CodeGenModule &CGM, StringRef ParentName,
3993
3994 /// Emit device code for the target parallel loop directive.
3995 static void EmitOMPTargetParallelGenericLoopDeviceFunction(
3996 CodeGenModule &CGM, StringRef ParentName,
3998
3999 static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
4000 CodeGenModule &CGM, StringRef ParentName,
4002
4003 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
4004 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
4005 /// future it is meant to be the number of loops expected in the loop nests
4006 /// (usually specified by the "collapse" clause) that are collapsed to a
4007 /// single loop by this function.
4008 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
4009 int Depth);
4010
4011 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
4012 void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);
4013
4014 /// Emit inner loop of the worksharing/simd construct.
4015 ///
4016 /// \param S Directive, for which the inner loop must be emitted.
4017 /// \param RequiresCleanup true, if directive has some associated private
4018 /// variables.
4019 /// \param LoopCond Boolean condition for loop continuation.
4020 /// \param IncExpr Increment expression for loop control variable.
4021 /// \param BodyGen Generator for the inner body of the inner loop.
4022 /// \param PostIncGen Generator for post-increment code (required for ordered
4023 /// loop directives).
4024 void EmitOMPInnerLoop(
4025 const OMPExecutableDirective &S, bool RequiresCleanup,
4026 const Expr *LoopCond, const Expr *IncExpr,
4027 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
4028 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
4029
4030 JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
4031 /// Emit initial code for loop counters of loop-based directives.
4032 void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
4033 OMPPrivateScope &LoopScope);
4034
4035 /// Helper for the OpenMP loop directives.
4036 void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
4037
4038 /// Emit code for the worksharing loop-based directive.
4039 /// \return true if this construct has any lastprivate clause, false
4040 /// otherwise.
4041 bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
4042 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
4043 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4044
4045 /// Emit code for the distribute loop-based directive.
4046 void EmitOMPDistributeLoop(const OMPLoopDirective &S,
4047 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4048
4049 /// Helpers for the OpenMP loop directives.
4050 void EmitOMPSimdInit(const OMPLoopDirective &D);
4051 void EmitOMPSimdFinal(
4052 const OMPLoopDirective &D,
4053 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4054
4055 /// Emits the lvalue for the given expression, which may refer to a captured variable.
4056 LValue EmitOMPSharedLValue(const Expr *E);
4057
4058private:
4059 /// Helpers for blocks.
4060 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4061
4062 /// struct with the values to be passed to the OpenMP loop-related functions
4063 struct OMPLoopArguments {
4064 /// loop lower bound
4065 Address LB = Address::invalid();
4066 /// loop upper bound
4067 Address UB = Address::invalid();
4068 /// loop stride
4069 Address ST = Address::invalid();
4070 /// isLastIteration argument for runtime functions
4071 Address IL = Address::invalid();
4072 /// Chunk value generated by sema
4073 llvm::Value *Chunk = nullptr;
4074 /// EnsureUpperBound
4075 Expr *EUB = nullptr;
4076 /// IncrementExpression
4077 Expr *IncExpr = nullptr;
4078 /// Loop initialization
4079 Expr *Init = nullptr;
4080 /// Loop exit condition
4081 Expr *Cond = nullptr;
4082 /// Update of LB after a whole chunk has been executed
4083 Expr *NextLB = nullptr;
4084 /// Update of UB after a whole chunk has been executed
4085 Expr *NextUB = nullptr;
4086 /// Distinguishes between the 'for', 'distribute', and 'sections' directives
4087 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4088 OMPLoopArguments() = default;
4089 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4090 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4091 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4092 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4093 Expr *NextUB = nullptr)
4094 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4095 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4096 NextUB(NextUB) {}
4097 };
4098 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4099 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4100 const OMPLoopArguments &LoopArgs,
4101 const CodeGenLoopTy &CodeGenLoop,
4102 const CodeGenOrderedTy &CodeGenOrdered);
4103 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4104 bool IsMonotonic, const OMPLoopDirective &S,
4105 OMPPrivateScope &LoopScope, bool Ordered,
4106 const OMPLoopArguments &LoopArgs,
4107 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4108 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4109 const OMPLoopDirective &S,
4110 OMPPrivateScope &LoopScope,
4111 const OMPLoopArguments &LoopArgs,
4112 const CodeGenLoopTy &CodeGenLoopContent);
4113 /// Emit code for sections directive.
4114 void EmitSections(const OMPExecutableDirective &S);
4115
4116public:
4117 //===--------------------------------------------------------------------===//
4118 // OpenACC Emission
4119 //===--------------------------------------------------------------------===//
4121 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4122 // simply emitting its structured block, but in the future we will implement
4123 // some sort of IR.
4124 EmitStmt(S.getStructuredBlock());
4125 }
4126
4128 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4129 // simply emitting its loop, but in the future we will implement
4130 // some sort of IR.
4131 EmitStmt(S.getLoop());
4132 }
4133
4135 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4136 // simply emitting its loop, but in the future we will implement
4137 // some sort of IR.
4138 EmitStmt(S.getLoop());
4139 }
4140
4142 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4143 // simply emitting its structured block, but in the future we will implement
4144 // some sort of IR.
4145 EmitStmt(S.getStructuredBlock());
4146 }
4147
4149 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4150 // but in the future we will implement some sort of IR.
4151 }
4152
4154 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4155 // but in the future we will implement some sort of IR.
4156 }
4157
4159 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4160 // simply emitting its structured block, but in the future we will implement
4161 // some sort of IR.
4162 EmitStmt(S.getStructuredBlock());
4163 }
4164
4166 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4167 // but in the future we will implement some sort of IR.
4168 }
4169
4171 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4172 // but in the future we will implement some sort of IR.
4173 }
4174
4176 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4177 // but in the future we will implement some sort of IR.
4178 }
4179
4181 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4182 // but in the future we will implement some sort of IR.
4183 }
4184
4186 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4187 // but in the future we will implement some sort of IR.
4188 }
4189
4191 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4192 // simply emitting its associated stmt, but in the future we will implement
4193 // some sort of IR.
4194 EmitStmt(S.getAssociatedStmt());
4195 }
4197 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4198 // but in the future we will implement some sort of IR.
4199 }
4200
4201 //===--------------------------------------------------------------------===//
4202 // LValue Expression Emission
4203 //===--------------------------------------------------------------------===//
4204
4205 /// Create a check that a scalar RValue is non-null.
4206 llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);
4207
4208 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4209 RValue GetUndefRValue(QualType Ty);
4210
4211 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4212 /// and issue an ErrorUnsupported style diagnostic (using the
4213 /// provided Name).
4214 RValue EmitUnsupportedRValue(const Expr *E, const char *Name);
4215
4216 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4217 /// an ErrorUnsupported style diagnostic (using the provided Name).
4218 LValue EmitUnsupportedLValue(const Expr *E, const char *Name);
4219
4220 /// EmitLValue - Emit code to compute a designator that specifies the location
4221 /// of the expression.
4222 ///
4223 /// This can return one of two things: a simple address or a bitfield
4224 /// reference. In either case, the LLVM Value* in the LValue structure is
4225 /// guaranteed to be an LLVM pointer type.
4226 ///
4227 /// If this returns a bitfield reference, nothing about the pointee type of
4228 /// the LLVM value is known: For example, it may not be a pointer to an
4229 /// integer.
4230 ///
4231 /// If this returns a normal address, and if the lvalue's C type is fixed
4232 /// size, this method guarantees that the returned pointer type will point to
4233 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
4234 /// variable length type, this is not possible.
4235 ///
4236 LValue EmitLValue(const Expr *E,
4237 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
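/// Illustrative usage sketch (CGF and a scalar-typed Expr *E are assumed): the
/// common rvalue path is EmitLValue followed by a load of the resulting lvalue.
/// \code
///   LValue LV = CGF.EmitLValue(E);
///   RValue RV = CGF.EmitLoadOfLValue(LV, E->getExprLoc());
/// \endcode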
4238
4239private:
4240 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4241
4242public:
4243 /// Same as EmitLValue but additionally we generate checking code to
4244 /// guard against undefined behavior. This is only suitable when we know
4245 /// that the address will be used to access the object.
4246 LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
4247
4248 RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc);
4249
4250 void EmitAtomicInit(Expr *E, LValue lvalue);
4251
4252 bool LValueIsSuitableForInlineAtomic(LValue Src);
4253
4254 RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
4255 AggValueSlot Slot = AggValueSlot::ignored());
4256
4257 RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
4258 llvm::AtomicOrdering AO, bool IsVolatile = false,
4259 AggValueSlot slot = AggValueSlot::ignored());
4260
4261 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4262
4263 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4264 bool IsVolatile, bool isInit);
4265
4266 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4267 LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
4268 llvm::AtomicOrdering Success =
4269 llvm::AtomicOrdering::SequentiallyConsistent,
4270 llvm::AtomicOrdering Failure =
4271 llvm::AtomicOrdering::SequentiallyConsistent,
4272 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4273
4274 /// Emit an atomicrmw instruction, applying relevant metadata when
4275 /// applicable.
4276 llvm::AtomicRMWInst *emitAtomicRMWInst(
4277 llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
4278 llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
4279 llvm::SyncScope::ID SSID = llvm::SyncScope::System,
4280 const AtomicExpr *AE = nullptr);
4281
4282 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4283 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4284 bool IsVolatile);
4285
4286 /// EmitToMemory - Change a scalar value from its value
4287 /// representation to its in-memory representation.
4288 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4289
4290 /// EmitFromMemory - Change a scalar value from its memory
4291 /// representation to its value representation.
4292 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
4293
4294 /// Check if the scalar \p Value is within the valid range for the given
4295 /// type \p Ty.
4296 ///
4297 /// Returns true if a check is needed (even if the range is unknown).
4298 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4299 SourceLocation Loc);
4300
4301 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4302 /// care to appropriately convert from the memory representation to
4303 /// the LLVM value representation.
4304 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4305 SourceLocation Loc,
4306 AlignmentSource Source = AlignmentSource::Type,
4307 bool isNontemporal = false) {
4308 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
4309 CGM.getTBAAAccessInfo(Ty), isNontemporal);
4310 }
4311
4312 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4313 SourceLocation Loc, LValueBaseInfo BaseInfo,
4314 TBAAAccessInfo TBAAInfo,
4315 bool isNontemporal = false);
4316
4317 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4318 /// care to appropriately convert from the memory representation to
4319 /// the LLVM value representation. The l-value must be a simple
4320 /// l-value.
4321 llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
4322
4323 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4324 /// care to appropriately convert from the memory representation to
4325 /// the LLVM value representation.
4326 void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
4327 QualType Ty,
4328 AlignmentSource Source = AlignmentSource::Type,
4329 bool isInit = false, bool isNontemporal = false) {
4330 EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
4331 CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
4332 }
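/// Illustrative usage sketch (CGF, Addr, Ty and Loc are assumed): a scalar
/// round-trip through memory uses the matching load/store helpers, which
/// convert between the value and memory representations.
/// \code
///   llvm::Value *V = CGF.EmitLoadOfScalar(Addr, /*Volatile=*/false, Ty, Loc);
///   CGF.EmitStoreOfScalar(V, Addr, /*Volatile=*/false, Ty);
/// \endcode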
4333
4334 void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
4335 QualType Ty, LValueBaseInfo BaseInfo,
4336 TBAAAccessInfo TBAAInfo, bool isInit = false,
4337 bool isNontemporal = false);
4338
4339 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4340 /// care to appropriately convert from the memory representation to
4341 /// the LLVM value representation. The l-value must be a simple
4342 /// l-value. The isInit flag indicates whether this is an initialization.
4343 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
4344 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
4345 bool isInit = false);
4346
4347 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
4348 /// this method emits the address of the lvalue, then loads the result as an
4349 /// rvalue, returning the rvalue.
4350 RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
4351 RValue EmitLoadOfExtVectorElementLValue(LValue V);
4352 RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
4353 RValue EmitLoadOfGlobalRegLValue(LValue LV);
4354
4355 /// Like EmitLoadOfLValue but also handles complex and aggregate types.
4356 RValue EmitLoadOfAnyValue(LValue V,
4357 AggValueSlot Slot = AggValueSlot::ignored(),
4358 SourceLocation Loc = {});
4359
4360 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
4361 /// lvalue, where both are guaranteed to have the same type, and that type
4362 /// is 'Ty'.
4363 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
4364 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
4365 void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
4366
4367 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
4368 /// as EmitStoreThroughLValue.
4369 ///
4370 /// \param Result [out] - If non-null, this will be set to a Value* for the
4371 /// bit-field contents after the store, appropriate for use as the result of
4372 /// an assignment to the bit-field.
4373 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
4374 llvm::Value **Result = nullptr);
4375
4376 /// Emit an l-value for an assignment (simple or compound) of complex type.
4377 LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
4378 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
4379 LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
4380 llvm::Value *&Result);
4381
4382 // Note: only available for agg return types
4383 LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
4384 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
4385 // Note: only available for agg return types
4386 LValue EmitCallExprLValue(const CallExpr *E,
4387 llvm::CallBase **CallOrInvoke = nullptr);
4388 // Note: only available for agg return types
4389 LValue EmitVAArgExprLValue(const VAArgExpr *E);
4390 LValue EmitDeclRefLValue(const DeclRefExpr *E);
4391 LValue EmitStringLiteralLValue(const StringLiteral *E);
4392 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
4393 LValue EmitPredefinedLValue(const PredefinedExpr *E);
4394 LValue EmitUnaryOpLValue(const UnaryOperator *E);
4395 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4396 bool Accessed = false);
4397 llvm::Value *EmitMatrixIndexExpr(const Expr *E);
4398 LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
4399 LValue EmitArraySectionExpr(const ArraySectionExpr *E,
4400 bool IsLowerBound = true);
4401 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
4402 LValue EmitMemberExpr(const MemberExpr *E);
4403 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
4404 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
4405 LValue EmitInitListLValue(const InitListExpr *E);
4406 void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
4407 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
4408 LValue EmitCastLValue(const CastExpr *E);
4409 LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
4410 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
4411 LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E);
4412
4413 std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
4414 QualType Ty);
4415 LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args,
4416 QualType Ty);
4417
4418 Address EmitExtVectorElementLValue(LValue V);
4419
4420 RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
4421
4422 Address EmitArrayToPointerDecay(const Expr *Array,
4423 LValueBaseInfo *BaseInfo = nullptr,
4424 TBAAAccessInfo *TBAAInfo = nullptr);
4425
4426 class ConstantEmission {
4427 llvm::PointerIntPair<llvm::Constant *, 1, bool> ValueAndIsReference;
4428 ConstantEmission(llvm::Constant *C, bool isReference)
4429 : ValueAndIsReference(C, isReference) {}
4430
4431 public:
4432 ConstantEmission() {}
4433 static ConstantEmission forReference(llvm::Constant *C) {
4434 return ConstantEmission(C, true);
4435 }
4436 static ConstantEmission forValue(llvm::Constant *C) {
4437 return ConstantEmission(C, false);
4438 }
4439
4440 explicit operator bool() const {
4441 return ValueAndIsReference.getOpaqueValue() != nullptr;
4442 }
4443
4444 bool isReference() const { return ValueAndIsReference.getInt(); }
4445 LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const {
4446 assert(isReference());
4447 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
4448 RefExpr->getType());
4449 }
4450
4451 llvm::Constant *getValue() const {
4452 assert(!isReference());
4453 return ValueAndIsReference.getPointer();
4454 }
4455 };
4456
4457 ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr);
4458 ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
4459 llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
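// Illustrative usage sketch (non-normative): the usual pattern around
// ConstantEmission when emitting a DeclRefExpr; RefExpr is an assumed
// DeclRefExpr* being emitted as a scalar.
//   if (ConstantEmission CE = CGF.tryEmitAsConstant(RefExpr)) {
//     if (CE.isReference())
//       return CE.getReferenceLValue(CGF, RefExpr);
//     llvm::Value *V = CGF.emitScalarConstant(CE, RefExpr);
//     // ... use V instead of emitting a load ...
//   }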
4460
4461 RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
4462 AggValueSlot slot = AggValueSlot::ignored());
4463 LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
4464
4465 void FlattenAccessAndType(
4466 Address Addr, QualType AddrTy,
4467 SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
4468 SmallVectorImpl<QualType> &FlatTypes);
4469
4470 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
4471 const ObjCIvarDecl *Ivar);
4472 llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
4473 const ObjCIvarDecl *Ivar);
4474 LValue EmitLValueForField(LValue Base, const FieldDecl *Field,
4475 bool IsInBounds = true);
4476 LValue EmitLValueForLambdaField(const FieldDecl *Field);
4477 LValue EmitLValueForLambdaField(const FieldDecl *Field,
4478 llvm::Value *ThisValue);
4479
4480 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
4481 /// if the Field is a reference, this will return the address of the reference
4482 /// and not the address of the value stored in the reference.
4483 LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field);
4484
4485 LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base,
4486 const ObjCIvarDecl *Ivar, unsigned CVRQualifiers);
4487
4488 LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
4489 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
4490 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
4491 LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
4492
4493 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
4494 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
4495 LValue EmitStmtExprLValue(const StmtExpr *E);
4496 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
4497 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
4498 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
4499
4500 //===--------------------------------------------------------------------===//
4501 // Scalar Expression Emission
4502 //===--------------------------------------------------------------------===//
4503
4504 /// EmitCall - Generate a call of the given function, expecting the given
4505 /// result type, and using the given argument list which specifies both the
4506 /// LLVM arguments and the types they were derived from.
4507 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4508 ReturnValueSlot ReturnValue, const CallArgList &Args,
4509 llvm::CallBase **CallOrInvoke, bool IsMustTail,
4510 SourceLocation Loc,
4511 bool IsVirtualFunctionPointerThunk = false);
4512 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4513 ReturnValueSlot ReturnValue, const CallArgList &Args,
4514 llvm::CallBase **CallOrInvoke = nullptr,
4515 bool IsMustTail = false) {
4516 return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
4517 IsMustTail, SourceLocation());
4518 }
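// Illustrative usage sketch (non-normative): issuing a call through the
// CGCallee/CallArgList machinery; FnInfo, Callee and Args are assumed to
// have been built by the caller (e.g. via EmitCallArgs and CGM.getTypes()).
//   llvm::CallBase *CallOrInvoke = nullptr;
//   RValue Result = CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
//                                &CallOrInvoke);
//   // CallOrInvoke now points at the emitted call or invoke instruction.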
4519 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4520 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
4521 llvm::CallBase **CallOrInvoke = nullptr,
4522 CGFunctionInfo const **ResolvedFnInfo = nullptr);
4523
4524 // If a Call or Invoke instruction was emitted for this CallExpr, this method
4525 // writes the pointer to `CallOrInvoke` if it's not null.
4526 RValue EmitCallExpr(const CallExpr *E,
4527 ReturnValueSlot ReturnValue = ReturnValueSlot(),
4528 llvm::CallBase **CallOrInvoke = nullptr);
4529 RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4530 llvm::CallBase **CallOrInvoke = nullptr);
4531 CGCallee EmitCallee(const Expr *E);
4532
4533 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4534 void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
4535
4536 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4537 const Twine &name = "");
4538 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4539 ArrayRef<llvm::Value *> args,
4540 const Twine &name = "");
4541 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4542 const Twine &name = "");
4543 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4544 ArrayRef<Address> args,
4545 const Twine &name = "");
4546 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4547 ArrayRef<llvm::Value *> args,
4548 const Twine &name = "");
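// Illustrative usage sketch (non-normative): calling a runtime helper that
// is known not to unwind; Fn is an assumed llvm::FunctionCallee (e.g. from
// CGM.CreateRuntimeFunction) and Arg0/Arg1 are assumed llvm::Value*s.
//   llvm::CallInst *Call =
//       CGF.EmitNounwindRuntimeCall(Fn, {Arg0, Arg1}, "helper.call");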
4549
4550 SmallVector<llvm::OperandBundleDef, 1>
4551 getBundlesForFunclet(llvm::Value *Callee);
4552
4553 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4554 ArrayRef<llvm::Value *> Args,
4555 const Twine &Name = "");
4556 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4557 ArrayRef<llvm::Value *> args,
4558 const Twine &name = "");
4559 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4560 const Twine &name = "");
4561 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4562 ArrayRef<llvm::Value *> args);
4563
4564 CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
4565 NestedNameSpecifier Qual, llvm::Type *Ty);
4566
4567 CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
4568 CXXDtorType Type,
4569 const CXXRecordDecl *RD);
4570
4571 bool isPointerKnownNonNull(const Expr *E);
4572 /// Check whether the underlying base pointer is a constant null.
4573 bool isUnderlyingBasePointerConstantNull(const Expr *E);
4574
4575 /// Create the discriminator from the storage address and the entity hash.
4576 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4577 llvm::Value *Discriminator);
4578 CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
4579 llvm::Value *StorageAddress,
4580 GlobalDecl SchemaDecl,
4581 QualType SchemaType);
4582
4583 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4584 llvm::Value *Pointer);
4585
4586 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4587 llvm::Value *Pointer);
4588
4589 llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
4590 const CGPointerAuthInfo &CurAuthInfo,
4591 const CGPointerAuthInfo &NewAuthInfo,
4592 bool IsKnownNonNull);
4593 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4594 const CGPointerAuthInfo &CurInfo,
4595 const CGPointerAuthInfo &NewInfo);
4596
4597 void EmitPointerAuthOperandBundle(
4598 const CGPointerAuthInfo &Info,
4599 SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
4600
4601 CGPointerAuthInfo EmitPointerAuthInfo(PointerAuthQualifier Qualifier,
4602 Address StorageAddress);
4603 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4604 llvm::Value *Pointer, QualType ValueType,
4605 Address StorageAddress,
4606 bool IsKnownNonNull);
4607 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4608 const Expr *PointerExpr,
4609 Address StorageAddress);
4610 llvm::Value *EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier,
4611 llvm::Value *Pointer,
4612 QualType PointerType,
4613 Address StorageAddress,
4614 bool IsKnownNonNull);
4615 void EmitPointerAuthCopy(PointerAuthQualifier Qualifier, QualType Type,
4616 Address DestField, Address SrcField);
4617
4618 std::pair<llvm::Value *, CGPointerAuthInfo>
4619 EmitOrigPointerRValue(const Expr *E);
4620
4621 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4622 QualType SourceType, QualType DestType);
4623 Address authPointerToPointerCast(Address Ptr, QualType SourceType,
4624 QualType DestType);
4625
4626 Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);
4627
4628 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4629 return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
4630 }
4631
4632 // Return the copy constructor name with the prefix "__copy_constructor_"
4633 // removed.
4634 static std::string getNonTrivialCopyConstructorStr(QualType QT,
4635 CharUnits Alignment,
4636 bool IsVolatile,
4637 ASTContext &Ctx);
4638
4639 // Return the destructor name with the prefix "__destructor_" removed.
4640 static std::string getNonTrivialDestructorStr(QualType QT,
4641 CharUnits Alignment,
4642 bool IsVolatile,
4643 ASTContext &Ctx);
4644
4645 // These functions emit calls to the special functions of non-trivial C
4646 // structs.
4647 void defaultInitNonTrivialCStructVar(LValue Dst);
4648 void callCStructDefaultConstructor(LValue Dst);
4649 void callCStructDestructor(LValue Dst);
4650 void callCStructCopyConstructor(LValue Dst, LValue Src);
4651 void callCStructMoveConstructor(LValue Dst, LValue Src);
4652 void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
4653 void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
4654
4655 RValue EmitCXXMemberOrOperatorCall(
4656 const CXXMethodDecl *Method, const CGCallee &Callee,
4657 ReturnValueSlot ReturnValue, llvm::Value *This,
4658 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
4659 CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
4660 RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
4661 llvm::Value *This, QualType ThisTy,
4662 llvm::Value *ImplicitParam,
4663 QualType ImplicitParamTy, const CallExpr *E,
4664 llvm::CallBase **CallOrInvoke = nullptr);
4665 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
4666 ReturnValueSlot ReturnValue,
4667 llvm::CallBase **CallOrInvoke = nullptr);
4668 RValue EmitCXXMemberOrOperatorMemberCallExpr(
4669 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
4670 bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
4671 const Expr *Base, llvm::CallBase **CallOrInvoke);
4672 // Compute the object pointer.
4673 Address EmitCXXMemberDataPointerAddress(
4674 const Expr *E, Address base, llvm::Value *memberPtr,
4675 const MemberPointerType *memberPtrType, bool IsInBounds,
4676 LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr);
4677 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
4678 ReturnValueSlot ReturnValue,
4679 llvm::CallBase **CallOrInvoke);
4680
4681 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
4682 const CXXMethodDecl *MD,
4683 ReturnValueSlot ReturnValue,
4684 llvm::CallBase **CallOrInvoke);
4685 RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
4686
4687 RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
4688 ReturnValueSlot ReturnValue,
4689 llvm::CallBase **CallOrInvoke);
4690
4691 RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
4692 RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
4693
4694 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4695 const CallExpr *E, ReturnValueSlot ReturnValue);
4696
4697 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4698
4699 /// Emit IR for __builtin_os_log_format.
4700 RValue emitBuiltinOSLogFormat(const CallExpr &E);
4701
4702 /// Emit IR for __builtin_is_aligned.
4703 RValue EmitBuiltinIsAligned(const CallExpr *E);
4704 /// Emit IR for __builtin_align_up/__builtin_align_down.
4705 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
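// Rough sketch (non-normative) of what the alignment builtins reduce to,
// ignoring the sanitizer checks and pointer/integer distinctions:
//   __builtin_is_aligned(x, a)  ~>  (x & (a - 1)) == 0
//   __builtin_align_up(x, a)    ~>  (x + a - 1) & ~(a - 1)
//   __builtin_align_down(x, a)  ~>  x & ~(a - 1)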
4706
4707 llvm::Function *generateBuiltinOSLogHelperFunction(
4708 const analyze_os_log::OSLogBufferLayout &Layout,
4709 CharUnits BufferAlignment);
4710
4711 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4712 llvm::CallBase **CallOrInvoke);
4713
4714 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4715 /// is unhandled by the current target.
4716 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4717 ReturnValueSlot ReturnValue);
4718
4719 llvm::Value *
4720 EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4721 const llvm::CmpInst::Predicate Pred,
4722 const llvm::Twine &Name = "");
4723 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4724 ReturnValueSlot ReturnValue,
4725 llvm::Triple::ArchType Arch);
4726 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4727 ReturnValueSlot ReturnValue,
4728 llvm::Triple::ArchType Arch);
4729 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4730 ReturnValueSlot ReturnValue,
4731 llvm::Triple::ArchType Arch);
4732 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4733 QualType RTy);
4734 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4735 QualType RTy);
4736
4737 llvm::Value *
4738 EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic,
4739 unsigned AltLLVMIntrinsic, const char *NameHint,
4740 unsigned Modifier, const CallExpr *E,
4741 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0,
4742 Address PtrOp1, llvm::Triple::ArchType Arch);
4743
4744 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4745 unsigned Modifier, llvm::Type *ArgTy,
4746 const CallExpr *E);
4747 llvm::Value *EmitNeonCall(llvm::Function *F,
4748 SmallVectorImpl<llvm::Value *> &O, const char *name,
4749 unsigned shift = 0, bool rightshift = false);
4750 llvm::Value *EmitFP8NeonCall(unsigned IID, ArrayRef<llvm::Type *> Tys,
4752 const CallExpr *E, const char *name);
4753 llvm::Value *EmitFP8NeonCvtCall(unsigned IID, llvm::Type *Ty0,
4754 llvm::Type *Ty1, bool Extract,
4756 const CallExpr *E, const char *name);
4757 llvm::Value *EmitFP8NeonFDOTCall(unsigned IID, bool ExtendLaneArg,
4758 llvm::Type *RetTy,
4760 const CallExpr *E, const char *name);
4761 llvm::Value *EmitFP8NeonFMLACall(unsigned IID, bool ExtendLaneArg,
4762 llvm::Type *RetTy,
4764 const CallExpr *E, const char *name);
4765 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4766 const llvm::ElementCount &Count);
4767 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4768 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4769 bool negateForRightShift);
4770 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4771 llvm::Type *Ty, bool usgn, const char *name);
4772 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4773 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4774 /// access builtin. Only required if it can't be inferred from the base
4775 /// pointer operand.
4776 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4777
4779 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4781 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4782 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4783 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4784 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4786 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4787 llvm::Type *ReturnType,
4789 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4790 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4791 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4792 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4793 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4795 unsigned BuiltinID);
4796 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4798 unsigned BuiltinID);
4799 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4800 llvm::ScalableVectorType *VTy);
4801 llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
4802 llvm::StructType *Ty);
4803 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4805 unsigned IntID);
4806 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4808 unsigned IntID);
4809 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4811 unsigned BuiltinID, bool IsZExtReturn);
4812 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4814 unsigned BuiltinID);
4815 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4817 unsigned BuiltinID);
4818 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4820 unsigned IntID);
4821 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4823 unsigned IntID);
4824 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4826 unsigned IntID);
4827 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4828
4829 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4831 unsigned IntID);
4832 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4834 unsigned IntID);
4835 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4837 unsigned IntID);
4838 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4840 unsigned IntID);
4841
4842 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4844 SVETypeFlags TypeFlags);
4845
4846 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4847
4848 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4849 llvm::Triple::ArchType Arch);
4850 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4851
4852 llvm::Value *BuildVector(ArrayRef<llvm::Value *> Ops);
4853 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4854 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4855 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4856 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4857 ReturnValueSlot ReturnValue);
4858
4859 // Returns a builtin function that the SPIR-V backend will expand into a spec
4860 // constant.
4861 llvm::Function *
4862 getSpecConstantFunction(const clang::QualType &SpecConstantType);
4863
4864 llvm::Value *EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4865 llvm::Value *EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4866 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4867 const CallExpr *E);
4868 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4869 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4870 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4871 const CallExpr *E);
4872 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4873 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4874 ReturnValueSlot ReturnValue);
4875
4876 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4877 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4878 llvm::Value *EmitRISCVCpuInit();
4879 llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
4880 llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);
4881
4882 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4883 const CallExpr *E);
4884 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4885 llvm::AtomicOrdering &AO,
4886 llvm::SyncScope::ID &SSID);
4887
4888 enum class MSVCIntrin;
4889 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4890
4891 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4892
4893 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
4894 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
4895 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4896 llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
4897 llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
4898 llvm::Value *
4899 EmitObjCCollectionLiteral(const Expr *E,
4900 const ObjCMethodDecl *MethodWithObjects);
4901 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
4902 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
4903 ReturnValueSlot Return = ReturnValueSlot());
4904
4905 /// Retrieves the default cleanup kind for an ARC cleanup.
4906 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4907 CleanupKind getARCCleanupKind() {
4908 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions ? NormalAndEHCleanup
4909 : NormalCleanup;
4910 }
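// Illustrative usage sketch (non-normative): this kind is typically consumed
// when pushing an ARC release cleanup for a __strong local; Addr and Ty are
// assumed to describe that local.
//   CGF.pushDestroy(CGF.getARCCleanupKind(), Addr, Ty,
//                   CodeGenFunction::destroyARCStrongImprecise,
//                   /*useEHCleanupForArray=*/true);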
4911
4912 // ARC primitives.
4913 void EmitARCInitWeak(Address addr, llvm::Value *value);
4914 void EmitARCDestroyWeak(Address addr);
4915 llvm::Value *EmitARCLoadWeak(Address addr);
4916 llvm::Value *EmitARCLoadWeakRetained(Address addr);
4917 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4918 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4919 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4920 void EmitARCCopyWeak(Address dst, Address src);
4921 void EmitARCMoveWeak(Address dst, Address src);
4922 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4923 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4924 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4925 bool resultIgnored);
4926 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4927 bool resultIgnored);
4928 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4929 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4930 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4931 void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
4932 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4933 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4934 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4935 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4936 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4937 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4938
4939 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4940 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4941 llvm::Type *returnType);
4942 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4943
4944 std::pair<LValue, llvm::Value *>
4945 EmitARCStoreAutoreleasing(const BinaryOperator *e);
4946 std::pair<LValue, llvm::Value *> EmitARCStoreStrong(const BinaryOperator *e,
4947 bool ignored);
4948 std::pair<LValue, llvm::Value *>
4949 EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
4950
4951 llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType);
4952 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
4953 llvm::Type *returnType);
4954 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
4955
4956 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
4957 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
4958 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
4959
4960 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
4961 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
4962 bool allowUnsafeClaim);
4963 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
4964 llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
4965 llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
4966
4967 void EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values);
4968
4969 void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
4970
4971 static Destroyer destroyARCStrongImprecise;
4972 static Destroyer destroyARCStrongPrecise;
4973 static Destroyer destroyARCWeak;
4974 static Destroyer emitARCIntrinsicUse;
4975 static Destroyer destroyNonTrivialCStruct;
4976
4977 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
4978 llvm::Value *EmitObjCAutoreleasePoolPush();
4979 llvm::Value *EmitObjCMRRAutoreleasePoolPush();
4980 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
4981 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
4982
4983 /// Emits a reference binding to the passed in expression.
4984 RValue EmitReferenceBindingToExpr(const Expr *E);
4985
4986 //===--------------------------------------------------------------------===//
4987 // Expression Emission
4988 //===--------------------------------------------------------------------===//
4989
4990 // Expressions are broken into three classes: scalar, complex, aggregate.
4991
4992 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4993 /// scalar type, returning the result.
4994 llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
4995
4996 /// Emit a conversion from the specified type to the specified destination
4997 /// type, both of which are LLVM scalar types.
4998 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
4999 QualType DstTy, SourceLocation Loc);
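// Illustrative usage sketch (non-normative): emitting a scalar operand and
// converting it as an implicit cast would; SubExpr, DestTy and Loc are
// assumed caller-provided.
//   llvm::Value *Src = CGF.EmitScalarExpr(SubExpr);
//   llvm::Value *Dst = CGF.EmitScalarConversion(Src, SubExpr->getType(),
//                                               DestTy, Loc);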
5000
5001 /// Emit a conversion from the specified complex type to the specified
5002 /// destination type, where the destination type is an LLVM scalar type.
5003 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
5004 QualType DstTy,
5005 SourceLocation Loc);
5006
5007 /// EmitAggExpr - Emit the computation of the specified expression
5008 /// of aggregate type. The result is computed into the given slot,
5009 /// which may be null to indicate that the value is not needed.
5010 void EmitAggExpr(const Expr *E, AggValueSlot AS);
5011
5012 /// EmitAggExprToLValue - Emit the computation of the specified expression of
5013 /// aggregate type into a temporary LValue.
5014 LValue EmitAggExprToLValue(const Expr *E);
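// Illustrative usage sketch (non-normative): evaluating an aggregate
// expression into a fresh temporary slot; E is an assumed expression of
// aggregate (e.g. C struct) type.
//   AggValueSlot Slot = CGF.CreateAggTemp(E->getType(), "agg.tmp");
//   CGF.EmitAggExpr(E, Slot);
//   Address Result = Slot.getAddress();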
5015
5016 enum ExprValueKind { EVK_RValue, EVK_NonRValue };
5017
5018 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
5019 /// destination address.
5020 void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
5021 ExprValueKind SrcKind);
5022
5023 /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
5024 /// to at most \arg DstSize bytes.
5025 void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
5026 bool DstIsVolatile);
5027
5028 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
5029 /// make sure it survives garbage collection until this point.
5030 void EmitExtendGCLifetime(llvm::Value *object);
5031
5032 /// EmitComplexExpr - Emit the computation of the specified expression of
5033 /// complex type, returning the result.
5034 ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
5035 bool IgnoreImag = false);
5036
5037 /// EmitComplexExprIntoLValue - Emit the given expression of complex
5038 /// type and place its result into the specified l-value.
5039 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
5040
5041 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
5042 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
5043
5044 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
5045 ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
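// Illustrative usage sketch (non-normative): round-tripping a _Complex value
// through l-values; Src and Dst are assumed LValues of the same complex type.
//   ComplexPairTy V = CGF.EmitLoadOfComplex(Src, Loc);
//   CGF.EmitStoreOfComplex(V, Dst, /*isInit=*/false);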
5046
5047 ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
5048 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
5049 ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
5050 ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
5051 QualType PromotionType);
5052
5053 Address emitAddrOfRealComponent(Address complex, QualType complexType);
5054 Address emitAddrOfImagComponent(Address complex, QualType complexType);
5055
5056 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
5057 /// global variable that has already been created for it. If the initializer
5058 /// has a different type than GV does, this may free GV and return a different
5059 /// one. Otherwise it just returns GV.
5060 llvm::GlobalVariable *AddInitializerToStaticVarDecl(const VarDecl &D,
5061 llvm::GlobalVariable *GV);
5062
5063 // Emit an @llvm.invariant.start call for the given memory region.
5064 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
5065
5066 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
5067 /// variable with global storage.
5068 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
5069 bool PerformInit);
5070
5071 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
5072 llvm::Constant *Addr);
5073
5074 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
5075 llvm::FunctionCallee Dtor,
5076 llvm::Constant *Addr,
5077 llvm::FunctionCallee &AtExit);
5078
5079 /// Call atexit() with a function that passes the given argument to
5080 /// the given function.
5081 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
5082 llvm::Constant *addr);
5083
5084 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
5085 /// support an 'atexit()' function.
5086 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
5087 llvm::Constant *addr);
5088
5089 /// Call atexit() with function dtorStub.
5090 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
5091
5092 /// Call unatexit() with function dtorStub.
5093 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
5094
5095 /// Emit code in this function to perform a guarded variable
5096 /// initialization. Guarded initializations are used when it's not
5097 /// possible to prove that an initialization will be done exactly
5098 /// once, e.g. with a static local variable or a static data member
5099 /// of a class template.
5100 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
5101 bool PerformInit);
5102
5103 enum class GuardKind { VariableGuard, TlsGuard };
5104
5105 /// Emit a branch to select whether or not to perform guarded initialization.
5106 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
5107 llvm::BasicBlock *InitBlock,
5108 llvm::BasicBlock *NoInitBlock, GuardKind Kind,
5109 const VarDecl *D);
5110
5111 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
5112 /// variables.
5113 void
5114 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
5115 ArrayRef<llvm::Function *> CXXThreadLocals,
5116 ConstantAddress Guard = ConstantAddress::invalid());
5117
5118 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
5119 /// variables.
5120 void GenerateCXXGlobalCleanUpFunc(
5121 llvm::Function *Fn,
5122 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
5123 llvm::Constant *>>
5124 DtorsOrStermFinalizers);
5125
5126 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
5127 llvm::GlobalVariable *Addr,
5128 bool PerformInit);
5129
5130 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
5131
5132 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
5133
5134 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
5135
5136 RValue EmitAtomicExpr(AtomicExpr *E);
5137
5138 void EmitFakeUse(Address Addr);
5139
5140 //===--------------------------------------------------------------------===//
5141 // Annotations Emission
5142 //===--------------------------------------------------------------------===//
5143
5144 /// Emit an annotation call (intrinsic).
5145 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
5146 llvm::Value *AnnotatedVal,
5147 StringRef AnnotationStr,
5148 SourceLocation Location,
5149 const AnnotateAttr *Attr);
5150
5151 /// Emit local annotations for the local variable V, declared by D.
5152 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
5153
5154 /// Emit field annotations for the given field & value. Returns the
5155 /// annotation result.
5156 Address EmitFieldAnnotations(const FieldDecl *D, Address V);
5157
5158 //===--------------------------------------------------------------------===//
5159 // Internal Helpers
5160 //===--------------------------------------------------------------------===//
5161
5162 /// ContainsLabel - Return true if the statement contains a label in it. If
5163 /// this statement is not executed normally, the absence of a label means
5164 /// that we can just remove the code.
5165 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
5166
5167 /// containsBreak - Return true if the statement contains a break out of it.
5168 /// If the statement (recursively) contains a switch or loop with a break
5169 /// inside of it, this is fine.
5170 static bool containsBreak(const Stmt *S);
5171
5172 /// Determine if the given statement might introduce a declaration into the
5173 /// current scope, by being a (possibly-labelled) DeclStmt.
5174 static bool mightAddDeclToScope(const Stmt *S);
5175
5176 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5177 /// to a constant, or if it does but contains a label, return false. If it
5178 /// constant folds return true and set the boolean result in Result.
5179 bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
5180 bool AllowLabels = false);
5181
5182 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5183 /// to a constant, or if it does but contains a label, return false. If it
5184 /// constant folds return true and set the folded value.
5185 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
5186 bool AllowLabels = false);
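// Illustrative usage sketch (non-normative): typical use when emitting an
// 'if' whose condition may fold away; Cond is the assumed condition expr.
//   bool CondVal;
//   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondVal)) {
//     // Only the CondVal arm needs to be emitted; the fold already rejects
//     // conditions that contain labels unless AllowLabels is set.
//   }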
5187
5188 /// Ignore parentheses and logical-NOT to track conditions consistently.
5189 static const Expr *stripCond(const Expr *C);
5190
5191 /// isInstrumentedCondition - Determine whether the given condition is an
5192 /// instrumentable condition (i.e. no "&&" or "||").
5193 static bool isInstrumentedCondition(const Expr *C);
5194
5195 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
5196 /// increments a profile counter based on the semantics of the given logical
5197 /// operator opcode. This is used to instrument branch condition coverage
5198 /// for logical operators.
5199 void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
5200 llvm::BasicBlock *TrueBlock,
5201 llvm::BasicBlock *FalseBlock,
5202 uint64_t TrueCount = 0,
5203 Stmt::Likelihood LH = Stmt::LH_None,
5204 const Expr *CntrIdx = nullptr);
5205
5206 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
5207 /// if statement) to the specified blocks. Based on the condition, this might
5208 /// try to simplify the codegen of the conditional based on the branch.
5209 /// TrueCount should be the number of times we expect the condition to
5210 /// evaluate to true based on PGO data.
5211 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
5212 llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
5213 Stmt::Likelihood LH = Stmt::LH_None,
5214 const Expr *ConditionalOp = nullptr,
5215 const VarDecl *ConditionalDecl = nullptr);
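// Illustrative usage sketch (non-normative): branching on a condition with
// PGO-informed weights; Cond and ThenStmt are assumed caller-provided.
//   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("if.then");
//   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("if.else");
//   CGF.EmitBranchOnBoolExpr(Cond, ThenBB, ElseBB,
//                            CGF.getProfileCount(ThenStmt));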
5216
5217 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
5218 /// nonnull, if \p LHS is marked _Nonnull.
5219 void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
5220
5221 /// An enumeration which makes it easier to specify whether or not an
5222 /// operation is a subtraction.
5223 enum { NotSubtraction = false, IsSubtraction = true };
5224
5225 /// Emit pointer + index arithmetic.
5226 llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
5227 Expr *pointerOperand, llvm::Value *pointer,
5228 Expr *indexOperand, llvm::Value *index,
5229 bool isSubtraction);
5230
5231 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
5232 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
5233 /// \p SignedIndices indicates whether any of the GEP indices are signed.
5234 /// \p IsSubtraction indicates whether the expression used to form the GEP
5235 /// is a subtraction.
5236 llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
5237 ArrayRef<llvm::Value *> IdxList,
5238 bool SignedIndices, bool IsSubtraction,
5239 SourceLocation Loc,
5240 const Twine &Name = "");
5241
5242 Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
5243 llvm::Type *elementType, bool SignedIndices,
5244 bool IsSubtraction, SourceLocation Loc,
5245 CharUnits Align, const Twine &Name = "");
5246
5247 /// Specifies which type of sanitizer check to apply when handling a
5248 /// particular builtin.
5249 enum BuiltinCheckKind {
5250 BCK_CTZPassedZero,
5251 BCK_CLZPassedZero,
5252 BCK_AssumePassedFalse,
5253 };
5254
5255 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
5256 /// enabled, a runtime check specified by \p Kind is also emitted.
5257 llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
5258
5259 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
5260 /// sanitizer is enabled, a runtime check is also emitted.
5261 llvm::Value *EmitCheckedArgForAssume(const Expr *E);
5262
5263 /// Emit a description of a type in a format suitable for passing to
5264 /// a runtime sanitizer handler.
5265 llvm::Constant *EmitCheckTypeDescriptor(QualType T);
5266
5267 /// Convert a value into a format suitable for passing to a runtime
5268 /// sanitizer handler.
5269 llvm::Value *EmitCheckValue(llvm::Value *V);
5270
5271 /// Emit a description of a source location in a format suitable for
5272 /// passing to a runtime sanitizer handler.
5273 llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
5274
5275 void EmitKCFIOperandBundle(const CGCallee &Callee,
5276 SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
5277
5278 /// Create a basic block that will either trap or call a handler function in
5279 /// the UBSan runtime with the provided arguments, and create a conditional
5280 /// branch to it.
5281 void
5282 EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
5283 Checked,
5284 SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
5285 ArrayRef<llvm::Value *> DynamicArgs,
5286 const TrapReason *TR = nullptr);
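// Illustrative usage sketch (non-normative): a typical UBSan-style check,
// using the divide-by-zero ordinal and handler as an example; Checked, Loc,
// Ty and Val are assumed caller-provided.
//   llvm::Constant *StaticData[] = {CGF.EmitCheckSourceLocation(Loc),
//                                   CGF.EmitCheckTypeDescriptor(Ty)};
//   CGF.EmitCheck(
//       std::make_pair(Checked, SanitizerKind::SO_IntegerDivideByZero),
//       SanitizerHandler::DivremOverflow, StaticData, CGF.EmitCheckValue(Val));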
5287
5288 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
5289 /// if Cond is false.
5290 void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal,
5291 llvm::Value *Cond, llvm::ConstantInt *TypeId,
5292 llvm::Value *Ptr,
5293 ArrayRef<llvm::Constant *> StaticArgs);
5294
5295 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
5296 /// checking is enabled. Otherwise, just emit an unreachable instruction.
5297 void EmitUnreachable(SourceLocation Loc);
5298
5299 /// Create a basic block that will call the trap intrinsic, and emit a
5300 /// conditional branch to it, for the -ftrapv checks.
5301 void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
5302 bool NoMerge = false, const TrapReason *TR = nullptr);
5303
5304 /// Emit a call to trap or debugtrap and attach function attribute
5305 /// "trap-func-name" if specified.
5306 llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
5307
5308 /// Emit a stub for the cross-DSO CFI check function.
5309 void EmitCfiCheckStub();
5310
5311 /// Emit a cross-DSO CFI failure handling function.
5312 void EmitCfiCheckFail();
5313
5314 /// Create a check for a function parameter that may potentially be
5315 /// declared as non-null.
5316 void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
5317 AbstractCallee AC, unsigned ParmNum);
5318
5319 void EmitNonNullArgCheck(Address Addr, QualType ArgType,
5320 SourceLocation ArgLoc, AbstractCallee AC,
5321 unsigned ParmNum);
5322
5323 /// EmitWritebacks - Emit the writebacks for the given call arguments.
5324 void EmitWritebacks(const CallArgList &Args);
5325
5326 /// EmitCallArg - Emit a single call argument.
5327 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
5328
5329 /// EmitDelegateCallArg - We are performing a delegate call; that
5330 /// is, the current function is delegating to another one. Produce
5331 /// an r-value suitable for passing the given parameter.
5332 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
5333 SourceLocation loc);
5334
5335 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
5336 /// point operation, expressed as the maximum relative error in ulp.
5337 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
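// Illustrative usage sketch (non-normative): attaching an accuracy bound to
// a freshly emitted floating-point operation (this becomes !fpmath metadata).
//   llvm::Value *Div = CGF.Builder.CreateFDiv(LHS, RHS, "div");
//   CGF.SetFPAccuracy(Div, 2.5f); // allow up to 2.5 ulp of error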
5338
5339 /// Set the minimum required accuracy of the given sqrt operation
5340 /// based on CodeGenOpts.
5341 void SetSqrtFPAccuracy(llvm::Value *Val);
5342
5343 /// Set the minimum required accuracy of the given sqrt operation based on
5344 /// CodeGenOpts.
5345 void SetDivFPAccuracy(llvm::Value *Val);
5346
5347 /// Set the codegen fast-math flags.
5348 void SetFastMathFlags(FPOptions FPFeatures);
5349
5350 // Truncate or extend a boolean vector to the requested number of elements.
5351 llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
5352 unsigned NumElementsDst,
5353 const llvm::Twine &Name = "");
5354
5355 void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
5356 SourceLocation Loc);
5357
5358private:
5359 // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
5360 // as its parent convergence instr.
5361 llvm::ConvergenceControlInst *emitConvergenceLoopToken(llvm::BasicBlock *BB);
5362
5363 // Adds a convergence_ctrl token with |ParentToken| as parent convergence
5364 // instr to the call |Input|.
5365 llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input);
5366
5367 // Find the convergence_entry instruction of |F|, or emit one if none exists.
5368 // Returns the convergence instruction.
5369 llvm::ConvergenceControlInst *
5370 getOrEmitConvergenceEntryToken(llvm::Function *F);
5371
5372private:
5373 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
5374 void EmitReturnOfRValue(RValue RV, QualType Ty);
5375
5376 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
5377
5378 llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
5379 DeferredReplacements;
5380
5381 /// Set the address of a local variable.
5382 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5383 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5384 LocalDeclMap.insert({VD, Addr});
5385 }
5386
5387 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
5388 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
5389 ///
5390 /// \param AI - The first function argument of the expansion.
5391 void ExpandTypeFromArgs(QualType Ty, LValue Dst,
5392 llvm::Function::arg_iterator &AI);
5393
5394 /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
5395 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
5396 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
5397 void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
5398 SmallVectorImpl<llvm::Value *> &IRCallArgs,
5399 unsigned &IRCallArgPos);
5400
5401 std::pair<llvm::Value *, llvm::Type *>
5402 EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
5403 std::string &ConstraintStr);
5404
5405 std::pair<llvm::Value *, llvm::Type *>
5406 EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
5407 QualType InputType, std::string &ConstraintStr,
5408 SourceLocation Loc);
5409
5410 /// Attempts to statically evaluate the object size of E. If that
5411 /// fails, emits code to figure the size of E out for us. This is
5412 /// pass_object_size aware.
5413 ///
5414 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
5415 llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
5416 llvm::IntegerType *ResType,
5417 llvm::Value *EmittedE,
5418 bool IsDynamic);
5419
5420 /// Emits the size of E, as required by __builtin_object_size. This
5421 /// function is aware of pass_object_size parameters, and will act accordingly
5422 /// if E is a parameter with the pass_object_size attribute.
5423 llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
5424 llvm::IntegerType *ResType,
5425 llvm::Value *EmittedE, bool IsDynamic);
5426
5427 llvm::Value *emitCountedBySize(const Expr *E, llvm::Value *EmittedE,
5428 unsigned Type, llvm::IntegerType *ResType);
5429
5430 llvm::Value *emitCountedByMemberSize(const MemberExpr *E, const Expr *Idx,
5431 llvm::Value *EmittedE,
5432 QualType CastedArrayElementTy,
5433 unsigned Type,
5434 llvm::IntegerType *ResType);
5435
5436 llvm::Value *emitCountedByPointerSize(const ImplicitCastExpr *E,
5437 const Expr *Idx, llvm::Value *EmittedE,
5438 QualType CastedArrayElementTy,
5439 unsigned Type,
5440 llvm::IntegerType *ResType);
5441
5442 void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
5443 Address Loc);
5444
5445public:
5446 enum class EvaluationOrder {
5447 ///! No language constraints on evaluation order.
5448 Default,
5449 ///! Language semantics require left-to-right evaluation.
5450 ForceLeftToRight,
5451 ///! Language semantics require right-to-left evaluation.
5452 ForceRightToLeft
5453 };
5454
5455 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5456 // an ObjCMethodDecl.
5457 struct PrototypeWrapper {
5458 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5459
5460 PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
5461 PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
5462 };
5463
5464 void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
5465 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
5466 AbstractCallee AC = AbstractCallee(),
5467 unsigned ParamsToSkip = 0,
5468 EvaluationOrder Order = EvaluationOrder::Default);
5469
5470 /// EmitPointerWithAlignment - Given an expression with a pointer type,
5471 /// emit the value and compute our best estimate of the alignment of the
5472 /// pointee.
5473 ///
5474 /// \param BaseInfo - If non-null, this will be initialized with
5475 /// information about the source of the alignment and the may-alias
5476 /// attribute. Note that this function will conservatively fall back on
5477 /// the type when it doesn't recognize the expression and may-alias will
5478 /// be set to false.
5479 ///
5480 /// One reasonable way to use this information is when there's a language
5481 /// guarantee that the pointer must be aligned to some stricter value, and
5482 /// we're simply trying to ensure that sufficiently obvious uses of under-
5483 /// aligned objects don't get miscompiled; for example, a placement new
5484 /// into the address of a local variable. In such a case, it's quite
5485 /// reasonable to just ignore the returned alignment when it isn't from an
5486 /// explicit source.
5487 Address
5488 EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
5489 TBAAAccessInfo *TBAAInfo = nullptr,
5490 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
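// Illustrative usage sketch (non-normative): obtaining an aligned Address for
// a pointer-typed expression before forming an l-value for its pointee;
// PtrExpr is an assumed pointer-typed expression.
//   LValueBaseInfo BaseInfo;
//   TBAAAccessInfo TBAAInfo;
//   Address Addr = CGF.EmitPointerWithAlignment(PtrExpr, &BaseInfo, &TBAAInfo);
//   LValue LV = CGF.MakeAddrLValue(Addr,
//                                  PtrExpr->getType()->getPointeeType(),
//                                  BaseInfo, TBAAInfo);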
5491
5492 /// If \p E references a parameter with pass_object_size info or a constant
5493 /// array size modifier, emit the object size divided by the size of \p EltTy.
5494 /// Otherwise return null.
5495 llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
5496
5497 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5498
5499 struct FMVResolverOption {
5500 llvm::Function *Function;
5501 llvm::SmallVector<StringRef, 8> Features;
5502 std::optional<StringRef> Architecture;
5503
5504 FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
5505 std::optional<StringRef> Arch = std::nullopt)
5506 : Function(F), Features(Feats), Architecture(Arch) {}
5507 };
5508
5509 // Emits the body of a multiversion function's resolver. Assumes that the
5510 // options are already sorted in the proper order, with the 'default' option
5511 // last (if it exists).
5512 void EmitMultiVersionResolver(llvm::Function *Resolver,
5513 ArrayRef<FMVResolverOption> Options);
5514 void EmitX86MultiVersionResolver(llvm::Function *Resolver,
5515 ArrayRef<FMVResolverOption> Options);
5516 void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
5517 ArrayRef<FMVResolverOption> Options);
5518 void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
5519 ArrayRef<FMVResolverOption> Options);
5520
5521private:
5522 QualType getVarArgType(const Expr *Arg);
5523
5524 void EmitDeclMetadata();
5525
5526 BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
5527 const AutoVarEmission &emission);
5528
5529 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
5530
5531 llvm::Value *GetValueForARMHint(unsigned BuiltinID);
5532 llvm::Value *EmitX86CpuIs(const CallExpr *E);
5533 llvm::Value *EmitX86CpuIs(StringRef CPUStr);
5534 llvm::Value *EmitX86CpuSupports(const CallExpr *E);
5535 llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
5536 llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
5537 llvm::Value *EmitX86CpuInit();
5538 llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
5539 llvm::Value *EmitAArch64CpuInit();
5540 llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
5541 llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
5542 llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
5543};
5544
5545 inline DominatingLLVMValue::saved_type
5546 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
5547 if (!needsSaving(value))
5548 return saved_type(value, false);
5549
5550 // Otherwise, we need an alloca.
5551 auto align = CharUnits::fromQuantity(
5552 CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
5553 Address alloca =
5554 CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
5555 CGF.Builder.CreateStore(value, alloca);
5556
5557 return saved_type(alloca.emitRawPointer(CGF), true);
5558}
5559
5560 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
5561 saved_type value) {
5562 // If the value says it wasn't saved, trust that it's still dominating.
5563 if (!value.getInt())
5564 return value.getPointer();
5565
5566 // Otherwise, it should be an alloca instruction, as set up in save().
5567 auto alloca = cast<llvm::AllocaInst>(value.getPointer());
5568 return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
5569 alloca->getAlign());
5570}
5571
5572} // end namespace CodeGen
5573
5574// Map the LangOption for floating point exception behavior into
5575// the corresponding enum in the IR.
5576llvm::fp::ExceptionBehavior
5577 ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
5578 } // end namespace clang
5579
5580#endif
Enums/classes describing ABI related information about constructors, destructors and thunks.
#define V(N, I)
Definition: ASTContext.h:3597
MatchType Type
StringRef P
static bool CanThrow(Expr *E, ASTContext &Ctx)
Definition: CFG.cpp:2777
static T * buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo, T &&generator)
Lazily build the copy and dispose helpers for a __block variable with the given information.
Definition: CGBlocks.cpp:2521
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, GlobalDecl GD, llvm::Type *Ty, const CXXRecordDecl *RD)
Definition: CGCXX.cpp:243
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:4246
@ ForDeactivation
Definition: CGCleanup.cpp:1213
CodeGenFunction::ComplexPairTy ComplexPairTy
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1376
const Decl * D
enum clang::sema::@1840::IndirectLocalPathEntry::EntryKind Kind
Expr * E
unsigned OldSize
Defines the clang::Expr interface and subclasses for C++ expressions.
const CFGBlock * Block
Definition: HTMLLogger.cpp:152
#define X(type, name)
Definition: Value.h:145
llvm::MachO::Architecture Architecture
Definition: MachO.h:27
llvm::MachO::Target Target
Definition: MachO.h:51
OffloadArch Arch
Definition: OffloadArch.cpp:10
Defines some OpenMP-specific enums and functions.
SanitizerHandler
SourceRange Range
Definition: SemaObjC.cpp:753
VarDecl * Variable
Definition: SemaObjC.cpp:752
SourceLocation Loc
Definition: SemaObjC.cpp:754
const char * Data
This file defines OpenACC AST classes for statement-level contructs.
This file defines OpenMP AST classes for executable directives and clauses.
This file defines SYCL AST classes used to represent calls to SYCL kernels.
C Language Family Type Representation.
StateNode * Previous
a trap message and trap category.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4289
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:7092
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2723
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: TypeBase.h:3738
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6816
Attr - This represents one attribute.
Definition: Attr.h:44
Represents an attribute applied to a statement.
Definition: Stmt.h:2203
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition: Expr.h:4389
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition: Expr.h:4427
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition: Expr.h:4424
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3974
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:4107
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6560
Represents a call to a CUDA kernel function.
Definition: ExprCXX.h:234
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1494
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2604
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2620
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2869
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:481
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1753
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2129
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2349
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:84
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2739
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
Represents a C++ temporary.
Definition: ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1209
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:848
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1069
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2879
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3899
This captures a statement into a function.
Definition: Stmt.h:3886
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3612
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:198
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:253
CharUnits getAlignment() const
Definition: Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:209
void setAlignment(CharUnits Value)
Definition: Address.h:196
llvm::Value * getOffset() const
Definition: Address.h:246
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:186
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:204
An aggregate value slot.
Definition: CGValue.h:504
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A pair of helper functions for a __block variable.
Information about the layout of a __block variable.
Definition: CGBlocks.h:136
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:157
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:140
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:132
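The Address accessors and the CGBuilderTy helpers above are typically used together; a small illustrative sketch only (emitRawCopy is a hypothetical helper that would live in a clang/lib/CodeGen source file):

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static void emitRawCopy(CodeGenFunction &CGF, Address Dst, Address Src) {
  // CreateAlignedLoad takes the element type, raw pointer and alignment that
  // the Address bundles together; emitRawPointer handles any authentication
  // or offset the Address may carry.
  llvm::Value *V = CGF.Builder.CreateAlignedLoad(
      Src.getElementType(), Src.emitRawPointer(CGF), Src.getAlignment());
  // CreateStore accepts the Address directly and uses its alignment.
  CGF.Builder.CreateStore(V, Dst);
}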
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
All available information about a concrete callee.
Definition: CGCall.h:63
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
CGAtomicOptionsRAII(const CGAtomicOptionsRAII &)=delete
CGAtomicOptionsRAII & operator=(const CGAtomicOptionsRAII &)=delete
API for captured statement code generation.
static bool classof(const CGCapturedStmtInfo *)
llvm::SmallDenseMap< const VarDecl *, FieldDecl * > getCaptureFields()
Get the CaptureFields.
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
FieldConstructionScope(CodeGenFunction &CGF, Address This)
A class controlling the emission of a finally block.
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
RAII for preserving necessary info during inlined region body codegen.
InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB)
void Emit(CodeGenFunction &CGF, Flags) override
Emit the cleanup.
RAII for preserving necessary info during Outlined region body codegen.
OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB)
Controls insertion of cancellation exit blocks in worksharing constructs.
OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel)
Save/restore original map of previously emitted local vars in case when we need to duplicate emission...
The class used to assign some variables temporary addresses.
bool apply(CodeGenFunction &CGF)
Applies new addresses to the list of the variables.
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr)
Sets the address of the variable LocalVD to be TempAddr in function CGF.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
void restoreMap()
Restore all mapped variables w/o clean up.
bool Privatize()
Privatizes local variables previously registered as private.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
OMPPrivateScope(CodeGenFunction &CGF)
Enter a new OpenMP private scope.
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
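A minimal sketch of how the OMPPrivateScope members above combine; the helper name and the way PrivateAddr is obtained are assumptions for illustration only:

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static void emitBodyWithPrivateVar(CodeGenFunction &CGF, const VarDecl *VD,
                                   Address PrivateAddr, const Stmt *Body) {
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  // Register VD so lookups in this scope resolve to PrivateAddr instead of
  // the original storage.
  PrivateScope.addPrivate(VD, PrivateAddr);
  // Privatize applies all registered remappings at once.
  (void)PrivateScope.Privatize();
  CGF.EmitStmt(Body);
  // The destructor (see ~OMPPrivateScope above) restores the original map.
}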
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?...
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue)
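A sketch of the OpaqueValueMapping RAII pattern described above, assuming an lvalue binding; emitThroughOpaqueValue is a hypothetical helper, not part of this header:

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static LValue emitThroughOpaqueValue(CodeGenFunction &CGF,
                                     const OpaqueValueExpr *OVE,
                                     const Expr *Consumer) {
  // Evaluate the underlying expression once and bind the result to OVE.
  LValue Bound = CGF.EmitLValue(OVE->getSourceExpr());
  CodeGenFunction::OpaqueValueMapping Mapping(CGF, OVE, Bound);
  // While Mapping is alive, any reference to OVE inside Consumer reuses
  // Bound rather than re-evaluating the source expression.
  return CGF.EmitLValue(Consumer);
}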
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
ParentLoopDirectiveForScanRegion(CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan)
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
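A minimal sketch of the RunCleanupsScope pattern from the entries above; emitStmtInCleanupScope is a hypothetical helper:

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static void emitStmtInCleanupScope(CodeGenFunction &CGF, const Stmt *S) {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  // Emission of S may push cleanups (destructors, lifetime ends, ...).
  CGF.EmitStmt(S);
  // ~RunCleanupsScope emits everything pushed since construction; call
  // ForceCleanup() instead if the cleanups must run at a specific point.
}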
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
static Destroyer destroyNonTrivialCStruct
bool isBinaryLogicalOp(const Expr *E) const
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
SanitizerSet SanOpts
Sanitizers enabled for this function.
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
AwaitSuspendWrapperInfo CurAwaitSuspendWrapper
void EmitFakeUse(Address Addr)
Definition: CGDecl.cpp:1380
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
CGCapturedStmtInfo * CapturedStmtInfo
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
void unprotectFromPeepholes(PeepholeProtection protection)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
const CodeGen::CGBlockInfo * BlockInfo
void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
@ TCK_Upcast
Checking the operand of a cast to a base object.
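The Address-based EmitTypeCheck overload above is normally paired with one of the TypeCheckKind values just listed; a hedged sketch (emitCheckedScalarLoad is hypothetical):

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static llvm::Value *emitCheckedScalarLoad(CodeGenFunction &CGF, Address Addr,
                                          QualType Ty, SourceLocation Loc) {
  // With the relevant -fsanitize checks enabled this emits null / alignment /
  // object-size checks for the load; otherwise it is effectively a no-op.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_Load, Loc, Addr, Ty);
  return CGF.EmitLoadOfScalar(Addr, /*Volatile=*/false, Ty, Loc);
}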
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
Definition: CGCleanup.cpp:1112
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count)
Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *elementType, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, const Twine &Name="")
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::BasicBlock * getUnreachableBlock()
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
llvm::SmallVector< const JumpDest *, 2 > SEHTryEpilogueStack
JumpDest ReturnBlock
ReturnBlock - Unified return block.
DominatingValue< T >::saved_type saveValueInCond(T value)
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
llvm::Value * EmitSVEDupX(llvm::Value *Scalar)
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A)
llvm::DenseMap< const Decl *, Address > DeclMapTy
const TargetInfo & getTarget() const
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:424
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1293
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
static Destroyer destroyARCStrongImprecise
llvm::BasicBlock * getInvokeDest()
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
llvm::Value * EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty)
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition: CGStmt.cpp:1012
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke=nullptr, bool IsMustTail=false)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Definition: CGBlocks.cpp:2634
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:151
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
llvm::Function * generateAwaitSuspendWrapper(Twine const &CoroName, Twine const &SuspendPointName, CoroutineSuspendExpr const &S)
const TargetCodeGenInfo & getTargetHooks() const
void setBeforeOutermostConditional(llvm::Value *value, Address addr, CodeGenFunction &CGF)
void EmitLifetimeEnd(llvm::Value *Addr)
Definition: CGDecl.cpp:1369
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, RawAddress ActiveFlag, As... A)
int ExpectedOMPLoopDepth
Number of nested loop to be consumed by the last surrounding loop-associated directive.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
llvm::Value * LoadCXXVTT()
LoadCXXVTT - Load the VTT parameter passed to base constructors/destructors of classes with virtual bases.
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:264
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:61
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
CodeGenTypes & getTypes() const
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr, RawAddress *Alloca=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:186
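A sketch combining CreateMemTemp with EmitStoreOfScalar (listed further below); materializeScalar is hypothetical and the implicit RawAddress-to-Address conversion is assumed:

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static RawAddress materializeScalar(CodeGenFunction &CGF, llvm::Value *V,
                                    QualType Ty) {
  // CreateMemTemp picks a suitable alignment for Ty and places the alloca in
  // the entry block (see CreateTempAlloca above).
  RawAddress Temp = CGF.CreateMemTemp(Ty, "scalar.tmp");
  // Store through the Address interface so the chosen alignment is honoured.
  CGF.EmitStoreOfScalar(V, Temp, /*Volatile=*/false, Ty);
  return Temp;
}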
SmallVector< llvm::Value *, 8 > ObjCEHValueStack
ObjCEHValueStack - Stack of Objective-C exception values, used for rethrows.
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
const CGFunctionInfo * CurFnInfo
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1631
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
static Destroyer destroyARCStrongPrecise
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
llvm::SmallVector< VPtr, 4 > VPtrsVector
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< Address > args, const Twine &name="")
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source=AlignmentSource::Type)
llvm::Type * ConvertType(const TypeDecl *T)
This class organizes the cross-function state that is used while generating LLVM code.
void setAtomicOpts(AtomicOptions AO)
Set the current Atomic options.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
const llvm::DataLayout & getDataLayout() const
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:296
DominatingValue< Address >::saved_type AggregateAddr
static saved_type save(CodeGenFunction &CGF, RValue value)
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:146
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:208
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:99
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:375
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:398
void pushCleanupTuple(CleanupKind Kind, std::tuple< As... > A)
Push a lazily-created cleanup on the stack. Tuple version.
Definition: EHScopeStack.h:300
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:375
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
CharUnits getAlignment() const
Definition: CGValue.h:343
QualType getType() const
Definition: CGValue.h:291
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
A stack of loop information corresponding to loop nesting levels.
Definition: CGLoopInfo.h:207
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
An abstract representation of an aligned address.
Definition: Address.h:42
bool isValid() const
Definition: Address.h:62
This class provides a way to call the simple version of codegen for an OpenMP region, or an advanced one with possibl...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:379
TargetCodeGenInfo - This class organizes various target-specific code generation issues,...
Definition: TargetInfo.h:47
The class detects jumps which bypass local variable declarations: goto L; int a; L:
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4236
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3541
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1720
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition: ExprCXX.h:5249
Represents the current source location and context used to determine the value of the source location...
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2393
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1272
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
This represents one expression.
Definition: Expr.h:112
QualType getType() const
Definition: Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6500
Represents a member of a struct/union/class.
Definition: Decl.h:3157
Represents a function declaration or definition.
Definition: Decl.h:1999
Represents a prototype with parameter type info, e.g.
Definition: TypeBase.h:5282
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:57
const Decl * getDecl() const
Definition: GlobalDecl.h:106
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2969
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7258
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2259
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:3008
Describes a C or C++ initializer list.
Definition: Expr.h:5235
Represents the declaration of a label.
Definition: Decl.h:523
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2146
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:227
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:434
Represents a point when we exit a loop.
Definition: ProgramPoint.h:721
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4914
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2801
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3300
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: TypeBase.h:3669
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents '#pragma omp atomic' directive.
Definition: StmtOpenMP.h:2947
This represents '#pragma omp barrier' directive.
Definition: StmtOpenMP.h:2625
This represents '#pragma omp cancel' directive.
Definition: StmtOpenMP.h:3655
This represents '#pragma omp cancellation point' directive.
Definition: StmtOpenMP.h:3597
Representation of an OpenMP canonical loop.
Definition: StmtOpenMP.h:142
This represents '#pragma omp critical' directive.
Definition: StmtOpenMP.h:2076
This represents '#pragma omp depobj' directive.
Definition: StmtOpenMP.h:2841
This represents '#pragma omp distribute' directive.
Definition: StmtOpenMP.h:4425
This represents '#pragma omp distribute parallel for' composite directive.
Definition: StmtOpenMP.h:4547
This represents '#pragma omp distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:4643
This represents '#pragma omp distribute simd' composite directive.
Definition: StmtOpenMP.h:4708
This represents '#pragma omp error' directive.
Definition: StmtOpenMP.h:6514
This is a basic class for representing single OpenMP executable directive.
Definition: StmtOpenMP.h:266
This represents '#pragma omp flush' directive.
Definition: StmtOpenMP.h:2789
This represents '#pragma omp for' directive.
Definition: StmtOpenMP.h:1634
This represents '#pragma omp for simd' directive.
Definition: StmtOpenMP.h:1724
This represents '#pragma omp loop' directive.
Definition: StmtOpenMP.h:6185
Represents the '#pragma omp interchange' loop transformation directive.
Definition: StmtOpenMP.h:5851
This represents '#pragma omp interop' directive.
Definition: StmtOpenMP.h:5977
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1004
This represents '#pragma omp masked' directive.
Definition: StmtOpenMP.h:6095
This represents '#pragma omp masked taskloop' directive.
Definition: StmtOpenMP.h:3930
This represents '#pragma omp masked taskloop simd' directive.
Definition: StmtOpenMP.h:4071
This represents '#pragma omp master' directive.
Definition: StmtOpenMP.h:2028
This represents '#pragma omp master taskloop' directive.
Definition: StmtOpenMP.h:3854
This represents '#pragma omp master taskloop simd' directive.
Definition: StmtOpenMP.h:4006
This represents '#pragma omp metadirective' directive.
Definition: StmtOpenMP.h:6146
This represents '#pragma omp ordered' directive.
Definition: StmtOpenMP.h:2893
This represents '#pragma omp parallel' directive.
Definition: StmtOpenMP.h:611
This represents '#pragma omp parallel for' directive.
Definition: StmtOpenMP.h:2147
This represents '#pragma omp parallel for simd' directive.
Definition: StmtOpenMP.h:2244
This represents '#pragma omp parallel masked' directive.
Definition: StmtOpenMP.h:2372
This represents '#pragma omp parallel masked taskloop' directive.
Definition: StmtOpenMP.h:4215
This represents '#pragma omp parallel masked taskloop simd' directive.
Definition: StmtOpenMP.h:4360
This represents '#pragma omp parallel master' directive.
Definition: StmtOpenMP.h:2309
This represents '#pragma omp parallel master taskloop' directive.
Definition: StmtOpenMP.h:4137
This represents '#pragma omp parallel master taskloop simd' directive.
Definition: StmtOpenMP.h:4293
This represents '#pragma omp parallel sections' directive.
Definition: StmtOpenMP.h:2436
Represents the '#pragma omp reverse' loop transformation directive.
Definition: StmtOpenMP.h:5779
This represents '#pragma omp scan' directive.
Definition: StmtOpenMP.h:5924
This represents '#pragma omp scope' directive.
Definition: StmtOpenMP.h:1925
This represents '#pragma omp section' directive.
Definition: StmtOpenMP.h:1864
This represents '#pragma omp sections' directive.
Definition: StmtOpenMP.h:1787
This represents '#pragma omp simd' directive.
Definition: StmtOpenMP.h:1571
This represents '#pragma omp single' directive.
Definition: StmtOpenMP.h:1977
This represents the '#pragma omp stripe' loop transformation directive.
Definition: StmtOpenMP.h:5625
This represents '#pragma omp target data' directive.
Definition: StmtOpenMP.h:3206
This represents '#pragma omp target' directive.
Definition: StmtOpenMP.h:3152
This represents '#pragma omp target enter data' directive.
Definition: StmtOpenMP.h:3260
This represents '#pragma omp target exit data' directive.
Definition: StmtOpenMP.h:3315
This represents '#pragma omp target parallel' directive.
Definition: StmtOpenMP.h:3369
This represents '#pragma omp target parallel for' directive.
Definition: StmtOpenMP.h:3449
This represents '#pragma omp target parallel for simd' directive.
Definition: StmtOpenMP.h:4774
This represents '#pragma omp target parallel loop' directive.
Definition: StmtOpenMP.h:6452
This represents '#pragma omp target simd' directive.
Definition: StmtOpenMP.h:4841
This represents '#pragma omp target teams' directive.
Definition: StmtOpenMP.h:5199
This represents '#pragma omp target teams distribute' combined directive.
Definition: StmtOpenMP.h:5255
This represents '#pragma omp target teams distribute parallel for' combined directive.
Definition: StmtOpenMP.h:5322
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
Definition: StmtOpenMP.h:5420
This represents '#pragma omp target teams distribute simd' combined directive.
Definition: StmtOpenMP.h:5490
This represents '#pragma omp target teams loop' directive.
Definition: StmtOpenMP.h:6312
This represents '#pragma omp target update' directive.
Definition: StmtOpenMP.h:4491
This represents '#pragma omp task' directive.
Definition: StmtOpenMP.h:2517
This represents '#pragma omp taskloop' directive.
Definition: StmtOpenMP.h:3715
This represents '#pragma omp taskloop simd' directive.
Definition: StmtOpenMP.h:3788
This represents '#pragma omp taskgroup' directive.
Definition: StmtOpenMP.h:2722
This represents '#pragma omp taskwait' directive.
Definition: StmtOpenMP.h:2671
This represents '#pragma omp taskyield' directive.
Definition: StmtOpenMP.h:2579
This represents '#pragma omp teams' directive.
Definition: StmtOpenMP.h:3544
This represents '#pragma omp teams distribute' directive.
Definition: StmtOpenMP.h:4906
This represents '#pragma omp teams distribute parallel for' composite directive.
Definition: StmtOpenMP.h:5106
This represents '#pragma omp teams distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:5040
This represents '#pragma omp teams distribute simd' combined directive.
Definition: StmtOpenMP.h:4972
This represents '#pragma omp teams loop' directive.
Definition: StmtOpenMP.h:6247
This represents the '#pragma omp tile' loop transformation directive.
Definition: StmtOpenMP.h:5548
This represents the '#pragma omp unroll' loop transformation directive.
Definition: StmtOpenMP.h:5705
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:192
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:128
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:948
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:308
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:409
ObjCImplementationDecl - Represents a class definition - this is where method definitions are specifi...
Definition: DeclObjC.h:2597
Represents an ObjC class declaration.
Definition: DeclObjC.h:1154
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1498
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:548
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:940
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2805
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:504
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:454
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:52
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1180
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1230
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Definition: StmtOpenACC.h:132
This class represents a 'loop' construct.
Definition: StmtOpenACC.h:190
Represents a parameter to a function.
Definition: Decl.h:1789
Pointer-authentication qualifiers.
Definition: TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: TypeBase.h:3346
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:2007
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6692
A (possibly-)qualified type.
Definition: TypeBase.h:937
The collection of all-type qualifiers we support.
Definition: TypeBase.h:331
Represents a struct/union/class.
Definition: Decl.h:4309
Flags to identify the types for overloaded SVE builtins.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
A trivial tuple used to represent a source range.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4531
Stmt - This represents one statement.
Definition: Stmt.h:85
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1415
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1801
Exposes information about the current target.
Definition: TargetInfo.h:226
Represents a declaration of a type.
Definition: Decl.h:3510
The base class of the type hierarchy.
Definition: TypeBase.h:1833
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.h:41
bool isReferenceType() const
Definition: TypeBase.h:8604
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2246
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4893
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:711
QualType getType() const
Definition: Decl.h:722
Represents a variable declaration or definition.
Definition: Decl.h:925
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2257
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
Definition: Decl.h:1261
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: TypeBase.h:3982
Expr * getSizeExpr() const
Definition: TypeBase.h:3996
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2697
Defines the clang::TargetInfo interface.
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:25
BinaryOperatorKind
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ CR_Default
Definition: CapturedStmt.h:17
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
ExprValueKind
The categorization of expression values, currently following the C++11 scheme.
Definition: Specifiers.h:132
const FunctionProtoType * T
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
unsigned fine_grained_memory
Definition: LangOptions.h:1051
unsigned ignore_denormal_mode
Definition: LangOptions.h:1052
Structure with information about how a bitfield should be accessed.
llvm::SmallVector< llvm::AllocaInst * > Take()
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
FMVResolverOption(llvm::Function *F, ArrayRef< StringRef > Feats, std::optional< StringRef > Arch=std::nullopt)
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
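A sketch of the JumpDest / EmitBranchThroughCleanup pattern described in the entries above; note that JumpDest::getBlock() and CodeGenFunction::EmitBlock() are used here even though they do not appear in this listing, and emitJumpOverCleanups is hypothetical:

#include "CodeGenFunction.h" // in-tree header from clang/lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static void emitJumpOverCleanups(CodeGenFunction &CGF) {
  // A JumpDest pairs a basic block with the cleanup depth it lives at.
  CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope("skip.dest");
  // EmitBranchThroughCleanup routes the branch through any cleanups between
  // the current scope and the destination before jumping to it.
  CGF.EmitBranchThroughCleanup(Dest);
  // Continue emission at the destination block.
  CGF.EmitBlock(Dest.getBlock());
}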
Header for data within LifetimeExtendedCleanupStack.
unsigned Size
The size of the following cleanup object.
unsigned IsConditional
Whether this is a conditional cleanup.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
OMPBuilderCBHelpers & operator=(const OMPBuilderCBHelpers &)=delete
OMPBuilderCBHelpers(const OMPBuilderCBHelpers &)=delete
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems)
llvm::PointerUnion< const FunctionProtoType *, const ObjCMethodDecl * > P
Struct with all information about dynamic [sub]class needed to set vptr.
This structure provides a set of types that are commonly used during IR emission.
Helper class with most of the code for saving a value for a conditional expression cleanup.
llvm::PointerIntPair< llvm::Value *, 1, bool > saved_type
static llvm::Value * restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
static type restore(CodeGenFunction &CGF, saved_type value)
static type restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, type value)
static saved_type save(CodeGenFunction &CGF, type value)
static type restore(CodeGenFunction &CGF, saved_type value)
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition: Thunk.h:157