clang 22.0.0git
CGExprAgg.cpp
Go to the documentation of this file.
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGDebugInfo.h"
15#include "CGHLSLRuntime.h"
16#include "CGObjCRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "EHScopeStack.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclCXX.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/IntrinsicInst.h"
33#include "llvm/IR/Intrinsics.h"
34using namespace clang;
35using namespace CodeGen;
36
37//===----------------------------------------------------------------------===//
38// Aggregate Expression Emitter
39//===----------------------------------------------------------------------===//
40
41namespace llvm {
42extern cl::opt<bool> EnableSingleByteCoverage;
43} // namespace llvm
44
45namespace {
// NOTE(review): this block was extracted from a rendered (doxygen) listing.
// The number fused onto the start of each line is the upstream file's line
// number, and gaps in that numbering mean lines are MISSING from this copy.
// Restore the block from upstream clang/lib/CodeGen/CGExprAgg.cpp before
// building; the notes below mark each visible gap.
//
// AggExprEmitter: a StmtVisitor that emits an aggregate-typed expression into
// a destination slot (Dest). When the caller ignored the result but one is
// still needed, a temporary slot is materialized on demand.
46class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
47 CodeGenFunction &CGF;
48 CGBuilderTy &Builder;
49 AggValueSlot Dest;
50 bool IsResultUnused;
51
  // Return Dest if it is usable, otherwise create a fresh temporary slot of
  // type T. Does not modify Dest.
52 AggValueSlot EnsureSlot(QualType T) {
53 if (!Dest.isIgnored()) return Dest;
54 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
55 }
  // Like EnsureSlot, but installs the temporary into Dest so later code sees it.
56 void EnsureDest(QualType T) {
57 if (!Dest.isIgnored()) return;
58 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
59 }
60
61 // Calls `Fn` with a valid return value slot, potentially creating a temporary
62 // to do so. If a temporary is created, an appropriate copy into `Dest` will
63 // be emitted, as will lifetime markers.
64 //
65 // The given function should take a ReturnValueSlot, and return an RValue that
66 // points to said slot.
67 void withReturnValueSlot(const Expr *E,
68 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
69
70 void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
71 const FieldDecl *NextField);
72
73public:
74 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
75 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
76 IsResultUnused(IsResultUnused) { }
77
78 //===--------------------------------------------------------------------===//
79 // Utilities
80 //===--------------------------------------------------------------------===//
81
82 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
83 /// represents a value lvalue, this method emits the address of the lvalue,
84 /// then loads the result into DestPtr.
85 void EmitAggLoadOfLValue(const Expr *E);
86
87 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
88 /// SrcIsRValue is true if source comes from an RValue.
  // NOTE(review): lines 90-91 are missing here — presumably the default
  // SrcValueKind parameter and closing of this declaration. Confirm upstream.
89 void EmitFinalDestCopy(QualType type, const LValue &src,
92 void EmitFinalDestCopy(QualType type, RValue src);
93 void EmitCopy(QualType type, const AggValueSlot &dest,
94 const AggValueSlot &src);
95
96 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
97 Expr *ExprToVisit, ArrayRef<Expr *> Args,
98 Expr *ArrayFiller);
99
  // NOTE(review): lines 100 and 102-103 are missing — the enclosing helper's
  // signature and the branch bodies around this GC check are not visible.
101 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
104 }
105
106 bool TypeRequiresGCollection(QualType T);
107
108 //===--------------------------------------------------------------------===//
109 // Visitor Methods
110 //===--------------------------------------------------------------------===//
111
112 void Visit(Expr *E) {
113 ApplyDebugLocation DL(CGF, E);
  // NOTE(review): line 114 is missing — likely the forwarding call to
  // StmtVisitor<AggExprEmitter>::Visit(E). Confirm upstream.
115 }
116
117 void VisitStmt(Stmt *S) {
118 CGF.ErrorUnsupported(S, "aggregate expression");
119 }
120 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
121 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
122 Visit(GE->getResultExpr());
123 }
124 void VisitCoawaitExpr(CoawaitExpr *E) {
125 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
126 }
127 void VisitCoyieldExpr(CoyieldExpr *E) {
128 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
129 }
130 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
131 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
132 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
133 return Visit(E->getReplacement());
134 }
135
  // Try constant-folding a ConstantExpr straight into the destination slot;
  // fall back to visiting the subexpression when folding fails.
136 void VisitConstantExpr(ConstantExpr *E) {
137 EnsureDest(E->getType());
138
139 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
  // NOTE(review): lines 140 and 145 are missing — the head and tail of the
  // store call that writes Result into Dest. Confirm upstream.
141 Result, Dest.getAddress(),
142 llvm::TypeSize::getFixed(
143 Dest.getPreferredSize(CGF.getContext(), E->getType())
144 .getQuantity()),
146 return;
147 }
148 return Visit(E->getSubExpr());
149 }
150
151 // l-values.
152 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
153 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
154 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
155 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
156 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
157 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
158 EmitAggLoadOfLValue(E);
159 }
160 void VisitPredefinedExpr(const PredefinedExpr *E) {
161 EmitAggLoadOfLValue(E);
162 }
163
164 // Operators.
165 void VisitCastExpr(CastExpr *E);
166 void VisitCallExpr(const CallExpr *E);
167 void VisitStmtExpr(const StmtExpr *E);
168 void VisitBinaryOperator(const BinaryOperator *BO);
169 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
170 void VisitBinAssign(const BinaryOperator *E);
171 void VisitBinComma(const BinaryOperator *E);
172 void VisitBinCmp(const BinaryOperator *E);
173 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
174 Visit(E->getSemanticForm());
175 }
176
177 void VisitObjCMessageExpr(ObjCMessageExpr *E);
178 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
179 EmitAggLoadOfLValue(E);
180 }
181
182 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
183 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
184 void VisitChooseExpr(const ChooseExpr *CE);
185 void VisitInitListExpr(InitListExpr *E);
186 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
187 FieldDecl *InitializedFieldInUnion,
188 Expr *ArrayFiller);
189 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
190 llvm::Value *outerBegin = nullptr);
191 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
192 void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
193 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
  // NOTE(review): line 194 is missing — likely a CXXDefaultArgExprScope RAII
  // object that establishes the default-argument context. Confirm upstream.
195 Visit(DAE->getExpr());
196 }
197 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
  // NOTE(review): line 198 is missing — likely the matching
  // CXXDefaultInitExprScope RAII object. Confirm upstream.
199 Visit(DIE->getExpr());
200 }
201 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
202 void VisitCXXConstructExpr(const CXXConstructExpr *E);
203 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
204 void VisitLambdaExpr(LambdaExpr *E);
205 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
206 void VisitExprWithCleanups(ExprWithCleanups *E);
207 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
208 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
209 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
210 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
211
212 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
213 if (E->isGLValue()) {
  // NOTE(review): line 214 is missing — presumably the LValue LV =
  // CGF.EmitPseudoObjectLValue(E) that the next line copies from.
215 return EmitFinalDestCopy(E->getType(), LV);
216 }
217
218 AggValueSlot Slot = EnsureSlot(E->getType());
219 bool NeedsDestruction =
220 !Slot.isExternallyDestructed() &&
  // NOTE(review): lines 221, 223 and 226 are missing — the rest of the
  // destruction predicate, the setExternallyDestructed() call, and the
  // destroy call's head, respectively. Confirm upstream.
222 if (NeedsDestruction)
224 CGF.EmitPseudoObjectRValue(E, Slot);
225 if (NeedsDestruction)
227 E->getType());
228 }
229
230 void VisitVAArgExpr(VAArgExpr *E);
231 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
232 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
233 Expr *ArrayFiller);
234
235 void EmitInitializationToLValue(Expr *E, LValue Address);
236 void EmitNullInitializationToLValue(LValue Address);
237 // case Expr::ChooseExprClass:
238 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
239 void VisitAtomicExpr(AtomicExpr *E) {
240 RValue Res = CGF.EmitAtomicExpr(E);
241 EmitFinalDestCopy(E->getType(), Res);
242 }
243 void VisitPackIndexingExpr(PackIndexingExpr *E) {
244 Visit(E->getSelectedExpr());
245 }
246};
247} // end anonymous namespace.
248
249//===----------------------------------------------------------------------===//
250// Utilities
251//===----------------------------------------------------------------------===//
252
253/// EmitAggLoadOfLValue - Given an expression with aggregate type that
254/// represents a value lvalue, this method emits the address of the lvalue,
255/// then loads the result into DestPtr.
// Emits the lvalue for E, then copies the aggregate it designates into Dest.
256void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
257 LValue LV = CGF.EmitLValue(E);
258
259 // If the type of the l-value is atomic, then do an atomic load.
  // NOTE(review): line 260 is missing from this extraction — presumably the
  // `if` condition guarding the atomic-load path. Confirm upstream.
261 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
262 return;
263 }
264
  // Non-atomic case: plain final copy into the destination slot.
265 EmitFinalDestCopy(E->getType(), LV);
266}
267
268/// True if the given aggregate type requires special GC API calls.
269bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
270 // Only record types have members that might require garbage collection.
271 const auto *Record = T->getAsRecordDecl();
272 if (!Record)
273 return false;
274
275 // Don't mess with non-trivial C++ types.
276 if (isa<CXXRecordDecl>(Record) &&
277 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
278 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
279 return false;
280
281 // Check whether the type has an object member.
282 return Record->hasObjectMember();
283}
284
// Runs EmitCall with a usable ReturnValueSlot: either Dest itself, or a
// temporary (with lifetime markers and an EH cleanup) that is then copied
// into Dest. See the declaration's comment for the full contract.
285void AggExprEmitter::withReturnValueSlot(
286 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
287 QualType RetTy = E->getType();
288 bool RequiresDestruction =
289 !Dest.isExternallyDestructed() &&
  // NOTE(review): line 290 is missing — presumably the rest of this
  // predicate (a check that RetTy has a non-trivial C-struct destructor).
291
292 // If it makes no observable difference, save a memcpy + temporary.
293 //
294 // We need to always provide our own temporary if destruction is required.
295 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
296 // its lifetime before we have the chance to emit a proper destructor call.
297 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
298 (RequiresDestruction && Dest.isIgnored());
299
300 Address RetAddr = Address::invalid();
301
302 EHScopeStack::stable_iterator LifetimeEndBlock;
303 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
304 if (!UseTemp) {
305 RetAddr = Dest.getAddress();
306 } else {
307 RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
308 if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
309 LifetimeStartInst =
310 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
311 assert(LifetimeStartInst->getIntrinsicID() ==
312 llvm::Intrinsic::lifetime_start &&
313 "Last insertion wasn't a lifetime.start?");
314
  // NOTE(review): line 315 is missing — presumably the head of the
  // pushFullExprCleanup call that registers the lifetime-end cleanup.
316 NormalEHLifetimeMarker, RetAddr);
317 LifetimeEndBlock = CGF.EHStack.stable_begin();
318 }
319 }
320
321 RValue Src =
322 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
323 Dest.isExternallyDestructed()));
324
325 if (!UseTemp)
326 return;
327
  // The temporary must be distinct from the real destination, or the copy
  // below would be a self-copy.
328 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
329 Src.getAggregatePointer(E->getType(), CGF));
330 EmitFinalDestCopy(E->getType(), Src);
331
332 if (!RequiresDestruction && LifetimeStartInst) {
333 // If there's no dtor to run, the copy was the last use of our temporary.
334 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
335 // eagerly.
336 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
337 CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
338 }
339}
340
341/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
342void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
343 assert(src.isAggregate() && "value must be aggregate value!");
344 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
345 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
346}
347
348/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
// L-value overload: copies (or moves) `src` into Dest, dispatching to the
// non-trivial C-struct copy/move helpers when required.
349void AggExprEmitter::EmitFinalDestCopy(
350 QualType type, const LValue &src,
351 CodeGenFunction::ExprValueKind SrcValueKind) {
352 // If Dest is ignored, then we're evaluating an aggregate expression
353 // in a context that doesn't care about the result. Note that loads
354 // from volatile l-values force the existence of a non-ignored
355 // destination.
356 if (Dest.isIgnored())
357 return;
358
359 // Copy non-trivial C structs here.
360 LValue DstLV = CGF.MakeAddrLValue(
361 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
362
363 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
364 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
365 if (Dest.isPotentiallyAliased())
366 CGF.callCStructMoveAssignmentOperator(DstLV, src);
367 else
368 CGF.callCStructMoveConstructor(DstLV, src);
369 return;
370 }
371 } else {
372 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
373 if (Dest.isPotentiallyAliased())
374 CGF.callCStructCopyAssignmentOperator(DstLV, src);
375 else
376 CGF.callCStructCopyConstructor(DstLV, src);
377 return;
378 }
379 }
380
  // NOTE(review): lines 381-383 are missing — presumably the
  // AggValueSlot::forLValue(...) construction of `srcAgg` used below.
384 EmitCopy(type, Dest, srcAgg);
385}
386
387/// Perform a copy from the source into the destination.
388///
389/// \param type - the type of the aggregate being copied; qualifiers are
390/// ignored
391void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
392 const AggValueSlot &src) {
  // GC'd destinations must go through the ObjC runtime's collectable memmove.
393 if (dest.requiresGCollection()) {
394 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
395 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
  // NOTE(review): line 396 is missing — presumably the head of the
  // CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(...) call these
  // arguments belong to. Confirm upstream.
397 dest.getAddress(),
398 src.getAddress(),
399 size);
400 return;
401 }
402
403 // If the result of the assignment is used, copy the LHS there also.
404 // It's volatile if either side is. Use the minimum alignment of
405 // the two sides.
406 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
407 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
408 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
409 dest.isVolatile() || src.isVolatile());
410}
411
412/// Emit the initializer for a std::initializer_list initialized with a
413/// real initializer list.
414void
415AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
416 // Emit an array containing the elements. The array is externally destructed
417 // if the std::initializer_list object is.
418 ASTContext &Ctx = CGF.getContext();
419 LValue Array = CGF.EmitLValue(E->getSubExpr());
420 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
421 Address ArrayPtr = Array.getAddress();
422
424 Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
425 assert(ArrayType && "std::initializer_list constructed from non-array");
426
427 auto *Record = E->getType()->castAsRecordDecl();
428 RecordDecl::field_iterator Field = Record->field_begin();
429 assert(Field != Record->field_end() &&
430 Ctx.hasSameType(Field->getType()->getPointeeType(),
432 "Expected std::initializer_list first field to be const E *");
433
434 // Start pointer.
435 AggValueSlot Dest = EnsureSlot(E->getType());
436 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
437 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
438 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
439 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
440 ++Field;
441 assert(Field != Record->field_end() &&
442 "Expected std::initializer_list to have two fields");
443
444 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
445 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
446 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
447 // Length.
448 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
449
450 } else {
451 // End pointer.
452 assert(Field->getType()->isPointerType() &&
453 Ctx.hasSameType(Field->getType()->getPointeeType(),
455 "Expected std::initializer_list second field to be const E *");
456 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
457 llvm::Value *IdxEnd[] = { Zero, Size };
458 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
459 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
460 "arrayend");
461 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
462 }
463
464 assert(++Field == Record->field_end() &&
465 "Expected std::initializer_list to only have two fields");
466}
467
468/// Determine if E is a trivial array filler, that is, one that is
469/// equivalent to zero-initialization.
470static bool isTrivialFiller(Expr *E) {
471 if (!E)
472 return true;
473
474 if (isa<ImplicitValueInitExpr>(E))
475 return true;
476
477 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
478 if (ILE->getNumInits())
479 return false;
480 return isTrivialFiller(ILE->getArrayFiller());
481 }
482
483 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
484 return Cons->getConstructor()->isDefaultConstructor() &&
485 Cons->getConstructor()->isTrivial();
486
487 // FIXME: Are there other cases where we can avoid emitting an initializer?
488 return false;
489}
490
  // NOTE(review): line 491 is missing — the function's name and first
  // parameters are not visible. From the assert text this is presumably the
  // HLSL aggregate *splat* cast helper (scalar source broadcast into every
  // flattened destination element). Confirm against upstream.
492 QualType DestTy, llvm::Value *SrcVal,
493 QualType SrcTy, SourceLocation Loc) {
494 // Flatten our destination
495 SmallVector<QualType> DestTypes; // Flattened type
  // NOTE(review): line 496 is missing — presumably the StoreGEPList
  // declaration the next comment refers to.
497 // ^^ Flattened accesses to DestVal we want to store into
498 CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
499
500 assert(SrcTy->isScalarType() && "Invalid HLSL Aggregate splat cast.");
  // Convert the single scalar once per destination element and store it.
501 for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; ++I) {
502 llvm::Value *Cast =
503 CGF.EmitScalarConversion(SrcVal, SrcTy, DestTypes[I], Loc);
504
505 // store back
  // A non-null Idx means the slot is a vector lane: load the vector,
  // insert the element, and store the whole vector back.
506 llvm::Value *Idx = StoreGEPList[I].second;
507 if (Idx) {
508 llvm::Value *V =
509 CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
510 Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
511 }
512 CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
513 }
514}
515
516// emit a flat cast where the RHS is a scalar, including vector
  // NOTE(review): line 517 is missing — the function's name and first
  // parameters are not visible; the source is asserted to be a vector whose
  // elements are distributed over the flattened destination. Confirm upstream.
518 QualType DestTy, llvm::Value *SrcVal,
519 QualType SrcTy, SourceLocation Loc) {
520 // Flatten our destination
521 SmallVector<QualType, 16> DestTypes; // Flattened type
  // NOTE(review): line 522 is missing — presumably the StoreGEPList
  // declaration the next comment refers to.
523 // ^^ Flattened accesses to DestVal we want to store into
524 CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
525
526 assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting.");
527 const VectorType *VT = SrcTy->getAs<VectorType>();
528 SrcTy = VT->getElementType();
529 assert(StoreGEPList.size() <= VT->getNumElements() &&
530 "Cannot perform HLSL flat cast when vector source \
531 object has less elements than flattened destination \
532 object.");
  // Extract the I-th source lane, convert it, and store it into the I-th
  // flattened destination slot.
533 for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) {
534 llvm::Value *Load = CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load");
535 llvm::Value *Cast =
536 CGF.EmitScalarConversion(Load, SrcTy, DestTypes[I], Loc);
537
538 // store back
  // Non-null Idx ⇒ destination slot is a vector lane; read-modify-write.
539 llvm::Value *Idx = StoreGEPList[I].second;
540 if (Idx) {
541 llvm::Value *V =
542 CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
543 Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
544 }
545 CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
546 }
547}
548
549// emit a flat cast where the RHS is an aggregate
  // NOTE(review): line 550 is missing — the function's name and first
  // parameters are not visible; both sides are flattened and converted
  // element-by-element. Confirm against upstream.
551 QualType DestTy, Address SrcVal,
552 QualType SrcTy, SourceLocation Loc) {
553 // Flatten our destination
554 SmallVector<QualType, 16> DestTypes; // Flattened type
  // NOTE(review): line 555 is missing — presumably the StoreGEPList
  // declaration the next comment refers to.
556 // ^^ Flattened accesses to DestVal we want to store into
557 CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
558 // Flatten our src
559 SmallVector<QualType, 16> SrcTypes; // Flattened type
  // NOTE(review): line 560 is missing — presumably the LoadGEPList
  // declaration the next comment refers to.
561 // ^^ Flattened accesses to SrcVal we want to load from
562 CGF.FlattenAccessAndType(SrcVal, SrcTy, LoadGEPList, SrcTypes);
563
564 assert(StoreGEPList.size() <= LoadGEPList.size() &&
565 "Cannot perform HLSL flat cast when flattened source object \
566 has less elements than flattened destination object.");
567 // apply casts to what we load from LoadGEPList
568 // and store result in Dest
569 for (unsigned I = 0, E = StoreGEPList.size(); I < E; I++) {
  // Source side: if Idx is non-null, the value lives in a vector lane.
570 llvm::Value *Idx = LoadGEPList[I].second;
571 llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
572 Load =
573 Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
574 llvm::Value *Cast =
575 CGF.EmitScalarConversion(Load, SrcTypes[I], DestTypes[I], Loc);
576
577 // store back
  // Destination side: same vector-lane handling as the other helpers.
578 Idx = StoreGEPList[I].second;
579 if (Idx) {
580 llvm::Value *V =
581 CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
582 Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
583 }
584 CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
585 }
586}
587
588/// Emit initialization of an array from an initializer list. ExprToVisit must
589/// be either an InitListEpxr a CXXParenInitListExpr.
// Strategy: (1) try a single copy from a private constant global when the
// initializer is large and trivially copyable; otherwise (2) emit each
// explicit initializer in place (with an EH cleanup for partially-built
// arrays of destructible elements), then (3) fill the remainder with the
// array filler (or zero-init) via an emitted loop.
590void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
591 QualType ArrayQTy, Expr *ExprToVisit,
592 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
593 uint64_t NumInitElements = Args.size();
594
595 uint64_t NumArrayElements = AType->getNumElements();
  // #embed expressions expand to many data elements; account for them,
  // clamping at the array length.
596 for (const auto *Init : Args) {
597 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
598 NumInitElements += Embed->getDataElementCount() - 1;
599 if (NumInitElements > NumArrayElements) {
600 NumInitElements = NumArrayElements;
601 break;
602 }
603 }
604 }
605
606 assert(NumInitElements <= NumArrayElements);
607
608 QualType elementType =
609 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
610 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
611 CharUnits elementAlign =
612 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
613 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
614
615 // Consider initializing the array by copying from a global. For this to be
616 // more efficient than per-element initialization, the size of the elements
617 // with explicit initializers should be large enough.
618 if (NumInitElements * elementSize.getQuantity() > 16 &&
619 elementType.isTriviallyCopyableType(CGF.getContext())) {
620 CodeGen::CodeGenModule &CGM = CGF.CGM;
  // NOTE(review): line 621 is missing — presumably the ConstantEmitter
  // `Emitter` used below. Confirm upstream.
622 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
623 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
  // NOTE(review): line 624 is missing — presumably the target's global
  // constant address space argument closing this call.
625 LangAS AS = GVArrayQTy.getAddressSpace();
626 if (llvm::Constant *C =
627 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
628 auto GV = new llvm::GlobalVariable(
629 CGM.getModule(), C->getType(),
630 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
631 "constinit",
632 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
  // NOTE(review): line 633 is missing — presumably the address-space
  // argument terminating the GlobalVariable constructor call.
634 Emitter.finalize(GV);
635 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
636 GV->setAlignment(Align.getAsAlign());
637 Address GVAddr(GV, GV->getValueType(), Align);
638 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
639 return;
640 }
641 }
642
643 // Exception safety requires us to destroy all the
644 // already-constructed members if an initializer throws.
645 // For that, we'll need an EH cleanup.
646 QualType::DestructionKind dtorKind = elementType.isDestructedType();
647 Address endOfInit = Address::invalid();
  // NOTE(review): line 648 is missing here. Confirm upstream.
649
650 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
651 if (dtorKind) {
652 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
653 // In principle we could tell the cleanup where we are more
654 // directly, but the control flow can get so varied here that it
655 // would actually be quite complex. Therefore we go through an
656 // alloca.
657 llvm::Instruction *dominatingIP =
658 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
659 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
660 "arrayinit.endOfInit");
661 Builder.CreateStore(begin, endOfInit);
662 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
663 elementAlign,
664 CGF.getDestroyer(dtorKind));
665 cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
666 .AddAuxAllocas(allocaTracker.Take());
667
  // NOTE(review): line 668 is missing — presumably the head of the deferred
  // cleanup-deactivation push this initializer list belongs to.
669 {CGF.EHStack.stable_begin(), dominatingIP});
670 }
671
672 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
673
  // Emit one element at ArrayIndex; shared between plain initializers and
  // per-datum #embed expansion.
674 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
675 llvm::Value *element = begin;
676 if (ArrayIndex > 0) {
677 element = Builder.CreateInBoundsGEP(
678 llvmElementType, begin,
679 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
680
681 // Tell the cleanup that it needs to destroy up to this
682 // element. TODO: some of these stores can be trivially
683 // observed to be unnecessary.
684 if (endOfInit.isValid())
685 Builder.CreateStore(element, endOfInit);
686 }
687
688 LValue elementLV = CGF.MakeAddrLValue(
689 Address(element, llvmElementType, elementAlign), elementType);
690 EmitInitializationToLValue(Init, elementLV);
691 return true;
692 };
693
694 unsigned ArrayIndex = 0;
695 // Emit the explicit initializers.
696 for (uint64_t i = 0; i != NumInitElements; ++i) {
697 if (ArrayIndex >= NumInitElements)
698 break;
699 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
700 EmbedS->doForEachDataElement(Emit, ArrayIndex);
701 } else {
702 Emit(Args[i], ArrayIndex);
703 ArrayIndex++;
704 }
705 }
706
707 // Check whether there's a non-trivial array-fill expression.
708 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
709
710 // Any remaining elements need to be zero-initialized, possibly
711 // using the filler expression. We can skip this if the we're
712 // emitting to zeroed memory.
713 if (NumInitElements != NumArrayElements &&
714 !(Dest.isZeroed() && hasTrivialFiller &&
715 CGF.getTypes().isZeroInitializable(elementType))) {
716
717 // Use an actual loop. This is basically
718 // do { *array++ = filler; } while (array != end);
719
720 // Advance to the start of the rest of the array.
721 llvm::Value *element = begin;
722 if (NumInitElements) {
723 element = Builder.CreateInBoundsGEP(
724 llvmElementType, element,
725 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
726 "arrayinit.start");
727 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
728 }
729
730 // Compute the end of the array.
731 llvm::Value *end = Builder.CreateInBoundsGEP(
732 llvmElementType, begin,
733 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
734
735 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
736 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
737
738 // Jump into the body.
739 CGF.EmitBlock(bodyBB);
740 llvm::PHINode *currentElement =
741 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
742 currentElement->addIncoming(element, entryBB);
743
744 // Emit the actual filler expression.
745 {
746 // C++1z [class.temporary]p5:
747 // when a default constructor is called to initialize an element of
748 // an array with no corresponding initializer [...] the destruction of
749 // every temporary created in a default argument is sequenced before
750 // the construction of the next array element, if any
751 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
752 LValue elementLV = CGF.MakeAddrLValue(
753 Address(currentElement, llvmElementType, elementAlign), elementType);
754 if (ArrayFiller)
755 EmitInitializationToLValue(ArrayFiller, elementLV);
756 else
757 EmitNullInitializationToLValue(elementLV);
758 }
759
760 // Move on to the next element.
761 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
762 llvmElementType, currentElement, one, "arrayinit.next");
763
764 // Tell the EH cleanup that we finished with the last element.
765 if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
766
767 // Leave the loop if we're done.
768 llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
769 "arrayinit.done");
770 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
771 Builder.CreateCondBr(done, endBB, bodyBB);
772 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
773
774 CGF.EmitBlock(endBB);
775 }
776}
777
778//===----------------------------------------------------------------------===//
779// Visitor Methods
780//===----------------------------------------------------------------------===//
781
782void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
783 Visit(E->getSubExpr());
784}
785
786void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
787 // If this is a unique OVE, just visit its source expression.
788 if (e->isUnique())
789 Visit(e->getSourceExpr());
790 else
791 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
792}
793
794void
795AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
796 if (Dest.isPotentiallyAliased() &&
797 E->getType().isPODType(CGF.getContext())) {
798 // For a POD type, just emit a load of the lvalue + a copy, because our
799 // compound literal might alias the destination.
800 EmitAggLoadOfLValue(E);
801 return;
802 }
803
804 AggValueSlot Slot = EnsureSlot(E->getType());
805
806 // Block-scope compound literals are destroyed at the end of the enclosing
807 // scope in C.
808 bool Destruct =
809 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
810 if (Destruct)
  // NOTE(review): line 811 is missing — presumably the statement marking the
  // slot externally destructed before emitting the initializer.
812
813 CGF.EmitAggExpr(E->getInitializer(), Slot);
814
815 if (Destruct)
  // NOTE(review): lines 816-817 are missing — presumably the DtorKind check
  // and the head of the push-destroy call whose arguments follow.
818 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
819 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
820}
821
822/// Attempt to look through various unimportant expressions to find a
823/// cast of the given kind.
824static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
825 op = op->IgnoreParenNoopCasts(ctx);
826 if (auto castE = dyn_cast<CastExpr>(op)) {
827 if (castE->getCastKind() == kind)
828 return castE->getSubExpr();
829 }
830 return nullptr;
831}
832
// Dispatch aggregate emission for a cast expression based on its CastKind.
// NOTE(review): this listing drops several continuation lines (marked below);
// verify each marked spot against upstream CGExprAgg.cpp before relying on it.
833 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
834 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
835 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
836 switch (E->getCastKind()) {
837 case CK_Dynamic: {
838 // FIXME: Can this actually happen? We have no test coverage for it.
839 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
840 LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
// NOTE(review): continuation line 841 (second EmitCheckedLValue argument) is
// missing from this listing.
842 // FIXME: Do we also need to handle property references here?
843 if (LV.isSimple())
844 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
845 else
846 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
847 
848 if (!Dest.isIgnored())
849 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
850 break;
851 }
852 
853 case CK_ToUnion: {
854 // Evaluate even if the destination is ignored.
855 if (Dest.isIgnored()) {
856 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
857 /*ignoreResult=*/true);
858 break;
859 }
860 
861 // GCC union extension
862 QualType Ty = E->getSubExpr()->getType();
863 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
864 EmitInitializationToLValue(E->getSubExpr(),
865 CGF.MakeAddrLValue(CastPtr, Ty));
866 break;
867 }
868 
869 case CK_LValueToRValueBitCast: {
870 if (Dest.isIgnored()) {
871 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
872 /*ignoreResult=*/true);
873 break;
874 }
875 
876 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
877 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
878 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
879 llvm::Value *SizeVal = llvm::ConstantInt::get(
880 CGF.SizeTy,
// NOTE(review): continuation line 881 (the byte-size expression) is missing
// from this listing.
882 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
883 break;
884 }
885 
886 case CK_DerivedToBase:
887 case CK_BaseToDerived:
888 case CK_UncheckedDerivedToBase: {
889 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
890 "should have been unpacked before we got here");
891 }
892 
893 case CK_NonAtomicToAtomic:
894 case CK_AtomicToNonAtomic: {
895 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
896 
897 // Determine the atomic and value types.
898 QualType atomicType = E->getSubExpr()->getType();
899 QualType valueType = E->getType();
900 if (isToAtomic) std::swap(atomicType, valueType);
901 
902 assert(atomicType->isAtomicType());
903 assert(CGF.getContext().hasSameUnqualifiedType(valueType,
904 atomicType->castAs<AtomicType>()->getValueType()));
905 
906 // Just recurse normally if we're ignoring the result or the
907 // atomic type doesn't change representation.
908 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
909 return Visit(E->getSubExpr());
910 }
911 
912 CastKind peepholeTarget =
913 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
914 
915 // These two cases are reverses of each other; try to peephole them.
916 if (Expr *op =
917 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
918 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
919 E->getType()) &&
920 "peephole significantly changed types?");
921 return Visit(op);
922 }
923 
924 // If we're converting an r-value of non-atomic type to an r-value
925 // of atomic type, just emit directly into the relevant sub-object.
926 if (isToAtomic) {
927 AggValueSlot valueDest = Dest;
928 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
929 // Zero-initialize. (Strictly speaking, we only need to initialize
930 // the padding at the end, but this is simpler.)
931 if (!Dest.isZeroed())
// NOTE(review): line 932 (the zero-initialization call) is missing from this
// listing.
933 
934 // Build a GEP to refer to the subobject.
935 Address valueAddr =
936 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
937 valueDest = AggValueSlot::forAddr(valueAddr,
938 valueDest.getQualifiers(),
939 valueDest.isExternallyDestructed(),
940 valueDest.requiresGCollection(),
941 valueDest.isPotentiallyAliased(),
// NOTE(review): lines 942-943 (remaining AggValueSlot::forAddr arguments) are
// missing from this listing.
944 }
945 
946 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
947 return;
948 }
949 
950 // Otherwise, we're converting an atomic type to a non-atomic type.
951 // Make an atomic temporary, emit into that, and then copy the value out.
952 AggValueSlot atomicSlot =
953 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
954 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
955 
956 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
957 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
958 return EmitFinalDestCopy(valueType, rvalue);
959 }
960 case CK_AddressSpaceConversion:
961 return Visit(E->getSubExpr());
962 
963 case CK_LValueToRValue:
964 // If we're loading from a volatile type, force the destination
965 // into existence.
966 if (E->getSubExpr()->getType().isVolatileQualified()) {
967 bool Destruct =
968 !Dest.isExternallyDestructed() &&
// NOTE(review): line 969 (rest of the Destruct condition) is missing from
// this listing; lines 971, 976 (pushDestroy-style calls) are likewise elided.
970 if (Destruct)
972 EnsureDest(E->getType());
973 Visit(E->getSubExpr());
974 
975 if (Destruct)
977 E->getType());
978 
979 return;
980 }
981 
982 [[fallthrough]];
983 
984 case CK_HLSLArrayRValue:
985 Visit(E->getSubExpr());
986 break;
987 case CK_HLSLAggregateSplatCast: {
988 Expr *Src = E->getSubExpr();
989 QualType SrcTy = Src->getType();
990 RValue RV = CGF.EmitAnyExpr(Src);
991 QualType DestTy = E->getType();
992 Address DestVal = Dest.getAddress();
// NOTE(review): line 993 (declaration of `Loc`, used below) is missing from
// this listing.
994 
995 assert(RV.isScalar() && "RHS of HLSL splat cast must be a scalar.");
996 llvm::Value *SrcVal = RV.getScalarVal();
997 EmitHLSLAggregateSplatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
998 break;
999 }
1000 case CK_HLSLElementwiseCast: {
1001 Expr *Src = E->getSubExpr();
1002 QualType SrcTy = Src->getType();
1003 RValue RV = CGF.EmitAnyExpr(Src);
1004 QualType DestTy = E->getType();
1005 Address DestVal = Dest.getAddress();
// NOTE(review): line 1006 (declaration of `Loc`, used below) is missing from
// this listing.
1007 
1008 if (RV.isScalar()) {
1009 llvm::Value *SrcVal = RV.getScalarVal();
1010 EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
1011 } else {
1012 assert(RV.isAggregate() &&
1013 "Can't perform HLSL Aggregate cast on a complex type.");
1014 Address SrcVal = RV.getAggregateAddress();
1015 EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
1016 }
1017 break;
1018 }
1019 case CK_NoOp:
1020 case CK_UserDefinedConversion:
1021 case CK_ConstructorConversion:
1022 assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
1023 E->getType()) &&
1024 "Implicit cast types must be compatible")
1025 Visit(E->getSubExpr());
1026 break;
1027 
1028 case CK_LValueBitCast:
1029 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
1030 
1031 case CK_Dependent:
1032 case CK_BitCast:
1033 case CK_ArrayToPointerDecay:
1034 case CK_FunctionToPointerDecay:
1035 case CK_NullToPointer:
1036 case CK_NullToMemberPointer:
1037 case CK_BaseToDerivedMemberPointer:
1038 case CK_DerivedToBaseMemberPointer:
1039 case CK_MemberPointerToBoolean:
1040 case CK_ReinterpretMemberPointer:
1041 case CK_IntegralToPointer:
1042 case CK_PointerToIntegral:
1043 case CK_PointerToBoolean:
1044 case CK_ToVoid:
1045 case CK_VectorSplat:
1046 case CK_IntegralCast:
1047 case CK_BooleanToSignedIntegral:
1048 case CK_IntegralToBoolean:
1049 case CK_IntegralToFloating:
1050 case CK_FloatingToIntegral:
1051 case CK_FloatingToBoolean:
1052 case CK_FloatingCast:
1053 case CK_CPointerToObjCPointerCast:
1054 case CK_BlockPointerToObjCPointerCast:
1055 case CK_AnyPointerToBlockPointerCast:
1056 case CK_ObjCObjectLValueCast:
1057 case CK_FloatingRealToComplex:
1058 case CK_FloatingComplexToReal:
1059 case CK_FloatingComplexToBoolean:
1060 case CK_FloatingComplexCast:
1061 case CK_FloatingComplexToIntegralComplex:
1062 case CK_IntegralRealToComplex:
1063 case CK_IntegralComplexToReal:
1064 case CK_IntegralComplexToBoolean:
1065 case CK_IntegralComplexCast:
1066 case CK_IntegralComplexToFloatingComplex:
1067 case CK_ARCProduceObject:
1068 case CK_ARCConsumeObject:
1069 case CK_ARCReclaimReturnedObject:
1070 case CK_ARCExtendBlockObject:
1071 case CK_CopyAndAutoreleaseBlockObject:
1072 case CK_BuiltinFnToFnPtr:
1073 case CK_ZeroToOCLOpaqueType:
1074 case CK_MatrixCast:
1075 case CK_HLSLVectorTruncation:
1076 
1077 case CK_IntToOCLSampler:
1078 case CK_FloatingToFixedPoint:
1079 case CK_FixedPointToFloating:
1080 case CK_FixedPointCast:
1081 case CK_FixedPointToBoolean:
1082 case CK_FixedPointToIntegral:
1083 case CK_IntegralToFixedPoint:
1084 llvm_unreachable("cast kind invalid for aggregate types");
1085 }
1086 }
1087
1088void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
1089 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
1090 EmitAggLoadOfLValue(E);
1091 return;
1092 }
1093
1094 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
1095 return CGF.EmitCallExpr(E, Slot);
1096 });
1097}
1098
1099void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
1100 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
1101 return CGF.EmitObjCMessageExpr(E, Slot);
1102 });
1103}
1104
1105void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
1106 CGF.EmitIgnoredExpr(E->getLHS());
1107 Visit(E->getRHS());
1108}
1109
// Emit a GNU statement expression; the compound statement's last expression
// is emitted into the destination slot.
// NOTE(review): line 1111 (likely a StmtExprEvaluation scope) is missing
// from this listing; verify against upstream.
1110 void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1112 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
1113 }
1114
1119};
1120
// Emit a single scalar comparison (LHS <op> RHS) for operands of the given
// binary operator's LHS type, selecting float/signed/unsigned predicates.
// NOTE(review): line 1132 (the member-pointer comparison call head) is
// missing from this listing; verify against upstream.
1121 static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
1122 const BinaryOperator *E, llvm::Value *LHS,
1123 llvm::Value *RHS, CompareKind Kind,
1124 const char *NameSuffix = "") {
1125 QualType ArgTy = E->getLHS()->getType();
1126 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1127 ArgTy = CT->getElementType();
1128 
1129 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1130 assert(Kind == CK_Equal &&
1131 "member pointers may only be compared for equality");
1133 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1134 }
1135 
1136 // Compute the comparison instructions for the specified comparison kind.
1137 struct CmpInstInfo {
1138 const char *Name;
1139 llvm::CmpInst::Predicate FCmp;
1140 llvm::CmpInst::Predicate SCmp;
1141 llvm::CmpInst::Predicate UCmp;
1142 };
1143 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1144 using FI = llvm::FCmpInst;
1145 using II = llvm::ICmpInst;
1146 switch (Kind) {
1147 case CK_Less:
1148 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1149 case CK_Greater:
1150 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1151 case CK_Equal:
1152 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1153 }
1154 llvm_unreachable("Unrecognised CompareKind enum");
1155 }();
1156 
1157 if (ArgTy->hasFloatingRepresentation())
1158 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1159 llvm::Twine(InstInfo.Name) + NameSuffix);
1160 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1161 auto Inst =
1162 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1163 return Builder.CreateICmp(Inst, LHS, RHS,
1164 llvm::Twine(InstInfo.Name) + NameSuffix);
1165 }
1166 
1167 llvm_unreachable("unsupported aggregate binary expression should have "
1168 "already been handled");
1169 }
1170
// Emit a C++20 three-way comparison (<=>) whose result is a comparison
// category aggregate: compute the scalar comparisons, select the proper
// category constant, and store it into the category object's single field.
// NOTE(review): lines 1178 and 1244 are missing from this listing
// (initializer of CmpInfo and the FieldLV declaration); verify upstream.
1171 void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1172 using llvm::BasicBlock;
1173 using llvm::PHINode;
1174 using llvm::Value;
1175 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1176 E->getRHS()->getType()));
1177 const ComparisonCategoryInfo &CmpInfo =
1179 assert(CmpInfo.Record->isTriviallyCopyable() &&
1180 "cannot copy non-trivially copyable aggregate");
1181 
1182 QualType ArgTy = E->getLHS()->getType();
1183 
1184 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1185 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1186 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1187 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1188 }
1189 bool IsComplex = ArgTy->isAnyComplexType();
1190 
1191 // Evaluate the operands to the expression and extract their values.
1192 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1193 RValue RV = CGF.EmitAnyExpr(E);
1194 if (RV.isScalar())
1195 return {RV.getScalarVal(), nullptr};
1196 if (RV.isAggregate())
1197 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1198 assert(RV.isComplex());
1199 return RV.getComplexVal();
1200 };
1201 auto LHSValues = EmitOperand(E->getLHS()),
1202 RHSValues = EmitOperand(E->getRHS());
1203 
1204 auto EmitCmp = [&](CompareKind K) {
1205 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1206 K, IsComplex ? ".r" : "");
1207 if (!IsComplex)
1208 return Cmp;
1209 assert(K == CompareKind::CK_Equal);
1210 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1211 RHSValues.second, K, ".i");
1212 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1213 };
1214 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1215 return Builder.getInt(VInfo->getIntValue());
1216 };
1217 
1218 Value *Select;
1219 if (ArgTy->isNullPtrType()) {
1220 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1221 } else if (!CmpInfo.isPartial()) {
1222 Value *SelectOne =
1223 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1224 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1225 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1226 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1227 SelectOne, "sel.eq");
1228 } else {
1229 Value *SelectEq = Builder.CreateSelect(
1230 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1231 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1232 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1233 EmitCmpRes(CmpInfo.getGreater()),
1234 SelectEq, "sel.gt");
1235 Select = Builder.CreateSelect(
1236 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1237 }
1238 // Create the return value in the destination slot.
1239 EnsureDest(E->getType());
1240 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1241 
1242 // Emit the address of the first (and only) field in the comparison category
1243 // type, and initialize it from the constant integer value selected above.
1245 DestLV, *CmpInfo.Record->field_begin());
1246 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1247 
1248 // All done! The result is in the Dest slot.
1249 }
1250
1251void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1252 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1253 VisitPointerToDataMemberBinaryOperator(E);
1254 else
1255 CGF.ErrorUnsupported(E, "aggregate binary expression");
1256}
1257
// Emit a .*/->* expression by forming the member lvalue and copying it into
// the destination.
// NOTE(review): line 1260 (the LValue LV declaration) is missing from this
// listing; verify against upstream.
1258 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1259 const BinaryOperator *E) {
1261 EmitFinalDestCopy(E->getType(), LV);
1262 }
1263
1264/// Is the value of the given expression possibly a reference to or
1265/// into a __block variable?
1266static bool isBlockVarRef(const Expr *E) {
1267 // Make sure we look through parens.
1268 E = E->IgnoreParens();
1269
1270 // Check for a direct reference to a __block variable.
1271 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1272 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1273 return (var && var->hasAttr<BlocksAttr>());
1274 }
1275
1276 // More complicated stuff.
1277
1278 // Binary operators.
1279 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1280 // For an assignment or pointer-to-member operation, just care
1281 // about the LHS.
1282 if (op->isAssignmentOp() || op->isPtrMemOp())
1283 return isBlockVarRef(op->getLHS());
1284
1285 // For a comma, just care about the RHS.
1286 if (op->getOpcode() == BO_Comma)
1287 return isBlockVarRef(op->getRHS());
1288
1289 // FIXME: pointer arithmetic?
1290 return false;
1291
1292 // Check both sides of a conditional operator.
1293 } else if (const AbstractConditionalOperator *op
1294 = dyn_cast<AbstractConditionalOperator>(E)) {
1295 return isBlockVarRef(op->getTrueExpr())
1296 || isBlockVarRef(op->getFalseExpr());
1297
1298 // OVEs are required to support BinaryConditionalOperators.
1299 } else if (const OpaqueValueExpr *op
1300 = dyn_cast<OpaqueValueExpr>(E)) {
1301 if (const Expr *src = op->getSourceExpr())
1302 return isBlockVarRef(src);
1303
1304 // Casts are necessary to get things like (*(int*)&var) = foo().
1305 // We don't really care about the kind of cast here, except
1306 // we don't want to look through l2r casts, because it's okay
1307 // to get the *value* in a __block variable.
1308 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1309 if (cast->getCastKind() == CK_LValueToRValue)
1310 return false;
1311 return isBlockVarRef(cast->getSubExpr());
1312
1313 // Handle unary operators. Again, just aggressively look through
1314 // it, ignoring the operation.
1315 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1316 return isBlockVarRef(uop->getSubExpr());
1317
1318 // Look into the base of a field access.
1319 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1320 return isBlockVarRef(mem->getBase());
1321
1322 // Look into the base of a subscript.
1323 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1324 return isBlockVarRef(sub->getBase());
1325 }
1326
1327 return false;
1328}
1329
// Emit an aggregate assignment: handles the __block-variable ordering hazard,
// atomic destinations, and the final copy into Dest when the result is used.
// NOTE(review): this listing drops several continuation lines (1349, 1353,
// 1359, 1361-1362, 1372, 1380, 1382, 1394-1395); verify against upstream.
1330 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1331 ApplyAtomGroup Grp(CGF.getDebugInfo());
1332 // For an assignment to work, the value on the right has
1333 // to be compatible with the value on the left.
1334 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1335 E->getRHS()->getType())
1336 && "Invalid assignment");
1337 
1338 // If the LHS might be a __block variable, and the RHS can
1339 // potentially cause a block copy, we need to evaluate the RHS first
1340 // so that the assignment goes the right place.
1341 // This is pretty semantically fragile.
1342 if (isBlockVarRef(E->getLHS()) &&
1343 E->getRHS()->HasSideEffects(CGF.getContext())) {
1344 // Ensure that we have a destination, and evaluate the RHS into that.
1345 EnsureDest(E->getRHS()->getType());
1346 Visit(E->getRHS());
1347 
1348 // Now emit the LHS and copy into it.
1350 
1351 // That copy is an atomic copy if the LHS is atomic.
1352 if (LHS.getType()->isAtomicType() ||
1354 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1355 return;
1356 }
1357 
1358 EmitCopy(E->getLHS()->getType(),
1360 needsGC(E->getLHS()->getType()),
1363 Dest);
1364 return;
1365 }
1366 
1367 LValue LHS = CGF.EmitLValue(E->getLHS());
1368 
1369 // If we have an atomic type, evaluate into the destination and then
1370 // do an atomic copy.
1371 if (LHS.getType()->isAtomicType() ||
1373 EnsureDest(E->getRHS()->getType());
1374 Visit(E->getRHS());
1375 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1376 return;
1377 }
1378 
1379 // Codegen the RHS so that it stores directly into the LHS.
1381 LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1383 // A non-volatile aggregate destination might have volatile member.
1384 if (!LHSSlot.isVolatile() &&
1385 CGF.hasVolatileMember(E->getLHS()->getType()))
1386 LHSSlot.setVolatile(true);
1387 
1388 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1389 
1390 // Copy into the destination if the assignment isn't ignored.
1391 EmitFinalDestCopy(E->getType(), LHS);
1392 
1393 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1396 E->getType());
1397 }
1398
// Emit ?: for aggregates by branching and emitting each arm into the same
// destination slot, with careful handling of who destroys the result.
// NOTE(review): this listing drops several continuation lines (1406, 1408,
// 1416, 1422, 1425, 1440, 1446, 1450-1451); verify against upstream.
1399 void AggExprEmitter::
1400 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1401 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1402 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1403 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1404 
1405 // Bind the common expression if necessary.
1407 
1409 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1410 CGF.getProfileCount(E));
1411 
1412 // Save whether the destination's lifetime is externally managed.
1413 bool isExternallyDestructed = Dest.isExternallyDestructed();
1414 bool destructNonTrivialCStruct =
1415 !isExternallyDestructed &&
1417 isExternallyDestructed |= destructNonTrivialCStruct;
1418 Dest.setExternallyDestructed(isExternallyDestructed);
1419 
1420 eval.begin(CGF);
1421 CGF.EmitBlock(LHSBlock);
1423 CGF.incrementProfileCounter(E->getTrueExpr());
1424 else
1426 Visit(E->getTrueExpr());
1427 eval.end(CGF);
1428 
1429 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1430 CGF.Builder.CreateBr(ContBlock);
1431 
1432 // If the result of an agg expression is unused, then the emission
1433 // of the LHS might need to create a destination slot. That's fine
1434 // with us, and we can safely emit the RHS into the same slot, but
1435 // we shouldn't claim that it's already being destructed.
1436 Dest.setExternallyDestructed(isExternallyDestructed);
1437 
1438 eval.begin(CGF);
1439 CGF.EmitBlock(RHSBlock);
1441 CGF.incrementProfileCounter(E->getFalseExpr());
1442 Visit(E->getFalseExpr());
1443 eval.end(CGF);
1444 
1445 if (destructNonTrivialCStruct)
1447 E->getType());
1448 
1449 CGF.EmitBlock(ContBlock);
1452 }
1453
1454void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1455 Visit(CE->getChosenSubExpr());
1456}
1457
1458void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1459 Address ArgValue = Address::invalid();
1460 CGF.EmitVAArg(VE, ArgValue, Dest);
1461
1462 // If EmitVAArg fails, emit an error.
1463 if (!ArgValue.isValid()) {
1464 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1465 return;
1466 }
1467}
1468
// Emit a bound C++ temporary into the destination and, unless the caller
// already owns destruction, push the temporary's destructor cleanup.
// NOTE(review): line 1476 is missing from this listing; verify upstream.
1469 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1470 // Ensure that we have a slot, but if we already do, remember
1471 // whether it was externally destructed.
1472 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1473 EnsureDest(E->getType());
1474 
1475 // We're going to push a destructor if there isn't already one.
1477 
1478 Visit(E->getSubExpr());
1479 
1480 // Push that destructor we promised.
1481 if (!wasExternallyDestructed)
1482 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1483 }
1484
1485void
1486AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1487 AggValueSlot Slot = EnsureSlot(E->getType());
1488 CGF.EmitCXXConstructExpr(E, Slot);
1489}
1490
// Emit an inherited (using-declaration) constructor call into the slot.
// NOTE(review): line 1494 (the call head, presumably an inherited-ctor emit
// on CGF) is missing from this listing; verify upstream.
1491 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1492 const CXXInheritedCtorInitExpr *E) {
1493 AggValueSlot Slot = EnsureSlot(E->getType());
1495 E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1496 E->inheritedFromVBase(), E);
1497 }
1498
// Emit a lambda closure object: initialize each capture field of the
// closure class in order, handling VLA captures and per-field cleanups.
// NOTE(review): lines 1506 and 1526 are missing from this listing (likely a
// cleanup-scope declaration and a pushDestroy call head); verify upstream.
1499 void
1500 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1501 AggValueSlot Slot = EnsureSlot(E->getType());
1502 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1503 
1504 // We'll need to enter cleanup scopes in case any of the element
1505 // initializers throws an exception or contains branch out of the expressions.
1507 
1508 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1509 for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1510 e = E->capture_init_end();
1511 i != e; ++i, ++CurField) {
1512 // Emit initialization
1513 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1514 if (CurField->hasCapturedVLAType()) {
1515 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1516 continue;
1517 }
1518 
1519 EmitInitializationToLValue(*i, LV);
1520 
1521 // Push a destructor if necessary.
1522 if (QualType::DestructionKind DtorKind =
1523 CurField->getType().isDestructedType()) {
1524 assert(LV.isSimple());
1525 if (DtorKind)
1527 CurField->getType(),
1528 CGF.getDestroyer(DtorKind), false);
1529 }
1530 }
1531 }
1532
// Emit the wrapped expression; any temporaries it creates are cleaned up
// when the (elided) scope object goes out of scope.
// NOTE(review): line 1534 (likely a RunCleanupsScope declaration) is missing
// from this listing; verify upstream.
1533 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1535 Visit(E->getSubExpr());
1536 }
1537
1538void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1539 QualType T = E->getType();
1540 AggValueSlot Slot = EnsureSlot(T);
1541 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1542}
1543
1544void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1545 QualType T = E->getType();
1546 AggValueSlot Slot = EnsureSlot(T);
1547 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1548}
1549
1550 /// Determine whether the given cast kind is known to always convert values
1551 /// with all zero bits in their value representation to values with all zero
1552 /// bits in their value representation.
1553 static bool castPreservesZero(const CastExpr *CE) {
// Exhaustive switch over CastKind with no default, so adding a new cast kind
// produces a -Wswitch warning here rather than silently falling through.
1554 switch (CE->getCastKind()) {
1555 // No-ops.
1556 case CK_NoOp:
1557 case CK_UserDefinedConversion:
1558 case CK_ConstructorConversion:
1559 case CK_BitCast:
1560 case CK_ToUnion:
1561 case CK_ToVoid:
1562 // Conversions between (possibly-complex) integral, (possibly-complex)
1563 // floating-point, and bool.
1564 case CK_BooleanToSignedIntegral:
1565 case CK_FloatingCast:
1566 case CK_FloatingComplexCast:
1567 case CK_FloatingComplexToBoolean:
1568 case CK_FloatingComplexToIntegralComplex:
1569 case CK_FloatingComplexToReal:
1570 case CK_FloatingRealToComplex:
1571 case CK_FloatingToBoolean:
1572 case CK_FloatingToIntegral:
1573 case CK_IntegralCast:
1574 case CK_IntegralComplexCast:
1575 case CK_IntegralComplexToBoolean:
1576 case CK_IntegralComplexToFloatingComplex:
1577 case CK_IntegralComplexToReal:
1578 case CK_IntegralRealToComplex:
1579 case CK_IntegralToBoolean:
1580 case CK_IntegralToFloating:
1581 // Reinterpreting integers as pointers and vice versa.
1582 case CK_IntegralToPointer:
1583 case CK_PointerToIntegral:
1584 // Language extensions.
1585 case CK_VectorSplat:
1586 case CK_MatrixCast:
1587 case CK_NonAtomicToAtomic:
1588 case CK_AtomicToNonAtomic:
1589 case CK_HLSLVectorTruncation:
1590 case CK_HLSLElementwiseCast:
1591 case CK_HLSLAggregateSplatCast:
1592 return true;
1593 
1594 case CK_BaseToDerivedMemberPointer:
1595 case CK_DerivedToBaseMemberPointer:
1596 case CK_MemberPointerToBoolean:
1597 case CK_NullToMemberPointer:
1598 case CK_ReinterpretMemberPointer:
1599 // FIXME: ABI-dependent.
1600 return false;
1601 
1602 case CK_AnyPointerToBlockPointerCast:
1603 case CK_BlockPointerToObjCPointerCast:
1604 case CK_CPointerToObjCPointerCast:
1605 case CK_ObjCObjectLValueCast:
1606 case CK_IntToOCLSampler:
1607 case CK_ZeroToOCLOpaqueType:
1608 // FIXME: Check these.
1609 return false;
1610 
1611 case CK_FixedPointCast:
1612 case CK_FixedPointToBoolean:
1613 case CK_FixedPointToFloating:
1614 case CK_FixedPointToIntegral:
1615 case CK_FloatingToFixedPoint:
1616 case CK_IntegralToFixedPoint:
1617 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1618 return false;
1619 
1620 case CK_AddressSpaceConversion:
1621 case CK_BaseToDerived:
1622 case CK_DerivedToBase:
1623 case CK_Dynamic:
1624 case CK_NullToPointer:
1625 case CK_PointerToBoolean:
1626 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1627 // same representation in all involved address spaces.
1628 return false;
1629 
// These kinds either have side effects, change the value category, or
// otherwise cannot be assumed to keep an all-zero representation.
1630 case CK_ARCConsumeObject:
1631 case CK_ARCExtendBlockObject:
1632 case CK_ARCProduceObject:
1633 case CK_ARCReclaimReturnedObject:
1634 case CK_CopyAndAutoreleaseBlockObject:
1635 case CK_ArrayToPointerDecay:
1636 case CK_FunctionToPointerDecay:
1637 case CK_BuiltinFnToFnPtr:
1638 case CK_Dependent:
1639 case CK_LValueBitCast:
1640 case CK_LValueToRValue:
1641 case CK_LValueToRValueBitCast:
1642 case CK_UncheckedDerivedToBase:
1643 case CK_HLSLArrayRValue:
1644 return false;
1645 }
1646 llvm_unreachable("Unhandled clang::CastKind enum");
1647 }
1648
1649 /// isSimpleZero - If emitting this value will obviously just cause a store of
1650 /// zero to memory, return true. This can return false if uncertain, so it just
1651 /// handles simple cases.
// NOTE(review): lines 1668 and 1673 (continuations of the int() and null
// pointer checks) are missing from this listing; verify upstream.
1652 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1653 E = E->IgnoreParens();
// Strip casts that are known to map an all-zero value to an all-zero value.
1654 while (auto *CE = dyn_cast<CastExpr>(E)) {
1655 if (!castPreservesZero(CE))
1656 break;
1657 E = CE->getSubExpr()->IgnoreParens();
1658 }
1659 
1660 // 0
1661 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1662 return IL->getValue() == 0;
1663 // +0.0
1664 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1665 return FL->getValue().isPosZero();
1666 // int()
1667 if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1669 return true;
1670 // (int*)0 - Null pointer expressions.
1671 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1672 return ICE->getCastKind() == CK_NullToPointer &&
1674 !E->HasSideEffects(CGF.getContext());
1675 // '\0'
1676 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1677 return CL->getValue() == 0;
1678 
1679 // Otherwise, hard case: conservatively return false.
1680 return false;
1681 }
1682
1683
// Initialize the given lvalue from the expression, skipping work when the
// destination is already zeroed and the value is a simple zero.
// NOTE(review): line 1698 (the reference-binding RValue declaration) is
// missing from this listing; verify upstream.
1684 void
1685 AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1686 QualType type = LV.getType();
1687 // FIXME: Ignore result?
1688 // FIXME: Are initializers affected by volatile?
1689 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1690 // Storing "i32 0" to a zero'd memory location is a noop.
1691 return;
1692 } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1693 return EmitNullInitializationToLValue(LV);
1694 } else if (isa<NoInitExpr>(E)) {
1695 // Do nothing.
1696 return;
1697 } else if (type->isReferenceType()) {
1699 return CGF.EmitStoreThroughLValue(RV, LV);
1700 }
1701 
1702 CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
1703 }
1704
// Store the type's null value into the lvalue, skipping the store when the
// destination slot is already zeroed and the type is zero-initializable.
// NOTE(review): lines 1719 and 1728 (the bitfield store and the aggregate
// null-initialization calls) are missing from this listing; verify upstream.
1705 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1706 QualType type = lv.getType();
1707 
1708 // If the destination slot is already zeroed out before the aggregate is
1709 // copied into it, we don't have to emit any zeros here.
1710 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1711 return;
1712 
1713 if (CGF.hasScalarEvaluationKind(type)) {
1714 // For non-aggregates, we can store the appropriate null constant.
1715 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1716 // Note that the following is not equivalent to
1717 // EmitStoreThroughBitfieldLValue for ARC types.
1718 if (lv.isBitField()) {
1720 } else {
1721 assert(lv.isSimple());
1722 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1723 }
1724 } else {
1725 // There's a potential optimization opportunity in combining
1726 // memsets; that would be easy for arrays, but relatively
1727 // difficult for structures with the current code.
1729 }
1730 }
1731
1732void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1733 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1734 E->getInitializedFieldInUnion(),
1735 E->getArrayFiller());
1736}
1737
1738void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1739 if (E->hadArrayRangeDesignator())
1740 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1741
1742 if (E->isTransparent())
1743 return Visit(E->getInit(0));
1744
1745 VisitCXXParenListOrInitListExpr(
1746 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1747}
1748
1749void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1750 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1751 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1752#if 0
1753 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1754 // (Length of globals? Chunks of zeroed-out space?).
1755 //
1756 // If we can, prefer a copy from a global; this is a lot less code for long
1757 // globals, and it's easier for the current optimizers to analyze.
1758 if (llvm::Constant *C =
1759 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1760 llvm::GlobalVariable* GV =
1761 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1762 llvm::GlobalValue::InternalLinkage, C, "");
1763 EmitFinalDestCopy(ExprToVisit->getType(),
1764 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1765 return;
1766 }
1767#endif
1768
1769 // HLSL initialization lists in the AST are an expansion which can contain
1770 // side-effecting expressions wrapped in opaque value expressions. To properly
1771 // emit these we need to emit the opaque values before we emit the argument
1772 // expressions themselves. This is a little hacky, but it prevents us needing
1773 // to do a bigger AST-level change for a language feature that we need
1774 // deprecate in the near future. See related HLSL language proposals:
1775 // * 0005-strict-initializer-lists.md
1776 // * https://github.com/microsoft/hlsl-specs/pull/325
1777 if (CGF.getLangOpts().HLSL && isa<InitListExpr>(ExprToVisit))
1779 CGF, cast<InitListExpr>(ExprToVisit));
1780
1781 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1782
1783 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1784
1785 // Handle initialization of an array.
1786 if (ExprToVisit->getType()->isConstantArrayType()) {
1787 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1788 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1789 InitExprs, ArrayFiller);
1790 return;
1791 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1792 // A variable array type that has an initializer can only do empty
1793 // initialization. And because this feature is not exposed as an extension
1794 // in C++, we can safely memset the array memory to zero.
1795 assert(InitExprs.size() == 0 &&
1796 "you can only use an empty initializer with VLAs");
1797 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1798 return;
1799 }
1800
1801 assert(ExprToVisit->getType()->isRecordType() &&
1802 "Only support structs/unions here!");
1803
1804 // Do struct initialization; this code just sets each individual member
1805 // to the approprate value. This makes bitfield support automatic;
1806 // the disadvantage is that the generated code is more difficult for
1807 // the optimizer, especially with bitfields.
1808 unsigned NumInitElements = InitExprs.size();
1809 RecordDecl *record = ExprToVisit->getType()->castAsRecordDecl();
1810
1811 // We'll need to enter cleanup scopes in case any of the element
1812 // initializers throws an exception.
1813 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1814
1815 unsigned curInitIndex = 0;
1816
1817 // Emit initialization of base classes.
1818 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1819 assert(NumInitElements >= CXXRD->getNumBases() &&
1820 "missing initializer for base class");
1821 for (auto &Base : CXXRD->bases()) {
1822 assert(!Base.isVirtual() && "should not see vbases here");
1823 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1825 Dest.getAddress(), CXXRD, BaseRD,
1826 /*isBaseVirtual*/ false);
1828 V, Qualifiers(),
1832 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1833 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1834
1835 if (QualType::DestructionKind dtorKind =
1836 Base.getType().isDestructedType())
1837 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1838 }
1839 }
1840
1841 // Prepare a 'this' for CXXDefaultInitExprs.
1843
1844 const bool ZeroInitPadding =
1845 CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
1846
1847 if (record->isUnion()) {
1848 // Only initialize one field of a union. The field itself is
1849 // specified by the initializer list.
1850 if (!InitializedFieldInUnion) {
1851 // Empty union; we have nothing to do.
1852
1853#ifndef NDEBUG
 1854 // Make sure that it's really an empty union and not a failure of
1855 // semantic analysis.
1856 for (const auto *Field : record->fields())
1857 assert(
1858 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1859 "Only unnamed bitfields or anonymous class allowed");
1860#endif
1861 return;
1862 }
1863
1864 // FIXME: volatility
1865 FieldDecl *Field = InitializedFieldInUnion;
1866
1867 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1868 if (NumInitElements) {
1869 // Store the initializer into the field
1870 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1871 if (ZeroInitPadding) {
1872 uint64_t TotalSize = CGF.getContext().toBits(
1873 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1874 uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
1875 DoZeroInitPadding(FieldSize, TotalSize, nullptr);
1876 }
1877 } else {
1878 // Default-initialize to null.
1879 if (ZeroInitPadding)
1880 EmitNullInitializationToLValue(DestLV);
1881 else
1882 EmitNullInitializationToLValue(FieldLoc);
1883 }
1884 return;
1885 }
1886
1887 // Here we iterate over the fields; this makes it simpler to both
1888 // default-initialize fields and skip over unnamed fields.
1889 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
1890 uint64_t PaddingStart = 0;
1891
1892 for (const auto *field : record->fields()) {
1893 // We're done once we hit the flexible array member.
1894 if (field->getType()->isIncompleteArrayType())
1895 break;
1896
1897 // Always skip anonymous bitfields.
1898 if (field->isUnnamedBitField())
1899 continue;
1900
1901 // We're done if we reach the end of the explicit initializers, we
1902 // have a zeroed object, and the rest of the fields are
1903 // zero-initializable.
1904 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1905 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1906 break;
1907
1908 if (ZeroInitPadding)
1909 DoZeroInitPadding(PaddingStart,
1910 Layout.getFieldOffset(field->getFieldIndex()), field);
1911
1912 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
 1913 // We never generate write-barriers for initialized fields.
1914 LV.setNonGC(true);
1915
1916 if (curInitIndex < NumInitElements) {
1917 // Store the initializer into the field.
1918 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1919 } else {
1920 // We're out of initializers; default-initialize to null
1921 EmitNullInitializationToLValue(LV);
1922 }
1923
1924 // Push a destructor if necessary.
1925 // FIXME: if we have an array of structures, all explicitly
1926 // initialized, we can end up pushing a linear number of cleanups.
1927 if (QualType::DestructionKind dtorKind
1928 = field->getType().isDestructedType()) {
1929 assert(LV.isSimple());
1930 if (dtorKind) {
1932 field->getType(),
1933 CGF.getDestroyer(dtorKind), false);
1934 }
1935 }
1936 }
1937 if (ZeroInitPadding) {
1938 uint64_t TotalSize = CGF.getContext().toBits(
1939 Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1940 DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
1941 }
1942}
1943
1944void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
1945 uint64_t PaddingEnd,
1946 const FieldDecl *NextField) {
1947
1948 auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
1949 CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
1950 CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
1952 if (!Start.isZero())
1953 Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
1954 llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
1955 CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
1956 };
1957
1958 if (NextField != nullptr && NextField->isBitField()) {
1959 // For bitfield, zero init StorageSize before storing the bits. So we don't
1960 // need to handle big/little endian.
1961 const CGRecordLayout &RL =
1962 CGF.getTypes().getCGRecordLayout(NextField->getParent());
1963 const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
1964 uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
1965 if (StorageStart + Info.StorageSize > PaddingStart) {
1966 if (StorageStart > PaddingStart)
1967 InitBytes(PaddingStart, StorageStart);
1968 Address Addr = Dest.getAddress();
1969 if (!Info.StorageOffset.isZero())
1970 Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
1971 Info.StorageOffset.getQuantity());
1972 Addr = Addr.withElementType(
1973 llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
1974 Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
1975 PaddingStart = StorageStart + Info.StorageSize;
1976 }
1977 return;
1978 }
1979
1980 if (PaddingStart < PaddingEnd)
1981 InitBytes(PaddingStart, PaddingEnd);
1982 if (NextField != nullptr)
1983 PaddingStart =
1984 PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
1985}
1986
1987void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1988 llvm::Value *outerBegin) {
1989 // Emit the common subexpression.
1990 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1991
1992 Address destPtr = EnsureSlot(E->getType()).getAddress();
1993 uint64_t numElements = E->getArraySize().getZExtValue();
1994
1995 if (!numElements)
1996 return;
1997
1998 // destPtr is an array*. Construct an elementType* by drilling down a level.
1999 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2000 llvm::Value *indices[] = {zero, zero};
2001 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
2002 destPtr.emitRawPointer(CGF),
2003 indices, "arrayinit.begin");
2004
2005 // Prepare to special-case multidimensional array initialization: we avoid
2006 // emitting multiple destructor loops in that case.
2007 if (!outerBegin)
2008 outerBegin = begin;
2009 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
2010
2011 QualType elementType =
2013 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
2014 CharUnits elementAlign =
2015 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
2016 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
2017
2018 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2019 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
2020
2021 // Jump into the body.
2022 CGF.EmitBlock(bodyBB);
2023 llvm::PHINode *index =
2024 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
2025 index->addIncoming(zero, entryBB);
2026 llvm::Value *element =
2027 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
2028
2029 // Prepare for a cleanup.
2030 QualType::DestructionKind dtorKind = elementType.isDestructedType();
2032 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
2033 if (outerBegin->getType() != element->getType())
2034 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
2035 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
2036 elementAlign,
2037 CGF.getDestroyer(dtorKind));
2039 } else {
2040 dtorKind = QualType::DK_none;
2041 }
2042
2043 // Emit the actual filler expression.
2044 {
2045 // Temporaries created in an array initialization loop are destroyed
2046 // at the end of each iteration.
2047 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
2049 LValue elementLV = CGF.MakeAddrLValue(
2050 Address(element, llvmElementType, elementAlign), elementType);
2051
2052 if (InnerLoop) {
2053 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
2054 auto elementSlot = AggValueSlot::forLValue(
2055 elementLV, AggValueSlot::IsDestructed,
2058 AggExprEmitter(CGF, elementSlot, false)
2059 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
2060 } else
2061 EmitInitializationToLValue(E->getSubExpr(), elementLV);
2062 }
2063
2064 // Move on to the next element.
2065 llvm::Value *nextIndex = Builder.CreateNUWAdd(
2066 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
2067 index->addIncoming(nextIndex, Builder.GetInsertBlock());
2068
2069 // Leave the loop if we're done.
2070 llvm::Value *done = Builder.CreateICmpEQ(
2071 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
2072 "arrayinit.done");
2073 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
2074 Builder.CreateCondBr(done, endBB, bodyBB);
2075
2076 CGF.EmitBlock(endBB);
2077
2078 // Leave the partial-array cleanup if we entered one.
2079 if (dtorKind)
2080 CGF.DeactivateCleanupBlock(cleanup, index);
2081}
2082
2083void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
2084 AggValueSlot Dest = EnsureSlot(E->getType());
2085
2086 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
2087 EmitInitializationToLValue(E->getBase(), DestLV);
2088 VisitInitListExpr(E->getUpdater());
2089}
2090
2091//===----------------------------------------------------------------------===//
2092// Entry Points into this File
2093//===----------------------------------------------------------------------===//
2094
2095/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
2096/// non-zero bytes that will be stored when outputting the initializer for the
2097/// specified initializer expression.
2099 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
2100 E = MTE->getSubExpr();
2102
2103 // 0 and 0.0 won't require any non-zero stores!
2104 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
2105
2106 // If this is an initlist expr, sum up the size of sizes of the (present)
2107 // elements. If this is something weird, assume the whole thing is non-zero.
2108 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
2109 while (ILE && ILE->isTransparent())
2110 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
2111 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
2112 return CGF.getContext().getTypeSizeInChars(E->getType());
2113
2114 // InitListExprs for structs have to be handled carefully. If there are
2115 // reference members, we need to consider the size of the reference, not the
2116 // referencee. InitListExprs for unions and arrays can't have references.
2117 if (const RecordType *RT = E->getType()->getAsCanonical<RecordType>()) {
2118 if (!RT->isUnionType()) {
2119 RecordDecl *SD = RT->getOriginalDecl()->getDefinitionOrSelf();
2120 CharUnits NumNonZeroBytes = CharUnits::Zero();
2121
2122 unsigned ILEElement = 0;
2123 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
2124 while (ILEElement != CXXRD->getNumBases())
2125 NumNonZeroBytes +=
2126 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
2127 for (const auto *Field : SD->fields()) {
2128 // We're done once we hit the flexible array member or run out of
2129 // InitListExpr elements.
2130 if (Field->getType()->isIncompleteArrayType() ||
2131 ILEElement == ILE->getNumInits())
2132 break;
2133 if (Field->isUnnamedBitField())
2134 continue;
2135
2136 const Expr *E = ILE->getInit(ILEElement++);
2137
2138 // Reference values are always non-null and have the width of a pointer.
2139 if (Field->getType()->isReferenceType())
2140 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
2141 CGF.getTarget().getPointerWidth(LangAS::Default));
2142 else
2143 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
2144 }
2145
2146 return NumNonZeroBytes;
2147 }
2148 }
2149
2150 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
2151 CharUnits NumNonZeroBytes = CharUnits::Zero();
2152 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
2153 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
2154 return NumNonZeroBytes;
2155}
2156
2157/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
2158/// zeros in it, emit a memset and avoid storing the individual zeros.
2159///
2161 CodeGenFunction &CGF) {
2162 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2163 // volatile stores.
2164 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2165 return;
2166
2167 // C++ objects with a user-declared constructor don't need zero'ing.
2168 if (CGF.getLangOpts().CPlusPlus)
2169 if (const RecordType *RT = CGF.getContext()
2172 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getOriginalDecl());
2174 return;
2175 }
2176
2177 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2178 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2179 if (Size <= CharUnits::fromQuantity(16))
2180 return;
2181
2182 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2183 // we prefer to emit memset + individual stores for the rest.
2184 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2185 if (NumNonZeroBytes*4 > Size)
2186 return;
2187
2188 // Okay, it seems like a good idea to use an initial memset, emit the call.
2189 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2190
2192 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2193
2194 // Tell the AggExprEmitter that the slot is known zero.
2195 Slot.setZeroed();
2196}
2197
2198
2199
2200
2201/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2202/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2203/// the value of the aggregate expression is not needed. If VolatileDest is
2204/// true, DestPtr cannot be 0.
2206 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2207 "Invalid aggregate expression to emit");
2208 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2209 "slot has bits but no address");
2210
2211 // Optimize the slot if possible.
2212 CheckAggExprForMemSetUse(Slot, E, *this);
2213
2214 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2215}
2216
2218 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2219 Address Temp = CreateMemTemp(E->getType());
2220 LValue LV = MakeAddrLValue(Temp, E->getType());
2225 return LV;
2226}
2227
2229 const LValue &Src,
2230 ExprValueKind SrcKind) {
2231 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2232 .EmitFinalDestCopy(Type, Src, SrcKind);
2233}
2234
2237 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2239
2240 // Empty fields can overlap earlier fields.
2241 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2243
2244 // If the field lies entirely within the enclosing class's nvsize, its tail
2245 // padding cannot overlap any already-initialized object. (The only subobjects
2246 // with greater addresses that might already be initialized are vbases.)
2247 const RecordDecl *ClassRD = FD->getParent();
2248 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2249 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2250 getContext().getTypeSize(FD->getType()) <=
2251 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2253
2254 // The tail padding may contain values we need to preserve.
2256}
2257
2259 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2260 // If the most-derived object is a field declared with [[no_unique_address]],
2261 // the tail padding of any virtual base could be reused for other subobjects
2262 // of that field's class.
2263 if (IsVirtual)
2265
2266 // Empty bases can overlap earlier bases.
2267 if (BaseRD->isEmpty())
2269
2270 // If the base class is laid out entirely within the nvsize of the derived
2271 // class, its tail padding cannot yet be initialized, so we can issue
2272 // stores at the full width of the base class.
2273 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2274 if (Layout.getBaseClassOffset(BaseRD) +
2275 getContext().getASTRecordLayout(BaseRD).getSize() <=
2276 Layout.getNonVirtualSize())
2278
2279 // The tail padding may contain values we need to preserve.
2281}
2282
2284 AggValueSlot::Overlap_t MayOverlap,
2285 bool isVolatile) {
2286 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2287
2288 Address DestPtr = Dest.getAddress();
2289 Address SrcPtr = Src.getAddress();
2290
2291 if (getLangOpts().CPlusPlus) {
2292 if (const auto *Record = Ty->getAsCXXRecordDecl()) {
2293 assert((Record->hasTrivialCopyConstructor() ||
2294 Record->hasTrivialCopyAssignment() ||
2295 Record->hasTrivialMoveConstructor() ||
2296 Record->hasTrivialMoveAssignment() ||
2297 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2298 "Trying to aggregate-copy a type without a trivial copy/move "
2299 "constructor or assignment operator");
2300 // Ignore empty classes in C++.
2301 if (Record->isEmpty())
2302 return;
2303 }
2304 }
2305
2306 if (getLangOpts().CUDAIsDevice) {
2308 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2309 Src))
2310 return;
2311 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2312 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2313 Src))
2314 return;
2315 }
2316 }
2317
2318 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2319 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2320 // read from another object that overlaps in anyway the storage of the first
2321 // object, then the overlap shall be exact and the two objects shall have
2322 // qualified or unqualified versions of a compatible type."
2323 //
2324 // memcpy is not defined if the source and destination pointers are exactly
2325 // equal, but other compilers do this optimization, and almost every memcpy
2326 // implementation handles this case safely. If there is a libc that does not
2327 // safely handle this, we can add a target hook.
2328
2329 // Get data size info for this aggregate. Don't copy the tail padding if this
2330 // might be a potentially-overlapping subobject, since the tail padding might
2331 // be occupied by a different object. Otherwise, copying it is fine.
2333 if (MayOverlap)
2335 else
2337
2338 llvm::Value *SizeVal = nullptr;
2339 if (TypeInfo.Width.isZero()) {
2340 // But note that getTypeInfo returns 0 for a VLA.
2341 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2342 getContext().getAsArrayType(Ty))) {
2343 QualType BaseEltTy;
2344 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2345 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2346 assert(!TypeInfo.Width.isZero());
2347 SizeVal = Builder.CreateNUWMul(
2348 SizeVal,
2349 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2350 }
2351 }
2352 if (!SizeVal) {
2353 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2354 }
2355
2356 // FIXME: If we have a volatile struct, the optimizer can remove what might
2357 // appear to be `extra' memory ops:
2358 //
2359 // volatile struct { int i; } a, b;
2360 //
2361 // int main() {
2362 // a = b;
2363 // a = b;
2364 // }
2365 //
2366 // we need to use a different call here. We use isVolatile to indicate when
2367 // either the source or the destination is volatile.
2368
2369 DestPtr = DestPtr.withElementType(Int8Ty);
2370 SrcPtr = SrcPtr.withElementType(Int8Ty);
2371
2372 // Don't do any of the memmove_collectable tests if GC isn't set.
2373 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2374 // fall through
2375 } else if (const auto *Record = Ty->getAsRecordDecl()) {
2376 if (Record->hasObjectMember()) {
2377 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2378 SizeVal);
2379 return;
2380 }
2381 } else if (Ty->isArrayType()) {
2382 QualType BaseType = getContext().getBaseElementType(Ty);
2383 if (const auto *Record = BaseType->getAsRecordDecl()) {
2384 if (Record->hasObjectMember()) {
2385 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2386 SizeVal);
2387 return;
2388 }
2389 }
2390 }
2391
2392 auto *Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2393 addInstToCurrentSourceAtom(Inst, nullptr);
2394
2395 // Determine the metadata to describe the position of any padding in this
2396 // memcpy, as well as the TBAA tags for the members of the struct, in case
2397 // the optimizer wishes to expand it in to scalar memory operations.
2398 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2399 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2400
2401 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2403 Dest.getTBAAInfo(), Src.getTBAAInfo());
2404 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2405 }
2406}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3597
CompareKind
Definition: CGExprAgg.cpp:1115
@ CK_Greater
Definition: CGExprAgg.cpp:1117
@ CK_Less
Definition: CGExprAgg.cpp:1116
@ CK_Equal
Definition: CGExprAgg.cpp:1118
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stor...
Definition: CGExprAgg.cpp:2098
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
Definition: CGExprAgg.cpp:824
static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal, QualType DestTy, llvm::Value *SrcVal, QualType SrcTy, SourceLocation Loc)
Definition: CGExprAgg.cpp:517
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
Definition: CGExprAgg.cpp:1266
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, QualType DestTy, Address SrcVal, QualType SrcTy, SourceLocation Loc)
Definition: CGExprAgg.cpp:550
static bool isTrivialFiller(Expr *E)
Determine if E is a trivial array filler, that is, one that is equivalent to zero-initialization.
Definition: CGExprAgg.cpp:470
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
Definition: CGExprAgg.cpp:1652
static void EmitHLSLAggregateSplatCast(CodeGenFunction &CGF, Address DestVal, QualType DestTy, llvm::Value *SrcVal, QualType SrcTy, SourceLocation Loc)
Definition: CGExprAgg.cpp:491
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:1121
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
Definition: CGExprAgg.cpp:1553
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it,...
Definition: CGExprAgg.cpp:2160
Expr * E
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
llvm::MachO::Record Record
Definition: MachO.h:31
SourceLocation Loc
Definition: SemaObjC.cpp:754
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:3056
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool hasSameType(QualType T1, QualType T2) const
Determine whether the given types T1 and T2 are equivalent.
Definition: ASTContext.h:2867
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
Definition: ASTContext.h:2565
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2898
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2625
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:201
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:250
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Definition: RecordLayout.h:211
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4289
Represents a loop initializing the elements of an array.
Definition: Expr.h:5904
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2723
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: TypeBase.h:3738
QualType getElementType() const
Definition: TypeBase.h:3750
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6816
QualType getValueType() const
Gets the type contained by this atomic type, i.e.
Definition: TypeBase.h:8142
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3974
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1494
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1549
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1378
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1105
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1753
Represents a list-initialization with parenthesis.
Definition: ExprCXX.h:5135
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition: DeclCXX.cpp:607
bool hasUserDeclaredConstructor() const
Determine whether this class has any user-declared constructors.
Definition: DeclCXX.h:780
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition: DeclCXX.h:1186
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:286
An expression "T()" which creates an rvalue of a non-class type T.
Definition: ExprCXX.h:2198
Implicit construction of a std::initializer_list<T> object from an array temporary within list-initia...
Definition: ExprCXX.h:800
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1209
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:848
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2879
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3612
CastKind getCastKind() const
Definition: Expr.h:3656
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4784
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition: Expr.h:4820
Represents a 'co_await' expression.
Definition: ExprCXX.h:5363
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:198
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:253
CharUnits getAlignment() const
Definition: Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:276
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:204
An aggregate value slot.
Definition: CGValue.h:504
void setVolatile(bool flag)
Definition: CGValue.h:623
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition: CGValue.h:682
NeedsGCBarriers_t requiresGCollection() const
Definition: CGValue.h:634
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
void setZeroed(bool V=true)
Definition: CGValue.h:674
IsZeroed_t isZeroed() const
Definition: CGValue.h:675
Qualifiers getQualifiers() const
Definition: CGValue.h:617
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
IsAliased_t isPotentiallyAliased() const
Definition: CGValue.h:654
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
IsDestructed_t isExternallyDestructed() const
Definition: CGValue.h:610
Overlap_t mayOverlap() const
Definition: CGValue.h:658
RValue asRValue() const
Definition: CGValue.h:666
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:640
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:906
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:140
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:402
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:223
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:112
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:369
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:85
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
An object to manage conditionally-evaluated expressions.
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
Definition: CGCall.cpp:1399
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition: CGObjC.cpp:573
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
Definition: CGExprCXX.cpp:598
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
Definition: CGExprAgg.cpp:2236
void callCStructMoveConstructor(LValue Dst, LValue Src)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
Definition: CGExprAgg.cpp:2228
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushRegularPartialArrayCleanup - Push an EH cleanup to destroy already-constructed elements of the gi...
Definition: CGDecl.cpp:2595
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition: CGExpr.cpp:2680
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:6263
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition: CGExpr.cpp:6782
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
Definition: CGExprAgg.cpp:2258
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition: CGExpr.cpp:684
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition: CGExpr.cpp:6612
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition: CGDecl.cpp:2279
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to destroy already-constructed elements ...
Definition: CGDecl.cpp:2579
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition: CGDecl.cpp:2252
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition: CGExpr.cpp:6787
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:2283
const TargetInfo & getTarget() const
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:242
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition: CGExpr.cpp:5932
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
Definition: CGDecl.cpp:2304
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1293
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
Definition: CGExprAgg.cpp:2217
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
void callCStructCopyConstructor(LValue Dst, LValue Src)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
Definition: CGAtomic.cpp:1549
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition: CGDecl.cpp:1357
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition: CGExpr.cpp:5427
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a complete class to the given direct b...
Definition: CGClass.cpp:216
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:151
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition: CGExpr.cpp:5871
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
Definition: CGDecl.cpp:1369
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
Definition: CGExpr.cpp:215
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:2533
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition: CGDecl.cpp:2332
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition: CGStmt.cpp:566
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:264
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition: CGAtomic.cpp:854
CodeGenTypes & getTypes() const
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
Definition: CGCleanup.cpp:1322
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
Definition: CGAtomic.cpp:1536
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition: CGExpr.cpp:1596
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
Definition: CGClass.cpp:2262
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:186
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition: CGExpr.cpp:323
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Definition: CGExprAgg.cpp:2205
void FlattenAccessAndType(Address Addr, QualType AddrTy, SmallVectorImpl< std::pair< Address, llvm::Value * > > &AccessList, SmallVectorImpl< QualType > &FlatTypes)
Definition: CGExpr.cpp:6791
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Definition: CGAtomic.cpp:1973
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1631
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
Definition: CGExprCXX.cpp:2246
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1311
llvm::MDNode * getTBAAStructInfo(QualType QTy)
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, TBAAAccessInfo SrcInfo)
mergeTBAAInfoForMemoryTransfer - Get merged TBAA information for the purposes of memory transfer call...
const LangOptions & getLangOpts() const
CGCXXABI & getCXXABI() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:398
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition: CGCleanup.h:647
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isSimple() const
Definition: CGValue.h:278
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition: CGValue.h:88
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
bool isAggregate() const
Definition: CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
bool isComplex() const
Definition: CGValue.h:65
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:78
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:379
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition: TypeBase.h:3293
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3541
Represents the canonical version of C arrays with a specified constant size.
Definition: TypeBase.h:3776
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1084
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5444
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2393
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1272
bool hasAttr() const
Definition: DeclBase.h:577
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3655
This represents one expression.
Definition: Expr.h:112
bool isGLValue() const
Definition: Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3100
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3069
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3624
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:273
QualType getType() const
Definition: Expr.h:144
Represents a member of a struct/union/class.
Definition: Decl.h:3157
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3260
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3242
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3393
Represents a C11 generic selection.
Definition: Expr.h:6114
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5993
Describes an C or C++ initializer list.
Definition: Expr.h:5235
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition: Expr.cpp:2457
unsigned getNumInits() const
Definition: Expr.h:5265
const Expr * getInit(unsigned Init) const
Definition: Expr.h:5289
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
Definition: ExprCXX.h:1970
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: ExprCXX.h:2082
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4914
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3300
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: TypeBase.h:3669
Represents a place-holder for an object not to be initialized by anything.
Definition: Expr.h:5813
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:548
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:940
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1180
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1230
bool isUnique() const
Definition: Expr.h:1238
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2184
const Expr * getSubExpr() const
Definition: Expr.h:2201
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:2007
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6692
A (possibly-)qualified type.
Definition: TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: TypeBase.h:8427
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2871
@ DK_nontrivial_c_struct
Definition: TypeBase.h:1538
LangAS getAddressSpace() const
Return the address space of this type.
Definition: TypeBase.h:8469
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: TypeBase.h:1545
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2699
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition: TypeBase.h:1517
The collection of all-type qualifiers we support.
Definition: TypeBase.h:331
Represents a struct/union/class.
Definition: Decl.h:4309
bool hasObjectMember() const
Definition: Decl.h:4369
field_range fields() const
Definition: Decl.h:4512
RecordDecl * getDefinitionOrSelf() const
Definition: Decl.h:4497
field_iterator field_begin() const
Definition: Decl.cpp:5154
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: TypeBase.h:6502
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4531
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:45
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:186
Stmt - This represents one statement.
Definition: Stmt.h:85
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1801
Represents a reference to a non-type template parameter that has been substituted with a template arg...
Definition: ExprCXX.h:4658
bool isUnion() const
Definition: Decl.h:3919
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:486
The base class of the type hierarchy.
Definition: TypeBase.h:1833
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.h:26
bool isConstantArrayType() const
Definition: TypeBase.h:8683
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.h:41
bool isArrayType() const
Definition: TypeBase.h:8679
bool isPointerType() const
Definition: TypeBase.h:8580
bool isScalarType() const
Definition: TypeBase.h:9038
bool isVariableArrayType() const
Definition: TypeBase.h:8691
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition: Type.cpp:5379
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: TypeBase.h:9054
RecordDecl * castAsRecordDecl() const
Definition: Type.h:48
bool isAnyComplexType() const
Definition: TypeBase.h:8715
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition: Type.cpp:2247
bool isMemberPointerType() const
Definition: TypeBase.h:8661
bool isAtomicType() const
Definition: TypeBase.h:8762
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition: Type.cpp:5388
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
Definition: Type.cpp:2316
bool isVectorType() const
Definition: TypeBase.h:8719
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2324
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition: TypeBase.h:2939
const T * getAs() const
Member-template getAs<specific type>'.
Definition: TypeBase.h:9159
bool isNullPtrType() const
Definition: TypeBase.h:8973
bool isRecordType() const
Definition: TypeBase.h:8707
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2246
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4893
QualType getType() const
Definition: Decl.h:722
Represents a variable declaration or definition.
Definition: Decl.h:925
Represents a GCC generic vector type.
Definition: TypeBase.h:4191
unsigned getNumElements() const
Definition: TypeBase.h:4206
QualType getElementType() const
Definition: TypeBase.h:4205
Definition: SPIR.cpp:35
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
Matches atomic types.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:4035
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1289
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
CastKind
CastKind - The kind of operation required for a conversion.
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition: Address.h:327
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * CharTy
char
uint64_t Width
Definition: ASTContext.h:159