clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/CFG.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DerivedTypes.h"
41#include "llvm/IR/FixedPointBuilder.h"
42#include "llvm/IR/Function.h"
43#include "llvm/IR/GEPNoWrapFlags.h"
44#include "llvm/IR/GetElementPtrTypeIterator.h"
45#include "llvm/IR/GlobalVariable.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/IntrinsicsPowerPC.h"
48#include "llvm/IR/MatrixBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/Support/TypeSize.h"
51#include <cstdarg>
52#include <optional>
53
54using namespace clang;
55using namespace CodeGen;
56using llvm::Value;
57
58//===----------------------------------------------------------------------===//
59// Scalar Expression Emitter
60//===----------------------------------------------------------------------===//
61
62namespace llvm {
63extern cl::opt<bool> EnableSingleByteCoverage;
64} // namespace llvm
65
66namespace {
67
68/// Determine whether the given binary operation may overflow.
69/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
70/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
71/// the returned overflow check is precise. The returned value is 'true' for
72/// all other opcodes, to be conservative.
73bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
74 BinaryOperator::Opcode Opcode, bool Signed,
75 llvm::APInt &Result) {
76 // Assume overflow is possible, unless we can prove otherwise.
77 bool Overflow = true;
78 const auto &LHSAP = LHS->getValue();
79 const auto &RHSAP = RHS->getValue();
80 if (Opcode == BO_Add) {
81 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
82 : LHSAP.uadd_ov(RHSAP, Overflow);
83 } else if (Opcode == BO_Sub) {
84 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
85 : LHSAP.usub_ov(RHSAP, Overflow);
86 } else if (Opcode == BO_Mul) {
87 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
88 : LHSAP.umul_ov(RHSAP, Overflow);
89 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
90 if (Signed && !RHS->isZero())
91 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
92 else
93 return false;
94 }
95 return Overflow;
96}
97
98struct BinOpInfo {
99 Value *LHS;
100 Value *RHS;
101 QualType Ty; // Computation Type.
102 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
103 FPOptions FPFeatures;
104 const Expr *E; // Entire expr, for error unsupported. May not be binop.
105
106 /// Check if the binop can result in integer overflow.
107 bool mayHaveIntegerOverflow() const {
108 // Without constant input, we can't rule out overflow.
109 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
110 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
111 if (!LHSCI || !RHSCI)
112 return true;
113
114 llvm::APInt Result;
115 return ::mayHaveIntegerOverflow(
116 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
117 }
118
119 /// Check if the binop computes a division or a remainder.
120 bool isDivremOp() const {
121 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
122 Opcode == BO_RemAssign;
123 }
124
125 /// Check if the binop can result in an integer division by zero.
126 bool mayHaveIntegerDivisionByZero() const {
127 if (isDivremOp())
128 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
129 return CI->isZero();
130 return true;
131 }
132
133 /// Check if the binop can result in a float division by zero.
134 bool mayHaveFloatDivisionByZero() const {
135 if (isDivremOp())
136 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
137 return CFP->isZero();
138 return true;
139 }
140
141 /// Check if at least one operand is a fixed point type. In such cases, this
142 /// operation did not follow usual arithmetic conversion and both operands
143 /// might not be of the same type.
144 bool isFixedPointOp() const {
145 // We cannot simply check the result type since comparison operations return
146 // an int.
147 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
148 QualType LHSType = BinOp->getLHS()->getType();
149 QualType RHSType = BinOp->getRHS()->getType();
150 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
151 }
152 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
153 return UnOp->getSubExpr()->getType()->isFixedPointType();
154 return false;
155 }
156
157 /// Check if the RHS has a signed integer representation.
158 bool rhsHasSignedIntegerRepresentation() const {
159 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
160 QualType RHSType = BinOp->getRHS()->getType();
161 return RHSType->hasSignedIntegerRepresentation();
162 }
163 return false;
164 }
165};
166
167static bool MustVisitNullValue(const Expr *E) {
168 // If a null pointer expression's type is the C++0x nullptr_t, then
169 // it's not necessarily a simple constant and it must be evaluated
170 // for its potential side effects.
171 return E->getType()->isNullPtrType();
172}
173
174/// If \p E is a widened promoted integer, get its base (unpromoted) type.
175static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
176 const Expr *E) {
177 const Expr *Base = E->IgnoreImpCasts();
178 if (E == Base)
179 return std::nullopt;
180
181 QualType BaseTy = Base->getType();
182 if (!Ctx.isPromotableIntegerType(BaseTy) ||
183 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
184 return std::nullopt;
185
186 return BaseTy;
187}
188
189/// Check if \p E is a widened promoted integer.
190static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
191 return getUnwidenedIntegerType(Ctx, E).has_value();
192}
193
 194/// Check if we can skip the overflow check for \p Op.
/// Returns true when no runtime overflow check needs to be emitted for this
/// unary or binary operation (constant-provable, sanitizer-ignored type,
/// excluded pattern, or widened operands that cannot overflow).
 195static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
 196 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
 197 "Expected a unary or binary operator");
 198
 199 // If the binop has constant inputs and we can prove there is no overflow,
 200 // we can elide the overflow check.
 201 if (!Op.mayHaveIntegerOverflow())
 202 return true;
 203
 // Types explicitly ignored by the matching sanitizer never get a check.
 204 if (Op.Ty->isSignedIntegerType() &&
 205 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
 206 Op.Ty)) {
 207 return true;
 208 }
 209
 210 if (Op.Ty->isUnsignedIntegerType() &&
 211 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
 212 Op.Ty)) {
 213 return true;
 214 }
 215
 216 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
 217
 218 if (UO && UO->getOpcode() == UO_Minus &&
 // NOTE(review): original line 219 was lost in extraction here — presumably
 // the LangOpts overflow-pattern-exclusion query whose argument list
 // continues on the next line; confirm against the upstream file.
 220 LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
 221 UO->isIntegerConstantExpr(Ctx))
 222 return true;
 223
 224 // If a unary op has a widened operand, the op cannot overflow.
 225 if (UO)
 226 return !UO->canOverflow();
 227
 228 // We usually don't need overflow checks for binops with widened operands.
 229 // Multiplication with promoted unsigned operands is a special case.
 230 const auto *BO = cast<BinaryOperator>(Op.E);
 231 if (BO->hasExcludedOverflowPattern())
 232 return true;
 233
 // Both operands must be widened promoted integers for the reasoning below.
 234 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
 235 if (!OptionalLHSTy)
 236 return false;
 237
 238 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
 239 if (!OptionalRHSTy)
 240 return false;
 241
 242 QualType LHSTy = *OptionalLHSTy;
 243 QualType RHSTy = *OptionalRHSTy;
 244
 245 // This is the simple case: binops without unsigned multiplication, and with
 246 // widened operands. No overflow check is needed here.
 247 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
 248 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
 249 return true;
 250
 251 // For unsigned multiplication the overflow check can be elided if either one
 252 // of the unpromoted types are less than half the size of the promoted type.
 253 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
 254 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
 255 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
 256}
257
258class ScalarExprEmitter
259 : public StmtVisitor<ScalarExprEmitter, Value*> {
260 CodeGenFunction &CGF;
261 CGBuilderTy &Builder;
262 bool IgnoreResultAssign;
263 llvm::LLVMContext &VMContext;
264public:
265
266 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
267 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
268 VMContext(cgf.getLLVMContext()) {
269 }
270
271 //===--------------------------------------------------------------------===//
272 // Utilities
273 //===--------------------------------------------------------------------===//
274
275 bool TestAndClearIgnoreResultAssign() {
276 bool I = IgnoreResultAssign;
277 IgnoreResultAssign = false;
278 return I;
279 }
280
281 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
282 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
283 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
284 return CGF.EmitCheckedLValue(E, TCK);
285 }
286
287 void EmitBinOpCheck(
288 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
289 const BinOpInfo &Info);
290
291 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
292 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
293 }
294
 /// Emit an llvm.assume-style alignment assumption for \p V, the emitted
 /// value of expression \p E, when an AlignValueAttr is found on the
 /// referenced declaration or on a typedef of the expression's type.
 295 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
 296 const AlignValueAttr *AVAttr = nullptr;
 297 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
 298 const ValueDecl *VD = DRE->getDecl();
 299
 300 if (VD->getType()->isReferenceType()) {
 301 if (const auto *TTy =
 // NOTE(review): original line 302 was lost in extraction — presumably the
 // TypedefType lookup on the reference's pointee type whose initializer
 // continues here; confirm against the upstream file.
 303 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
 304 } else {
 305 // Assumptions for function parameters are emitted at the start of the
 306 // function, so there is no need to repeat that here,
 307 // unless the alignment-assumption sanitizer is enabled,
 308 // then we prefer the assumption over alignment attribute
 309 // on IR function param.
 310 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
 311 return;
 312
 313 AVAttr = VD->getAttr<AlignValueAttr>();
 314 }
 315 }
 316
 // Fall back to an align_value attribute on a typedef of the expression's
 // own type.
 317 if (!AVAttr)
 318 if (const auto *TTy = E->getType()->getAs<TypedefType>())
 319 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
 320
 321 if (!AVAttr)
 322 return;
 323
 // The attribute's alignment argument must fold to a constant integer.
 324 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
 325 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
 326 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
 327 }
328
329 /// EmitLoadOfLValue - Given an expression with complex type that represents a
330 /// value l-value, this method emits the address of the l-value, then loads
331 /// and returns the result.
332 Value *EmitLoadOfLValue(const Expr *E) {
333 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
334 E->getExprLoc());
335
336 EmitLValueAlignmentAssumption(E, V);
337 return V;
338 }
339
340 /// EmitConversionToBool - Convert the specified expression value to a
341 /// boolean (i1) truth value. This is equivalent to "Val != 0".
342 Value *EmitConversionToBool(Value *Src, QualType DstTy);
343
344 /// Emit a check that a conversion from a floating-point type does not
345 /// overflow.
346 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
347 Value *Src, QualType SrcType, QualType DstType,
348 llvm::Type *DstTy, SourceLocation Loc);
349
350 /// Known implicit conversion check kinds.
351 /// This is used for bitfield conversion checks as well.
352 /// Keep in sync with the enum of the same name in ubsan_handlers.h
353 enum ImplicitConversionCheckKind : unsigned char {
354 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
355 ICCK_UnsignedIntegerTruncation = 1,
356 ICCK_SignedIntegerTruncation = 2,
357 ICCK_IntegerSignChange = 3,
358 ICCK_SignedIntegerTruncationOrSignChange = 4,
359 };
360
361 /// Emit a check that an [implicit] truncation of an integer does not
362 /// discard any bits. It is not UB, so we use the value after truncation.
363 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
364 QualType DstType, SourceLocation Loc);
365
366 /// Emit a check that an [implicit] conversion of an integer does not change
367 /// the sign of the value. It is not UB, so we use the value after conversion.
368 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
369 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
370 QualType DstType, SourceLocation Loc);
371
372 /// Emit a conversion from the specified type to the specified destination
373 /// type, both of which are LLVM scalar types.
374 struct ScalarConversionOpts {
375 bool TreatBooleanAsSigned;
376 bool EmitImplicitIntegerTruncationChecks;
377 bool EmitImplicitIntegerSignChangeChecks;
378
379 ScalarConversionOpts()
380 : TreatBooleanAsSigned(false),
381 EmitImplicitIntegerTruncationChecks(false),
382 EmitImplicitIntegerSignChangeChecks(false) {}
383
384 ScalarConversionOpts(clang::SanitizerSet SanOpts)
385 : TreatBooleanAsSigned(false),
386 EmitImplicitIntegerTruncationChecks(
387 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
388 EmitImplicitIntegerSignChangeChecks(
389 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
390 };
391 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
392 llvm::Type *SrcTy, llvm::Type *DstTy,
393 ScalarConversionOpts Opts);
394 Value *
395 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
397 ScalarConversionOpts Opts = ScalarConversionOpts());
398
399 /// Convert between either a fixed point and other fixed point or fixed point
400 /// and an integer.
401 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
403
404 /// Emit a conversion from the specified complex type to the specified
405 /// destination type, where the destination type is an LLVM scalar type.
406 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
407 QualType SrcTy, QualType DstTy,
409
410 /// EmitNullValue - Emit a value that corresponds to null for the given type.
411 Value *EmitNullValue(QualType Ty);
412
413 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
414 Value *EmitFloatToBoolConversion(Value *V) {
415 // Compare against 0.0 for fp scalars.
416 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
417 return Builder.CreateFCmpUNE(V, Zero, "tobool");
418 }
419
420 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
421 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
422 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
423
424 return Builder.CreateICmpNE(V, Zero, "tobool");
425 }
426
427 Value *EmitIntToBoolConversion(Value *V) {
428 // Because of the type rules of C, we often end up computing a
429 // logical value, then zero extending it to int, then wanting it
430 // as a logical value again. Optimize this common case.
431 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
432 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
433 Value *Result = ZI->getOperand(0);
434 // If there aren't any more uses, zap the instruction to save space.
435 // Note that there can be more uses, for example if this
436 // is the result of an assignment.
437 if (ZI->use_empty())
438 ZI->eraseFromParent();
439 return Result;
440 }
441 }
442
443 return Builder.CreateIsNotNull(V, "tobool");
444 }
445
446 //===--------------------------------------------------------------------===//
447 // Visitor Methods
448 //===--------------------------------------------------------------------===//
449
450 Value *Visit(Expr *E) {
451 ApplyDebugLocation DL(CGF, E);
453 }
454
455 Value *VisitStmt(Stmt *S) {
456 S->dump(llvm::errs(), CGF.getContext());
457 llvm_unreachable("Stmt can't have complex result type!");
458 }
459 Value *VisitExpr(Expr *S);
460
461 Value *VisitConstantExpr(ConstantExpr *E) {
462 // A constant expression of type 'void' generates no code and produces no
463 // value.
464 if (E->getType()->isVoidType())
465 return nullptr;
466
467 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
468 if (E->isGLValue())
469 return CGF.EmitLoadOfScalar(
472 /*Volatile*/ false, E->getType(), E->getExprLoc());
473 return Result;
474 }
475 return Visit(E->getSubExpr());
476 }
477 Value *VisitParenExpr(ParenExpr *PE) {
478 return Visit(PE->getSubExpr());
479 }
480 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
481 return Visit(E->getReplacement());
482 }
483 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
484 return Visit(GE->getResultExpr());
485 }
486 Value *VisitCoawaitExpr(CoawaitExpr *S) {
487 return CGF.EmitCoawaitExpr(*S).getScalarVal();
488 }
489 Value *VisitCoyieldExpr(CoyieldExpr *S) {
490 return CGF.EmitCoyieldExpr(*S).getScalarVal();
491 }
492 Value *VisitUnaryCoawait(const UnaryOperator *E) {
493 return Visit(E->getSubExpr());
494 }
495
496 // Leaves.
497 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
498 return Builder.getInt(E->getValue());
499 }
500 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
501 return Builder.getInt(E->getValue());
502 }
503 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
504 return llvm::ConstantFP::get(VMContext, E->getValue());
505 }
506 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
507 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
508 }
509 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
510 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
511 }
512 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
513 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
514 }
515 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
516 if (E->getType()->isVoidType())
517 return nullptr;
518
519 return EmitNullValue(E->getType());
520 }
521 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
522 return EmitNullValue(E->getType());
523 }
524 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
525 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
526 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
527 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
528 return Builder.CreateBitCast(V, ConvertType(E->getType()));
529 }
530
531 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
532 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
533 }
534
535 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
537 }
538
539 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
540 Value *VisitEmbedExpr(EmbedExpr *E);
541
542 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
543 if (E->isGLValue())
544 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
545 E->getExprLoc());
546
547 // Otherwise, assume the mapping is the scalar directly.
549 }
550
551 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
552 llvm_unreachable("Codegen for this isn't defined/implemented");
553 }
554
555 // l-values.
556 Value *VisitDeclRefExpr(DeclRefExpr *E) {
558 return CGF.emitScalarConstant(Constant, E);
559 return EmitLoadOfLValue(E);
560 }
561
562 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
563 return CGF.EmitObjCSelectorExpr(E);
564 }
565 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
566 return CGF.EmitObjCProtocolExpr(E);
567 }
568 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
569 return EmitLoadOfLValue(E);
570 }
571 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
572 if (E->getMethodDecl() &&
573 E->getMethodDecl()->getReturnType()->isReferenceType())
574 return EmitLoadOfLValue(E);
575 return CGF.EmitObjCMessageExpr(E).getScalarVal();
576 }
577
578 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
579 LValue LV = CGF.EmitObjCIsaExpr(E);
581 return V;
582 }
583
584 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
585 VersionTuple Version = E->getVersion();
586
587 // If we're checking for a platform older than our minimum deployment
588 // target, we can fold the check away.
589 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
590 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
591
592 return CGF.EmitBuiltinAvailable(Version);
593 }
594
595 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
596 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
597 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
598 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
599 Value *VisitMemberExpr(MemberExpr *E);
600 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
601 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
602 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
603 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
604 // literals aren't l-values in C++. We do so simply because that's the
605 // cleanest way to handle compound literals in C++.
606 // See the discussion here: https://reviews.llvm.org/D64464
607 return EmitLoadOfLValue(E);
608 }
609
610 Value *VisitInitListExpr(InitListExpr *E);
611
612 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
613 assert(CGF.getArrayInitIndex() &&
614 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
615 return CGF.getArrayInitIndex();
616 }
617
618 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
619 return EmitNullValue(E->getType());
620 }
621 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
622 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
623 return VisitCastExpr(E);
624 }
625 Value *VisitCastExpr(CastExpr *E);
626
627 Value *VisitCallExpr(const CallExpr *E) {
628 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
629 return EmitLoadOfLValue(E);
630
632
633 EmitLValueAlignmentAssumption(E, V);
634 return V;
635 }
636
637 Value *VisitStmtExpr(const StmtExpr *E);
638
639 // Unary Operators.
640 Value *VisitUnaryPostDec(const UnaryOperator *E) {
641 LValue LV = EmitLValue(E->getSubExpr());
642 return EmitScalarPrePostIncDec(E, LV, false, false);
643 }
644 Value *VisitUnaryPostInc(const UnaryOperator *E) {
645 LValue LV = EmitLValue(E->getSubExpr());
646 return EmitScalarPrePostIncDec(E, LV, true, false);
647 }
648 Value *VisitUnaryPreDec(const UnaryOperator *E) {
649 LValue LV = EmitLValue(E->getSubExpr());
650 return EmitScalarPrePostIncDec(E, LV, false, true);
651 }
652 Value *VisitUnaryPreInc(const UnaryOperator *E) {
653 LValue LV = EmitLValue(E->getSubExpr());
654 return EmitScalarPrePostIncDec(E, LV, true, true);
655 }
656
657 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
658 llvm::Value *InVal,
659 bool IsInc);
660
661 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
662 bool isInc, bool isPre);
663
664
665 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
666 if (isa<MemberPointerType>(E->getType())) // never sugared
667 return CGF.CGM.getMemberPointerConstant(E);
668
669 return EmitLValue(E->getSubExpr()).getPointer(CGF);
670 }
671 Value *VisitUnaryDeref(const UnaryOperator *E) {
672 if (E->getType()->isVoidType())
673 return Visit(E->getSubExpr()); // the actual value should be unused
674 return EmitLoadOfLValue(E);
675 }
676
677 Value *VisitUnaryPlus(const UnaryOperator *E,
678 QualType PromotionType = QualType());
679 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
680 Value *VisitUnaryMinus(const UnaryOperator *E,
681 QualType PromotionType = QualType());
682 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
683
684 Value *VisitUnaryNot (const UnaryOperator *E);
685 Value *VisitUnaryLNot (const UnaryOperator *E);
686 Value *VisitUnaryReal(const UnaryOperator *E,
687 QualType PromotionType = QualType());
688 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
689 Value *VisitUnaryImag(const UnaryOperator *E,
690 QualType PromotionType = QualType());
691 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
692 Value *VisitUnaryExtension(const UnaryOperator *E) {
693 return Visit(E->getSubExpr());
694 }
695
696 // C++
697 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
698 return EmitLoadOfLValue(E);
699 }
700 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
701 auto &Ctx = CGF.getContext();
705 SLE->getType());
706 }
707
708 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
710 return Visit(DAE->getExpr());
711 }
712 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
714 return Visit(DIE->getExpr());
715 }
716 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
717 return CGF.LoadCXXThis();
718 }
719
720 Value *VisitExprWithCleanups(ExprWithCleanups *E);
721 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
722 return CGF.EmitCXXNewExpr(E);
723 }
724 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
725 CGF.EmitCXXDeleteExpr(E);
726 return nullptr;
727 }
728
729 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
730 if (E->isStoredAsBoolean())
731 return llvm::ConstantInt::get(ConvertType(E->getType()),
732 E->getBoolValue());
733 assert(E->getAPValue().isInt() && "APValue type not supported");
734 return llvm::ConstantInt::get(ConvertType(E->getType()),
735 E->getAPValue().getInt());
736 }
737
738 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
739 return Builder.getInt1(E->isSatisfied());
740 }
741
742 Value *VisitRequiresExpr(const RequiresExpr *E) {
743 return Builder.getInt1(E->isSatisfied());
744 }
745
746 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
747 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
748 }
749
750 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
751 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
752 }
753
754 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
755 // C++ [expr.pseudo]p1:
756 // The result shall only be used as the operand for the function call
757 // operator (), and the result of such a call has type void. The only
758 // effect is the evaluation of the postfix-expression before the dot or
759 // arrow.
760 CGF.EmitScalarExpr(E->getBase());
761 return nullptr;
762 }
763
764 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
765 return EmitNullValue(E->getType());
766 }
767
768 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
769 CGF.EmitCXXThrowExpr(E);
770 return nullptr;
771 }
772
773 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
774 return Builder.getInt1(E->getValue());
775 }
776
777 // Binary Operators.
 /// Emit a multiplication, selecting wrapping/NSW/checked signed forms based
 /// on -f[no-]wrapv / -ftrapv and the signed-overflow sanitizer, and handling
 /// matrix, unsigned-checked, floating-point and fixed-point operands.
 778 Value *EmitMul(const BinOpInfo &Ops) {
 779 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
 780 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
 // NOTE(review): the three case labels of this switch (original lines 781,
 // 785, 789 — presumably LangOptions::SOB_Defined / SOB_Undefined /
 // SOB_Trapping) were lost in extraction; confirm against the upstream file.
 782 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 783 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
 784 [[fallthrough]];
 786 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 787 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
 788 [[fallthrough]];
 790 if (CanElideOverflowCheck(CGF.getContext(), Ops))
 791 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
 792 return EmitOverflowCheckedBinOp(Ops);
 793 }
 794 }
 795
 796 if (Ops.Ty->isConstantMatrixType()) {
 797 llvm::MatrixBuilder MB(Builder);
 798 // We need to check the types of the operands of the operator to get the
 799 // correct matrix dimensions.
 800 auto *BO = cast<BinaryOperator>(Ops.E);
 801 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
 802 BO->getLHS()->getType().getCanonicalType());
 803 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
 804 BO->getRHS()->getType().getCanonicalType());
 805 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
 // Matrix*matrix uses a true matrix multiply; matrix*scalar broadcasts.
 806 if (LHSMatTy && RHSMatTy)
 807 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
 808 LHSMatTy->getNumColumns(),
 809 RHSMatTy->getNumColumns());
 810 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
 811 }
 812
 // Unsigned multiply only gets a checked form under the unsigned-overflow
 // sanitizer, and only when the check cannot be statically elided.
 813 if (Ops.Ty->isUnsignedIntegerType() &&
 814 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
 815 !CanElideOverflowCheck(CGF.getContext(), Ops))
 816 return EmitOverflowCheckedBinOp(Ops);
 817
 818 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
 819 // Preserve the old values
 820 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
 821 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
 822 }
 823 if (Ops.isFixedPointOp())
 824 return EmitFixedPointBinOp(Ops);
 825 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
 826 }
827 /// Create a binary op that checks for overflow.
828 /// Currently only supports +, - and *.
829 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
830
831 // Check for undefined division and modulus behaviors.
832 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
833 llvm::Value *Zero,bool isDiv);
834 // Common helper for getting how wide LHS of shift is.
835 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
836
837 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
838 // non powers of two.
839 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
840
841 Value *EmitDiv(const BinOpInfo &Ops);
842 Value *EmitRem(const BinOpInfo &Ops);
843 Value *EmitAdd(const BinOpInfo &Ops);
844 Value *EmitSub(const BinOpInfo &Ops);
845 Value *EmitShl(const BinOpInfo &Ops);
846 Value *EmitShr(const BinOpInfo &Ops);
847 Value *EmitAnd(const BinOpInfo &Ops) {
848 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
849 }
850 Value *EmitXor(const BinOpInfo &Ops) {
851 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
852 }
853 Value *EmitOr (const BinOpInfo &Ops) {
854 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
855 }
856
857 // Helper functions for fixed point binary operations.
858 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
859
860 BinOpInfo EmitBinOps(const BinaryOperator *E,
861 QualType PromotionTy = QualType());
862
863 Value *EmitPromotedValue(Value *result, QualType PromotionType);
864 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
865 Value *EmitPromoted(const Expr *E, QualType PromotionType);
866
867 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
868 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
869 Value *&Result);
870
871 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
872 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
873
874 QualType getPromotionType(QualType Ty) {
875 const auto &Ctx = CGF.getContext();
876 if (auto *CT = Ty->getAs<ComplexType>()) {
877 QualType ElementType = CT->getElementType();
878 if (ElementType.UseExcessPrecision(Ctx))
879 return Ctx.getComplexType(Ctx.FloatTy);
880 }
881
882 if (Ty.UseExcessPrecision(Ctx)) {
883 if (auto *VT = Ty->getAs<VectorType>()) {
884 unsigned NumElements = VT->getNumElements();
885 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
886 }
887 return Ctx.FloatTy;
888 }
889
890 return QualType();
891 }
892
893 // Binary operators and binary compound assignment operators.
894#define HANDLEBINOP(OP) \
895 Value *VisitBin##OP(const BinaryOperator *E) { \
896 QualType promotionTy = getPromotionType(E->getType()); \
897 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
898 if (result && !promotionTy.isNull()) \
899 result = EmitUnPromotedValue(result, E->getType()); \
900 return result; \
901 } \
902 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
903 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
904 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
905 }
906 HANDLEBINOP(Mul)
907 HANDLEBINOP(Div)
908 HANDLEBINOP(Rem)
909 HANDLEBINOP(Add)
910 HANDLEBINOP(Sub)
911 HANDLEBINOP(Shl)
912 HANDLEBINOP(Shr)
914 HANDLEBINOP(Xor)
916#undef HANDLEBINOP
917
918 // Comparisons.
  // Comparisons. EmitCompare selects the unsigned-int, signed-int, or
  // floating-point predicate based on the operand types.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Relational comparisons (<, >, <=, >=) pass IsSignaling=true; equality
  // comparisons pass false. NE uses the unordered FCMP_UNE predicate so that
  // a comparison involving NaN yields true, while the others use ordered
  // predicates (false on NaN).
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
933
934 Value *VisitBinAssign (const BinaryOperator *E);
935
936 Value *VisitBinLAnd (const BinaryOperator *E);
937 Value *VisitBinLOr (const BinaryOperator *E);
938 Value *VisitBinComma (const BinaryOperator *E);
939
  // Pointer-to-member access ('.*' and '->*'): these form an lvalue, so emit
  // a scalar load of it.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // C++20 rewritten comparison operators: emit the semantic form that Sema
  // already synthesized for the rewritten expression.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }
946
  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions delegate to CodeGenFunction, which emits
  // the corresponding runtime construction calls.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack-indexing expression selects a single expression from a pack;
  // just emit the selected expression.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
969};
970} // end anonymous namespace.
971
972//===----------------------------------------------------------------------===//
973// Utilities
974//===----------------------------------------------------------------------===//
975
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  // Real floating-point values use a dedicated FP-to-bool helper.
  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  // Member pointers have an ABI-specific null representation, so the C++ ABI
  // object decides how to test them for non-null.
  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  // Integers (including enums lowered to integers).
  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  // Only pointers remain at this point.
  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}
996
997void ScalarExprEmitter::EmitFloatConversionCheck(
998 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
999 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1000 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1001 if (!isa<llvm::IntegerType>(DstTy))
1002 return;
1003
1004 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1005 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1006 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1007 using llvm::APFloat;
1008 using llvm::APSInt;
1009
1010 llvm::Value *Check = nullptr;
1011 const llvm::fltSemantics &SrcSema =
1012 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1013
1014 // Floating-point to integer. This has undefined behavior if the source is
1015 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1016 // to an integer).
1017 unsigned Width = CGF.getContext().getIntWidth(DstType);
1019
1020 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1021 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1022 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1023 APFloat::opOverflow)
1024 // Don't need an overflow check for lower bound. Just check for
1025 // -Inf/NaN.
1026 MinSrc = APFloat::getInf(SrcSema, true);
1027 else
1028 // Find the largest value which is too small to represent (before
1029 // truncation toward zero).
1030 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1031
1032 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1033 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1034 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1035 APFloat::opOverflow)
1036 // Don't need an overflow check for upper bound. Just check for
1037 // +Inf/NaN.
1038 MaxSrc = APFloat::getInf(SrcSema, false);
1039 else
1040 // Find the smallest value which is too large to represent (before
1041 // truncation toward zero).
1042 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1043
1044 // If we're converting from __half, convert the range to float to match
1045 // the type of src.
1046 if (OrigSrcType->isHalfType()) {
1047 const llvm::fltSemantics &Sema =
1048 CGF.getContext().getFloatTypeSemantics(SrcType);
1049 bool IsInexact;
1050 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1051 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1052 }
1053
1054 llvm::Value *GE =
1055 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1056 llvm::Value *LE =
1057 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1058 Check = Builder.CreateAnd(GE, LE);
1059
1060 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1061 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1062 CGF.EmitCheckTypeDescriptor(DstType)};
1063 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1064 OrigSrc);
1065}
1066
1067// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1068// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1069static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1070 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1072 QualType DstType, CGBuilderTy &Builder) {
1073 llvm::Type *SrcTy = Src->getType();
1074 llvm::Type *DstTy = Dst->getType();
1075 (void)DstTy; // Only used in assert()
1076
1077 // This should be truncation of integral types.
1078 assert(Src != Dst);
1079 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1080 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1081 "non-integer llvm type");
1082
1083 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1084 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1085
1086 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1087 // Else, it is a signed truncation.
1088 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1090 if (!SrcSigned && !DstSigned) {
1091 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1092 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1093 } else {
1094 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1095 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1096 }
1097
1098 llvm::Value *Check = nullptr;
1099 // 1. Extend the truncated value back to the same width as the Src.
1100 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1101 // 2. Equality-compare with the original source value
1102 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1103 // If the comparison result is 'i1 false', then the truncation was lossy.
1104 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1105}
1106
1108 QualType SrcType, QualType DstType) {
1109 return SrcType->isIntegerType() && DstType->isIntegerType();
1110}
1111
1112void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1113 Value *Dst, QualType DstType,
1115 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1116 return;
1117
1118 // We only care about int->int conversions here.
1119 // We ignore conversions to/from pointer and/or bool.
1121 DstType))
1122 return;
1123
1124 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1125 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1126 // This must be truncation. Else we do not care.
1127 if (SrcBits <= DstBits)
1128 return;
1129
1130 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1131
1132 // If the integer sign change sanitizer is enabled,
1133 // and we are truncating from larger unsigned type to smaller signed type,
1134 // let that next sanitizer deal with it.
1135 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1136 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1137 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1138 (!SrcSigned && DstSigned))
1139 return;
1140
1141 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1142 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1143 Check;
1144
1145 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1146 {
1147 // We don't know the check kind until we call
1148 // EmitIntegerTruncationCheckHelper, but we want to annotate
1149 // EmitIntegerTruncationCheckHelper's instructions too.
1150 SanitizerDebugLocation SanScope(
1151 &CGF,
1152 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1153 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1154 CheckHandler);
1155 Check =
1156 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1157 // If the comparison result is 'i1 false', then the truncation was lossy.
1158 }
1159
1160 // Do we care about this type of truncation?
1161 if (!CGF.SanOpts.has(Check.second.second))
1162 return;
1163
1164 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1165
1166 // Does some SSCL ignore this type?
1168 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1169 return;
1170
1171 llvm::Constant *StaticArgs[] = {
1173 CGF.EmitCheckTypeDescriptor(DstType),
1174 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1175 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1176
1177 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1178}
1179
1180static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1181 const char *Name,
1182 CGBuilderTy &Builder) {
1183 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1184 llvm::Type *VTy = V->getType();
1185 if (!VSigned) {
1186 // If the value is unsigned, then it is never negative.
1187 return llvm::ConstantInt::getFalse(VTy->getContext());
1188 }
1189 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1190 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1191 llvm::Twine(Name) + "." + V->getName() +
1192 ".negativitycheck");
1193}
1194
1195// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1196// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1197static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1198 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1200 QualType DstType, CGBuilderTy &Builder) {
1201 llvm::Type *SrcTy = Src->getType();
1202 llvm::Type *DstTy = Dst->getType();
1203
1204 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1205 "non-integer llvm type");
1206
1207 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1208 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1209 (void)SrcSigned; // Only used in assert()
1210 (void)DstSigned; // Only used in assert()
1211 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1212 unsigned DstBits = DstTy->getScalarSizeInBits();
1213 (void)SrcBits; // Only used in assert()
1214 (void)DstBits; // Only used in assert()
1215
1216 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1217 "either the widths should be different, or the signednesses.");
1218
1219 // 1. Was the old Value negative?
1220 llvm::Value *SrcIsNegative =
1221 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1222 // 2. Is the new Value negative?
1223 llvm::Value *DstIsNegative =
1224 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1225 // 3. Now, was the 'negativity status' preserved during the conversion?
1226 // NOTE: conversion from negative to zero is considered to change the sign.
1227 // (We want to get 'false' when the conversion changed the sign)
1228 // So we should just equality-compare the negativity statuses.
1229 llvm::Value *Check = nullptr;
1230 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1231 // If the comparison result is 'false', then the conversion changed the sign.
1232 return std::make_pair(
1233 ScalarExprEmitter::ICCK_IntegerSignChange,
1234 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1235}
1236
1237void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1238 Value *Dst, QualType DstType,
1240 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1241 return;
1242
1243 llvm::Type *SrcTy = Src->getType();
1244 llvm::Type *DstTy = Dst->getType();
1245
1246 // We only care about int->int conversions here.
1247 // We ignore conversions to/from pointer and/or bool.
1249 DstType))
1250 return;
1251
1252 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1253 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1254 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1255 unsigned DstBits = DstTy->getScalarSizeInBits();
1256
1257 // Now, we do not need to emit the check in *all* of the cases.
1258 // We can avoid emitting it in some obvious cases where it would have been
1259 // dropped by the opt passes (instcombine) always anyways.
1260 // If it's a cast between effectively the same type, no check.
1261 // NOTE: this is *not* equivalent to checking the canonical types.
1262 if (SrcSigned == DstSigned && SrcBits == DstBits)
1263 return;
1264 // At least one of the values needs to have signed type.
1265 // If both are unsigned, then obviously, neither of them can be negative.
1266 if (!SrcSigned && !DstSigned)
1267 return;
1268 // If the conversion is to *larger* *signed* type, then no check is needed.
1269 // Because either sign-extension happens (so the sign will remain),
1270 // or zero-extension will happen (the sign bit will be zero.)
1271 if ((DstBits > SrcBits) && DstSigned)
1272 return;
1273 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1274 (SrcBits > DstBits) && SrcSigned) {
1275 // If the signed integer truncation sanitizer is enabled,
1276 // and this is a truncation from signed type, then no check is needed.
1277 // Because here sign change check is interchangeable with truncation check.
1278 return;
1279 }
1280 // Does an SSCL have an entry for the DstType under its respective sanitizer
1281 // section?
1282 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1283 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1284 return;
1285 if (!DstSigned &&
1287 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1288 return;
1289 // That's it. We can't rule out any more cases with the data we have.
1290
1291 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1292 SanitizerDebugLocation SanScope(
1293 &CGF,
1294 {SanitizerKind::SO_ImplicitIntegerSignChange,
1295 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1296 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1297 CheckHandler);
1298
1299 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1300 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1301 Check;
1302
1303 // Each of these checks needs to return 'false' when an issue was detected.
1304 ImplicitConversionCheckKind CheckKind;
1306 2>
1307 Checks;
1308 // So we can 'and' all the checks together, and still get 'false',
1309 // if at least one of the checks detected an issue.
1310
1311 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1312 CheckKind = Check.first;
1313 Checks.emplace_back(Check.second);
1314
1315 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1316 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1317 // If the signed integer truncation sanitizer was enabled,
1318 // and we are truncating from larger unsigned type to smaller signed type,
1319 // let's handle the case we skipped in that check.
1320 Check =
1321 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1322 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1323 Checks.emplace_back(Check.second);
1324 // If the comparison result is 'i1 false', then the truncation was lossy.
1325 }
1326
1327 llvm::Constant *StaticArgs[] = {
1329 CGF.EmitCheckTypeDescriptor(DstType),
1330 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1331 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1332 // EmitCheck() will 'and' all the checks together.
1333 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1334}
1335
1336// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1337// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1338static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1339 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1341 QualType DstType, CGBuilderTy &Builder) {
1342 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1343 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1344
1345 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1346 if (!SrcSigned && !DstSigned)
1347 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1348 else
1349 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1350
1351 llvm::Value *Check = nullptr;
1352 // 1. Extend the truncated value back to the same width as the Src.
1353 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1354 // 2. Equality-compare with the original source value
1355 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1356 // If the comparison result is 'i1 false', then the truncation was lossy.
1357
1358 return std::make_pair(
1359 Kind,
1360 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1361}
1362
1363// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1364// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1365static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1366 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1368 QualType DstType, CGBuilderTy &Builder) {
1369 // 1. Was the old Value negative?
1370 llvm::Value *SrcIsNegative =
1371 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1372 // 2. Is the new Value negative?
1373 llvm::Value *DstIsNegative =
1374 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1375 // 3. Now, was the 'negativity status' preserved during the conversion?
1376 // NOTE: conversion from negative to zero is considered to change the sign.
1377 // (We want to get 'false' when the conversion changed the sign)
1378 // So we should just equality-compare the negativity statuses.
1379 llvm::Value *Check = nullptr;
1380 Check =
1381 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1382 // If the comparison result is 'false', then the conversion changed the sign.
1383 return std::make_pair(
1384 ScalarExprEmitter::ICCK_IntegerSignChange,
1385 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1386}
1387
1389 Value *Dst, QualType DstType,
1390 const CGBitFieldInfo &Info,
1392
1393 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1394 return;
1395
1396 // We only care about int->int conversions here.
1397 // We ignore conversions to/from pointer and/or bool.
1399 DstType))
1400 return;
1401
1402 if (DstType->isBooleanType() || SrcType->isBooleanType())
1403 return;
1404
1405 // This should be truncation of integral types.
1406 assert(isa<llvm::IntegerType>(Src->getType()) &&
1407 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1408
1409 // TODO: Calculate src width to avoid emitting code
1410 // for unecessary cases.
1411 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1412 unsigned DstBits = Info.Size;
1413
1414 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1415 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1416
1417 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1418 SanitizerDebugLocation SanScope(
1419 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1420
1421 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1422 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1423 Check;
1424
1425 // Truncation
1426 bool EmitTruncation = DstBits < SrcBits;
1427 // If Dst is signed and Src unsigned, we want to be more specific
1428 // about the CheckKind we emit, in this case we want to emit
1429 // ICCK_SignedIntegerTruncationOrSignChange.
1430 bool EmitTruncationFromUnsignedToSigned =
1431 EmitTruncation && DstSigned && !SrcSigned;
1432 // Sign change
1433 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1434 bool BothUnsigned = !SrcSigned && !DstSigned;
1435 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1436 // We can avoid emitting sign change checks in some obvious cases
1437 // 1. If Src and Dst have the same signedness and size
1438 // 2. If both are unsigned sign check is unecessary!
1439 // 3. If Dst is signed and bigger than Src, either
1440 // sign-extension or zero-extension will make sure
1441 // the sign remains.
1442 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1443
1444 if (EmitTruncation)
1445 Check =
1446 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1447 else if (EmitSignChange) {
1448 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1449 "either the widths should be different, or the signednesses.");
1450 Check =
1451 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1452 } else
1453 return;
1454
1455 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1456 if (EmitTruncationFromUnsignedToSigned)
1457 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1458
1459 llvm::Constant *StaticArgs[] = {
1461 EmitCheckTypeDescriptor(DstType),
1462 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1463 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1464
1465 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1466}
1467
/// EmitScalarCast - Emit a cast between two arithmetic LLVM types (possibly
/// matrix types, which are lowered to vectors), choosing the cast instruction
/// from the source/destination element types.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered to LLVM vectors; cast element-wise on the vector.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    // Integer source: int->int cast or int->fp conversion. The source
    // signedness selects sext/zext and sitofp/uitofp.
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    // Floating-point source, integer destination.
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // FP -> FP. A conversion between two different 16-bit FP types (e.g. half
  // and bfloat) is bridged through float: extend, then truncate.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // Otherwise choose fptrunc vs. fpext by comparing TypeIDs, which for FP
  // types are ordered from narrower to wider.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1530
1531/// Emit a conversion from the specified type to the specified destination type,
1532/// both of which are LLVM scalar types.
1533Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1534 QualType DstType,
1536 ScalarConversionOpts Opts) {
1537 // All conversions involving fixed point types should be handled by the
1538 // EmitFixedPoint family functions. This is done to prevent bloating up this
1539 // function more, and although fixed point numbers are represented by
1540 // integers, we do not want to follow any logic that assumes they should be
1541 // treated as integers.
1542 // TODO(leonardchan): When necessary, add another if statement checking for
1543 // conversions to fixed point types from other types.
1544 if (SrcType->isFixedPointType()) {
1545 if (DstType->isBooleanType())
1546 // It is important that we check this before checking if the dest type is
1547 // an integer because booleans are technically integer types.
1548 // We do not need to check the padding bit on unsigned types if unsigned
1549 // padding is enabled because overflow into this bit is undefined
1550 // behavior.
1551 return Builder.CreateIsNotNull(Src, "tobool");
1552 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1553 DstType->isRealFloatingType())
1554 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1555
1556 llvm_unreachable(
1557 "Unhandled scalar conversion from a fixed point type to another type.");
1558 } else if (DstType->isFixedPointType()) {
1559 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1560 // This also includes converting booleans and enums to fixed point types.
1561 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1562
1563 llvm_unreachable(
1564 "Unhandled scalar conversion to a fixed point type from another type.");
1565 }
1566
1567 QualType NoncanonicalSrcType = SrcType;
1568 QualType NoncanonicalDstType = DstType;
1569
1570 SrcType = CGF.getContext().getCanonicalType(SrcType);
1571 DstType = CGF.getContext().getCanonicalType(DstType);
1572 if (SrcType == DstType) return Src;
1573
1574 if (DstType->isVoidType()) return nullptr;
1575
1576 llvm::Value *OrigSrc = Src;
1577 QualType OrigSrcType = SrcType;
1578 llvm::Type *SrcTy = Src->getType();
1579
1580 // Handle conversions to bool first, they are special: comparisons against 0.
1581 if (DstType->isBooleanType())
1582 return EmitConversionToBool(Src, SrcType);
1583
1584 llvm::Type *DstTy = ConvertType(DstType);
1585
1586 // Cast from half through float if half isn't a native type.
1587 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1588 // Cast to FP using the intrinsic if the half type itself isn't supported.
1589 if (DstTy->isFloatingPointTy()) {
1591 return Builder.CreateCall(
1592 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1593 Src);
1594 } else {
1595 // Cast to other types through float, using either the intrinsic or FPExt,
1596 // depending on whether the half type itself is supported
1597 // (as opposed to operations on half, available with NativeHalfType).
1599 Src = Builder.CreateCall(
1600 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1601 CGF.CGM.FloatTy),
1602 Src);
1603 } else {
1604 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1605 }
1606 SrcType = CGF.getContext().FloatTy;
1607 SrcTy = CGF.FloatTy;
1608 }
1609 }
1610
1611 // Ignore conversions like int -> uint.
1612 if (SrcTy == DstTy) {
1613 if (Opts.EmitImplicitIntegerSignChangeChecks)
1614 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1615 NoncanonicalDstType, Loc);
1616
1617 return Src;
1618 }
1619
1620 // Handle pointer conversions next: pointers can only be converted to/from
1621 // other pointers and integers. Check for pointer types in terms of LLVM, as
1622 // some native types (like Obj-C id) may map to a pointer type.
1623 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1624 // The source value may be an integer, or a pointer.
1625 if (isa<llvm::PointerType>(SrcTy))
1626 return Src;
1627
1628 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1629 // First, convert to the correct width so that we control the kind of
1630 // extension.
1631 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1632 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1633 llvm::Value* IntResult =
1634 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1635 // Then, cast to pointer.
1636 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1637 }
1638
1639 if (isa<llvm::PointerType>(SrcTy)) {
1640 // Must be an ptr to int cast.
1641 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1642 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1643 }
1644
1645 // A scalar can be splatted to an extended vector of the same element type
1646 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1647 // Sema should add casts to make sure that the source expression's type is
1648 // the same as the vector's element type (sans qualifiers)
1649 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1650 SrcType.getTypePtr() &&
1651 "Splatted expr doesn't match with vector element type?");
1652
1653 // Splat the element across to all elements
1654 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1655 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1656 }
1657
1658 if (SrcType->isMatrixType() && DstType->isMatrixType())
1659 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1660
1661 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1662 // Allow bitcast from vector to integer/fp of the same size.
1663 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1664 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1665 if (SrcSize == DstSize)
1666 return Builder.CreateBitCast(Src, DstTy, "conv");
1667
1668 // Conversions between vectors of different sizes are not allowed except
1669 // when vectors of half are involved. Operations on storage-only half
1670 // vectors require promoting half vector operands to float vectors and
1671 // truncating the result, which is either an int or float vector, to a
1672 // short or half vector.
1673
1674 // Source and destination are both expected to be vectors.
1675 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1676 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1677 (void)DstElementTy;
1678
1679 assert(((SrcElementTy->isIntegerTy() &&
1680 DstElementTy->isIntegerTy()) ||
1681 (SrcElementTy->isFloatingPointTy() &&
1682 DstElementTy->isFloatingPointTy())) &&
1683 "unexpected conversion between a floating-point vector and an "
1684 "integer vector");
1685
1686 // Truncate an i32 vector to an i16 vector.
1687 if (SrcElementTy->isIntegerTy())
1688 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1689
1690 // Truncate a float vector to a half vector.
1691 if (SrcSize > DstSize)
1692 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1693
1694 // Promote a half vector to a float vector.
1695 return Builder.CreateFPExt(Src, DstTy, "conv");
1696 }
1697
1698 // Finally, we have the arithmetic types: real int/float.
1699 Value *Res = nullptr;
1700 llvm::Type *ResTy = DstTy;
1701
1702 // An overflowing conversion has undefined behavior if either the source type
1703 // or the destination type is a floating-point type. However, we consider the
1704 // range of representable values for all floating-point types to be
1705 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1706 // floating-point type.
1707 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1708 OrigSrcType->isFloatingType())
1709 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1710 Loc);
1711
1712 // Cast to half through float if half isn't a native type.
1713 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1714 // Make sure we cast in a single step if from another FP type.
1715 if (SrcTy->isFloatingPointTy()) {
1716 // Use the intrinsic if the half type itself isn't supported
1717 // (as opposed to operations on half, available with NativeHalfType).
1719 return Builder.CreateCall(
1720 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1721 // If the half type is supported, just use an fptrunc.
1722 return Builder.CreateFPTrunc(Src, DstTy);
1723 }
1724 DstTy = CGF.FloatTy;
1725 }
1726
1727 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1728
1729 if (DstTy != ResTy) {
1731 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1732 Res = Builder.CreateCall(
1733 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1734 Res);
1735 } else {
1736 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1737 }
1738 }
1739
1740 if (Opts.EmitImplicitIntegerTruncationChecks)
1741 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1742 NoncanonicalDstType, Loc);
1743
1744 if (Opts.EmitImplicitIntegerSignChangeChecks)
1745 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1746 NoncanonicalDstType, Loc);
1747
1748 return Res;
1749}
1750
1751Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1752 QualType DstTy,
1754 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1755 llvm::Value *Result;
1756 if (SrcTy->isRealFloatingType())
1757 Result = FPBuilder.CreateFloatingToFixed(Src,
1758 CGF.getContext().getFixedPointSemantics(DstTy));
1759 else if (DstTy->isRealFloatingType())
1760 Result = FPBuilder.CreateFixedToFloating(Src,
1762 ConvertType(DstTy));
1763 else {
1764 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1765 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1766
1767 if (DstTy->isIntegerType())
1768 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1769 DstFPSema.getWidth(),
1770 DstFPSema.isSigned());
1771 else if (SrcTy->isIntegerType())
1772 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1773 DstFPSema);
1774 else
1775 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1776 }
1777 return Result;
1778}
1779
1780/// Emit a conversion from the specified complex type to the specified
1781/// destination type, where the destination type is an LLVM scalar type.
1782Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1785 // Get the source element type.
1786 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1787
1788 // Handle conversions to bool first, they are special: comparisons against 0.
1789 if (DstTy->isBooleanType()) {
1790 // Complex != 0 -> (Real != 0) | (Imag != 0)
1791 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1792 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1793 return Builder.CreateOr(Src.first, Src.second, "tobool");
1794 }
1795
1796 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1797 // the imaginary part of the complex value is discarded and the value of the
1798 // real part is converted according to the conversion rules for the
1799 // corresponding real type.
1800 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1801}
1802
1803Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1804 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1805}
1806
1807/// Emit a sanitization check for the given "binary" operation (which
1808/// might actually be a unary increment which has been lowered to a binary
1809/// operation). The check passes if all values in \p Checks (which are \c i1),
1810/// are \c true.
1811void ScalarExprEmitter::EmitBinOpCheck(
1812 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1813 const BinOpInfo &Info) {
1814 assert(CGF.IsSanitizerScope);
1815 SanitizerHandler Check;
1818 TrapReason TR;
1819
1820 BinaryOperatorKind Opcode = Info.Opcode;
1823
1824 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1825 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1826 if (UO && UO->getOpcode() == UO_Minus) {
1827 Check = SanitizerHandler::NegateOverflow;
1828 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1829 DynamicData.push_back(Info.RHS);
1830 } else {
1831 if (BinaryOperator::isShiftOp(Opcode)) {
1832 // Shift LHS negative or too large, or RHS out of bounds.
1833 Check = SanitizerHandler::ShiftOutOfBounds;
1834 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1835 StaticData.push_back(
1836 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1837 StaticData.push_back(
1838 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1839 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1840 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1841 Check = SanitizerHandler::DivremOverflow;
1842 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1843 } else {
1844 // Arithmetic overflow (+, -, *).
1845 int ArithOverflowKind = 0;
1846 switch (Opcode) {
1847 case BO_Add: {
1848 Check = SanitizerHandler::AddOverflow;
1849 ArithOverflowKind = diag::UBSanArithKind::Add;
1850 break;
1851 }
1852 case BO_Sub: {
1853 Check = SanitizerHandler::SubOverflow;
1854 ArithOverflowKind = diag::UBSanArithKind::Sub;
1855 break;
1856 }
1857 case BO_Mul: {
1858 Check = SanitizerHandler::MulOverflow;
1859 ArithOverflowKind = diag::UBSanArithKind::Mul;
1860 break;
1861 }
1862 default:
1863 llvm_unreachable("unexpected opcode for bin op check");
1864 }
1865 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1867 SanitizerKind::UnsignedIntegerOverflow) ||
1869 SanitizerKind::SignedIntegerOverflow)) {
1870 // Only pay the cost for constructing the trap diagnostic if they are
1871 // going to be used.
1872 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1873 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1874 << Info.E;
1875 }
1876 }
1877 DynamicData.push_back(Info.LHS);
1878 DynamicData.push_back(Info.RHS);
1879 }
1880
1881 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1882}
1883
1884//===----------------------------------------------------------------------===//
1885// Visitor Methods
1886//===----------------------------------------------------------------------===//
1887
1888Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1889 CGF.ErrorUnsupported(E, "scalar expression");
1890 if (E->getType()->isVoidType())
1891 return nullptr;
1892 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1893}
1894
1895Value *
1896ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1897 ASTContext &Context = CGF.getContext();
1898 unsigned AddrSpace =
1900 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1901 E->ComputeName(Context), "__usn_str", AddrSpace);
1902
1903 llvm::Type *ExprTy = ConvertType(E->getType());
1904 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1905 "usn_addr_cast");
1906}
1907
1908Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1909 assert(E->getDataElementCount() == 1);
1910 auto It = E->begin();
1911 return Builder.getInt((*It)->getValue());
1912}
1913
1914Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1915 // Vector Mask Case
1916 if (E->getNumSubExprs() == 2) {
1917 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1918 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1919 Value *Mask;
1920
1921 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1922 unsigned LHSElts = LTy->getNumElements();
1923
1924 Mask = RHS;
1925
1926 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1927
1928 // Mask off the high bits of each shuffle index.
1929 Value *MaskBits =
1930 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1931 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1932
1933 // newv = undef
1934 // mask = mask & maskbits
1935 // for each elt
1936 // n = extract mask i
1937 // x = extract val n
1938 // newv = insert newv, x, i
1939 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1940 MTy->getNumElements());
1941 Value* NewV = llvm::PoisonValue::get(RTy);
1942 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1943 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1944 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1945
1946 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1947 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1948 }
1949 return NewV;
1950 }
1951
1952 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1953 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1954
1955 SmallVector<int, 32> Indices;
1956 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1957 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
1958 // Check for -1 and output it as undef in the IR.
1959 if (Idx.isSigned() && Idx.isAllOnes())
1960 Indices.push_back(-1);
1961 else
1962 Indices.push_back(Idx.getZExtValue());
1963 }
1964
1965 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1966}
1967
1968Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1969 QualType SrcType = E->getSrcExpr()->getType(),
1970 DstType = E->getType();
1971
1972 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1973
1974 SrcType = CGF.getContext().getCanonicalType(SrcType);
1975 DstType = CGF.getContext().getCanonicalType(DstType);
1976 if (SrcType == DstType) return Src;
1977
1978 assert(SrcType->isVectorType() &&
1979 "ConvertVector source type must be a vector");
1980 assert(DstType->isVectorType() &&
1981 "ConvertVector destination type must be a vector");
1982
1983 llvm::Type *SrcTy = Src->getType();
1984 llvm::Type *DstTy = ConvertType(DstType);
1985
1986 // Ignore conversions like int -> uint.
1987 if (SrcTy == DstTy)
1988 return Src;
1989
1990 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1991 DstEltType = DstType->castAs<VectorType>()->getElementType();
1992
1993 assert(SrcTy->isVectorTy() &&
1994 "ConvertVector source IR type must be a vector");
1995 assert(DstTy->isVectorTy() &&
1996 "ConvertVector destination IR type must be a vector");
1997
1998 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1999 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2000
2001 if (DstEltType->isBooleanType()) {
2002 assert((SrcEltTy->isFloatingPointTy() ||
2003 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2004
2005 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2006 if (SrcEltTy->isFloatingPointTy()) {
2008 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2009 } else {
2010 return Builder.CreateICmpNE(Src, Zero, "tobool");
2011 }
2012 }
2013
2014 // We have the arithmetic types: real int/float.
2015 Value *Res = nullptr;
2016
2017 if (isa<llvm::IntegerType>(SrcEltTy)) {
2018 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2019 if (isa<llvm::IntegerType>(DstEltTy))
2020 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2021 else {
2023 if (InputSigned)
2024 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2025 else
2026 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2027 }
2028 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2029 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2031 if (DstEltType->isSignedIntegerOrEnumerationType())
2032 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2033 else
2034 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2035 } else {
2036 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2037 "Unknown real conversion");
2039 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2040 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2041 else
2042 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2043 }
2044
2045 return Res;
2046}
2047
2048Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2050 CGF.EmitIgnoredExpr(E->getBase());
2051 return CGF.emitScalarConstant(Constant, E);
2052 } else {
2055 llvm::APSInt Value = Result.Val.getInt();
2056 CGF.EmitIgnoredExpr(E->getBase());
2057 return Builder.getInt(Value);
2058 }
2059 }
2060
2061 llvm::Value *Result = EmitLoadOfLValue(E);
2062
2063 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2064 // debug info for the pointer, even if there is no variable associated with
2065 // the pointer's expression.
2066 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2067 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2068 if (llvm::GetElementPtrInst *GEP =
2069 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2070 if (llvm::Instruction *Pointer =
2071 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2072 QualType Ty = E->getBase()->getType();
2073 if (!E->isArrow())
2074 Ty = CGF.getContext().getPointerType(Ty);
2075 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2076 }
2077 }
2078 }
2079 }
2080 return Result;
2081}
2082
2083Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2084 TestAndClearIgnoreResultAssign();
2085
2086 // Emit subscript expressions in rvalue context's. For most cases, this just
2087 // loads the lvalue formed by the subscript expr. However, we have to be
2088 // careful, because the base of a vector subscript is occasionally an rvalue,
2089 // so we can't get it as an lvalue.
2090 if (!E->getBase()->getType()->isVectorType() &&
2091 !E->getBase()->getType()->isSveVLSBuiltinType())
2092 return EmitLoadOfLValue(E);
2093
2094 // Handle the vector case. The base must be a vector, the index must be an
2095 // integer value.
2096 Value *Base = Visit(E->getBase());
2097 Value *Idx = Visit(E->getIdx());
2098 QualType IdxTy = E->getIdx()->getType();
2099
2100 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2101 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2102
2103 return Builder.CreateExtractElement(Base, Idx, "vecext");
2104}
2105
2106Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2107 TestAndClearIgnoreResultAssign();
2108
2109 // Handle the vector case. The base must be a vector, the index must be an
2110 // integer value.
2111 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2112 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2113
2114 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2115 unsigned NumRows = MatrixTy->getNumRows();
2116 llvm::MatrixBuilder MB(Builder);
2117 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2118 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2119 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2120
2121 Value *Matrix = Visit(E->getBase());
2122
2123 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2124 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2125}
2126
2127static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2128 unsigned Off) {
2129 int MV = SVI->getMaskValue(Idx);
2130 if (MV == -1)
2131 return -1;
2132 return Off + MV;
2133}
2134
2135static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2136 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2137 "Index operand too large for shufflevector mask!");
2138 return C->getZExtValue();
2139}
2140
2141Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2142 bool Ignore = TestAndClearIgnoreResultAssign();
2143 (void)Ignore;
2144 unsigned NumInitElements = E->getNumInits();
2145 assert(Ignore == false ||
2146 (NumInitElements == 0 && E->getType()->isVoidType()) &&
2147 "init list ignored");
2148
2149 // HLSL initialization lists in the AST are an expansion which can contain
2150 // side-effecting expressions wrapped in opaque value expressions. To properly
2151 // emit these we need to emit the opaque values before we emit the argument
2152 // expressions themselves. This is a little hacky, but it prevents us needing
2153 // to do a bigger AST-level change for a language feature that we need
2154 // deprecate in the near future. See related HLSL language proposals in the
2155 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2156 // * 0005-strict-initializer-lists.md
2157 // * 0032-constructors.md
2158 if (CGF.getLangOpts().HLSL)
2160
2161 if (E->hadArrayRangeDesignator())
2162 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2163
2164 llvm::VectorType *VType =
2165 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2166
2167 if (!VType) {
2168 if (NumInitElements == 0) {
2169 // C++11 value-initialization for the scalar.
2170 return EmitNullValue(E->getType());
2171 }
2172 // We have a scalar in braces. Just use the first element.
2173 return Visit(E->getInit(0));
2174 }
2175
2176 if (isa<llvm::ScalableVectorType>(VType)) {
2177 if (NumInitElements == 0) {
2178 // C++11 value-initialization for the vector.
2179 return EmitNullValue(E->getType());
2180 }
2181
2182 if (NumInitElements == 1) {
2183 Expr *InitVector = E->getInit(0);
2184
2185 // Initialize from another scalable vector of the same type.
2186 if (InitVector->getType().getCanonicalType() ==
2188 return Visit(InitVector);
2189 }
2190
2191 llvm_unreachable("Unexpected initialization of a scalable vector!");
2192 }
2193
2194 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2195
2196 // Loop over initializers collecting the Value for each, and remembering
2197 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2198 // us to fold the shuffle for the swizzle into the shuffle for the vector
2199 // initializer, since LLVM optimizers generally do not want to touch
2200 // shuffles.
2201 unsigned CurIdx = 0;
2202 bool VIsPoisonShuffle = false;
2203 llvm::Value *V = llvm::PoisonValue::get(VType);
2204 for (unsigned i = 0; i != NumInitElements; ++i) {
2205 Expr *IE = E->getInit(i);
2206 Value *Init = Visit(IE);
2208
2209 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2210
2211 // Handle scalar elements. If the scalar initializer is actually one
2212 // element of a different vector of the same width, use shuffle instead of
2213 // extract+insert.
2214 if (!VVT) {
2215 if (isa<ExtVectorElementExpr>(IE)) {
2216 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2217
2218 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2219 ->getNumElements() == ResElts) {
2220 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2221 Value *LHS = nullptr, *RHS = nullptr;
2222 if (CurIdx == 0) {
2223 // insert into poison -> shuffle (src, poison)
2224 // shufflemask must use an i32
2225 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2226 Args.resize(ResElts, -1);
2227
2228 LHS = EI->getVectorOperand();
2229 RHS = V;
2230 VIsPoisonShuffle = true;
2231 } else if (VIsPoisonShuffle) {
2232 // insert into poison shuffle && size match -> shuffle (v, src)
2233 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2234 for (unsigned j = 0; j != CurIdx; ++j)
2235 Args.push_back(getMaskElt(SVV, j, 0));
2236 Args.push_back(ResElts + C->getZExtValue());
2237 Args.resize(ResElts, -1);
2238
2239 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2240 RHS = EI->getVectorOperand();
2241 VIsPoisonShuffle = false;
2242 }
2243 if (!Args.empty()) {
2244 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2245 ++CurIdx;
2246 continue;
2247 }
2248 }
2249 }
2250 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2251 "vecinit");
2252 VIsPoisonShuffle = false;
2253 ++CurIdx;
2254 continue;
2255 }
2256
2257 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2258
2259 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2260 // input is the same width as the vector being constructed, generate an
2261 // optimized shuffle of the swizzle input into the result.
2262 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2263 if (isa<ExtVectorElementExpr>(IE)) {
2264 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2265 Value *SVOp = SVI->getOperand(0);
2266 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2267
2268 if (OpTy->getNumElements() == ResElts) {
2269 for (unsigned j = 0; j != CurIdx; ++j) {
2270 // If the current vector initializer is a shuffle with poison, merge
2271 // this shuffle directly into it.
2272 if (VIsPoisonShuffle) {
2273 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2274 } else {
2275 Args.push_back(j);
2276 }
2277 }
2278 for (unsigned j = 0, je = InitElts; j != je; ++j)
2279 Args.push_back(getMaskElt(SVI, j, Offset));
2280 Args.resize(ResElts, -1);
2281
2282 if (VIsPoisonShuffle)
2283 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2284
2285 Init = SVOp;
2286 }
2287 }
2288
2289 // Extend init to result vector length, and then shuffle its contribution
2290 // to the vector initializer into V.
2291 if (Args.empty()) {
2292 for (unsigned j = 0; j != InitElts; ++j)
2293 Args.push_back(j);
2294 Args.resize(ResElts, -1);
2295 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2296
2297 Args.clear();
2298 for (unsigned j = 0; j != CurIdx; ++j)
2299 Args.push_back(j);
2300 for (unsigned j = 0; j != InitElts; ++j)
2301 Args.push_back(j + Offset);
2302 Args.resize(ResElts, -1);
2303 }
2304
2305 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2306 // merging subsequent shuffles into this one.
2307 if (CurIdx == 0)
2308 std::swap(V, Init);
2309 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2310 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2311 CurIdx += InitElts;
2312 }
2313
2314 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2315 // Emit remaining default initializers.
2316 llvm::Type *EltTy = VType->getElementType();
2317
2318 // Emit remaining default initializers
2319 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2320 Value *Idx = Builder.getInt32(CurIdx);
2321 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2322 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2323 }
2324 return V;
2325}
2326
2328 return !D->isWeak();
2329}
2330
2331static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2332 E = E->IgnoreParens();
2333
2334 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2335 if (UO->getOpcode() == UO_Deref)
2336 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2337
2338 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2339 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2340
2341 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2342 if (isa<FieldDecl>(ME->getMemberDecl()))
2343 return true;
2344 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2345 }
2346
2347 // Array subscripts? Anything else?
2348
2349 return false;
2350}
2351
2353 assert(E->getType()->isSignableType(getContext()));
2354
2355 E = E->IgnoreParens();
2356
2357 if (isa<CXXThisExpr>(E))
2358 return true;
2359
2360 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2361 if (UO->getOpcode() == UO_AddrOf)
2362 return isLValueKnownNonNull(*this, UO->getSubExpr());
2363
2364 if (const auto *CE = dyn_cast<CastExpr>(E))
2365 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2366 CE->getCastKind() == CK_ArrayToPointerDecay)
2367 return isLValueKnownNonNull(*this, CE->getSubExpr());
2368
2369 // Maybe honor __nonnull?
2370
2371 return false;
2372}
2373
2375 const Expr *E = CE->getSubExpr();
2376
2377 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2378 return false;
2379
2380 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2381 // We always assume that 'this' is never null.
2382 return false;
2383 }
2384
2385 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2386 // And that glvalue casts are never null.
2387 if (ICE->isGLValue())
2388 return false;
2389 }
2390
2391 return true;
2392}
2393
2394// RHS is an aggregate type
2396 QualType RHSTy, QualType LHSTy,
2399 SmallVector<QualType, 16> SrcTypes; // Flattened type
2400 CGF.FlattenAccessAndType(RHSVal, RHSTy, LoadGEPList, SrcTypes);
2401 // LHS is either a vector or a builtin?
2402 // if its a vector create a temp alloca to store into and return that
2403 if (auto *VecTy = LHSTy->getAs<VectorType>()) {
2404 assert(SrcTypes.size() >= VecTy->getNumElements() &&
2405 "Flattened type on RHS must have more elements than vector on LHS.");
2406 llvm::Value *V =
2407 CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp"));
2408 // write to V.
2409 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2410 llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
2411 llvm::Value *Idx = LoadGEPList[I].second;
2412 Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract")
2413 : Load;
2414 llvm::Value *Cast = CGF.EmitScalarConversion(
2415 Load, SrcTypes[I], VecTy->getElementType(), Loc);
2416 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2417 }
2418 return V;
2419 }
2420 // i its a builtin just do an extract element or load.
2421 assert(LHSTy->isBuiltinType() &&
2422 "Destination type must be a vector or builtin type.");
2423 llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load");
2424 llvm::Value *Idx = LoadGEPList[0].second;
2425 Load =
2426 Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
2427 return CGF.EmitScalarConversion(Load, LHSTy, SrcTypes[0], Loc);
2428}
2429
2430// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2431// have to handle a more broad range of conversions than explicit casts, as they
2432// handle things like function to ptr-to-function decay etc.
2433Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2434 Expr *E = CE->getSubExpr();
2435 QualType DestTy = CE->getType();
2436 CastKind Kind = CE->getCastKind();
2438
2439 // These cases are generally not written to ignore the result of
2440 // evaluating their sub-expressions, so we clear this now.
2441 bool Ignored = TestAndClearIgnoreResultAssign();
2442
2443 // Since almost all cast kinds apply to scalars, this switch doesn't have
2444 // a default case, so the compiler will warn on a missing case. The cases
2445 // are in the same order as in the CastKind enum.
2446 switch (Kind) {
2447 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2448 case CK_BuiltinFnToFnPtr:
2449 llvm_unreachable("builtin functions are handled elsewhere");
2450
2451 case CK_LValueBitCast:
2452 case CK_ObjCObjectLValueCast: {
2453 Address Addr = EmitLValue(E).getAddress();
2454 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2455 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2456 return EmitLoadOfLValue(LV, CE->getExprLoc());
2457 }
2458
2459 case CK_LValueToRValueBitCast: {
2460 LValue SourceLVal = CGF.EmitLValue(E);
2461 Address Addr =
2462 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
2463 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2465 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2466 }
2467
2468 case CK_CPointerToObjCPointerCast:
2469 case CK_BlockPointerToObjCPointerCast:
2470 case CK_AnyPointerToBlockPointerCast:
2471 case CK_BitCast: {
2472 Value *Src = Visit(E);
2473 llvm::Type *SrcTy = Src->getType();
2474 llvm::Type *DstTy = ConvertType(DestTy);
2475
2476 // FIXME: this is a gross but seemingly necessary workaround for an issue
2477 // manifesting when a target uses a non-default AS for indirect sret args,
2478 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2479 // on the address of a local struct that gets returned by value yields an
2480 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2481 // DefaultAS. We can only do this subversive thing because sret args are
2482 // manufactured and them residing in the IndirectAS is a target specific
2483 // detail, and doing an AS cast here still retains the semantics the user
2484 // expects. It is desirable to remove this iff a better solution is found.
2485 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
2487 CGF, Src, E->getType().getAddressSpace(), DstTy);
2488
2489 assert(
2490 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2491 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2492 "Address-space cast must be used to convert address spaces");
2493
2494 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2495 if (auto *PT = DestTy->getAs<PointerType>()) {
2497 PT->getPointeeType(),
2498 Address(Src,
2501 CGF.getPointerAlign()),
2502 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2503 CE->getBeginLoc());
2504 }
2505 }
2506
2507 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2508 const QualType SrcType = E->getType();
2509
2510 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2511 // Casting to pointer that could carry dynamic information (provided by
2512 // invariant.group) requires launder.
2513 Src = Builder.CreateLaunderInvariantGroup(Src);
2514 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2515 // Casting to pointer that does not carry dynamic information (provided
2516 // by invariant.group) requires stripping it. Note that we don't do it
2517 // if the source could not be dynamic type and destination could be
2518 // dynamic because dynamic information is already laundered. It is
2519 // because launder(strip(src)) == launder(src), so there is no need to
2520 // add extra strip before launder.
2521 Src = Builder.CreateStripInvariantGroup(Src);
2522 }
2523 }
2524
2525 // Update heapallocsite metadata when there is an explicit pointer cast.
2526 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2527 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2528 !isa<CastExpr>(E)) {
2529 QualType PointeeType = DestTy->getPointeeType();
2530 if (!PointeeType.isNull())
2531 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2532 CE->getExprLoc());
2533 }
2534 }
2535
2536 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2537 // same element type, use the llvm.vector.insert intrinsic to perform the
2538 // bitcast.
2539 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2540 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2541 // If we are casting a fixed i8 vector to a scalable i1 predicate
2542 // vector, use a vector insert and bitcast the result.
2543 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2544 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2545 ScalableDstTy = llvm::ScalableVectorType::get(
2546 FixedSrcTy->getElementType(),
2547 llvm::divideCeil(
2548 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2549 }
2550 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2551 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2552 llvm::Value *Result = Builder.CreateInsertVector(
2553 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2554 ScalableDstTy = cast<llvm::ScalableVectorType>(
2555 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2556 if (Result->getType() != ScalableDstTy)
2557 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2558 if (Result->getType() != DstTy)
2559 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2560 return Result;
2561 }
2562 }
2563 }
2564
2565 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2566 // same element type, use the llvm.vector.extract intrinsic to perform the
2567 // bitcast.
2568 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2569 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2570 // If we are casting a scalable i1 predicate vector to a fixed i8
2571 // vector, bitcast the source and use a vector extract.
2572 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2573 FixedDstTy->getElementType()->isIntegerTy(8)) {
2574 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2575 ScalableSrcTy = llvm::ScalableVectorType::get(
2576 ScalableSrcTy->getElementType(),
2577 llvm::alignTo<8>(
2578 ScalableSrcTy->getElementCount().getKnownMinValue()));
2579 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2580 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2581 uint64_t(0));
2582 }
2583
2584 ScalableSrcTy = llvm::ScalableVectorType::get(
2585 FixedDstTy->getElementType(),
2586 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2587 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2588 }
2589 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2590 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2591 "cast.fixed");
2592 }
2593 }
2594
2595 // Perform VLAT <-> VLST bitcast through memory.
2596 // TODO: since the llvm.vector.{insert,extract} intrinsics
2597 // require the element types of the vectors to be the same, we
2598 // need to keep this around for bitcasts between VLAT <-> VLST where
2599 // the element types of the vectors are not the same, until we figure
2600 // out a better way of doing these casts.
2601 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2602 isa<llvm::ScalableVectorType>(DstTy)) ||
2603 (isa<llvm::ScalableVectorType>(SrcTy) &&
2604 isa<llvm::FixedVectorType>(DstTy))) {
2605 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2606 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2607 CGF.EmitStoreOfScalar(Src, LV);
2608 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2609 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2611 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2612 }
2613
2614 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2615 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2616 }
2617 case CK_AddressSpaceConversion: {
2619 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2620 Result.Val.isNullPointer()) {
2621 // If E has side effect, it is emitted even if its final result is a
2622 // null pointer. In that case, a DCE pass should be able to
2623 // eliminate the useless instructions emitted during translating E.
2624 if (Result.HasSideEffects)
2625 Visit(E);
2626 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2627 ConvertType(DestTy)), DestTy);
2628 }
2629 // Since target may map different address spaces in AST to the same address
2630 // space, an address space conversion may end up as a bitcast.
2632 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2633 ConvertType(DestTy));
2634 }
2635 case CK_AtomicToNonAtomic:
2636 case CK_NonAtomicToAtomic:
2637 case CK_UserDefinedConversion:
2638 return Visit(E);
2639
2640 case CK_NoOp: {
2641 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2642 }
2643
2644 case CK_BaseToDerived: {
2645 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2646 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2647
2649 Address Derived =
2650 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2651 CE->path_begin(), CE->path_end(),
2653
2654 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2655 // performed and the object is not of the derived type.
2656 if (CGF.sanitizePerformTypeCheck())
2658 Derived, DestTy->getPointeeType());
2659
2660 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2661 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2662 /*MayBeNull=*/true,
2664 CE->getBeginLoc());
2665
2666 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2667 }
2668 case CK_UncheckedDerivedToBase:
2669 case CK_DerivedToBase: {
2670 // The EmitPointerWithAlignment path does this fine; just discard
2671 // the alignment.
2673 CE->getType()->getPointeeType());
2674 }
2675
2676 case CK_Dynamic: {
2678 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2679 return CGF.EmitDynamicCast(V, DCE);
2680 }
2681
2682 case CK_ArrayToPointerDecay:
2684 CE->getType()->getPointeeType());
2685 case CK_FunctionToPointerDecay:
2686 return EmitLValue(E).getPointer(CGF);
2687
2688 case CK_NullToPointer:
2689 if (MustVisitNullValue(E))
2690 CGF.EmitIgnoredExpr(E);
2691
2692 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2693 DestTy);
2694
2695 case CK_NullToMemberPointer: {
2696 if (MustVisitNullValue(E))
2697 CGF.EmitIgnoredExpr(E);
2698
2699 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2700 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2701 }
2702
2703 case CK_ReinterpretMemberPointer:
2704 case CK_BaseToDerivedMemberPointer:
2705 case CK_DerivedToBaseMemberPointer: {
2706 Value *Src = Visit(E);
2707
2708 // Note that the AST doesn't distinguish between checked and
2709 // unchecked member pointer conversions, so we always have to
2710 // implement checked conversions here. This is inefficient when
2711 // actual control flow may be required in order to perform the
2712 // check, which it is for data member pointers (but not member
2713 // function pointers on Itanium and ARM).
2714 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2715 }
2716
2717 case CK_ARCProduceObject:
2718 return CGF.EmitARCRetainScalarExpr(E);
2719 case CK_ARCConsumeObject:
2720 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2721 case CK_ARCReclaimReturnedObject:
2722 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2723 case CK_ARCExtendBlockObject:
2724 return CGF.EmitARCExtendBlockObject(E);
2725
2726 case CK_CopyAndAutoreleaseBlockObject:
2727 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2728
2729 case CK_FloatingRealToComplex:
2730 case CK_FloatingComplexCast:
2731 case CK_IntegralRealToComplex:
2732 case CK_IntegralComplexCast:
2733 case CK_IntegralComplexToFloatingComplex:
2734 case CK_FloatingComplexToIntegralComplex:
2735 case CK_ConstructorConversion:
2736 case CK_ToUnion:
2737 case CK_HLSLArrayRValue:
2738 llvm_unreachable("scalar cast to non-scalar value");
2739
2740 case CK_LValueToRValue:
2741 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2742 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2743 return Visit(E);
2744
2745 case CK_IntegralToPointer: {
2746 Value *Src = Visit(E);
2747
2748 // First, convert to the correct width so that we control the kind of
2749 // extension.
2750 auto DestLLVMTy = ConvertType(DestTy);
2751 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2752 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2753 llvm::Value* IntResult =
2754 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2755
2756 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2757
2758 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2759 // Going from integer to pointer that could be dynamic requires reloading
2760 // dynamic information from invariant.group.
2761 if (DestTy.mayBeDynamicClass())
2762 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2763 }
2764
2765 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2766 return IntToPtr;
2767 }
2768 case CK_PointerToIntegral: {
2769 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2770 auto *PtrExpr = Visit(E);
2771
2772 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2773 const QualType SrcType = E->getType();
2774
2775 // Casting to integer requires stripping dynamic information as it does
2776 // not carries it.
2777 if (SrcType.mayBeDynamicClass())
2778 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2779 }
2780
2781 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2782 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2783 }
2784 case CK_ToVoid: {
2785 CGF.EmitIgnoredExpr(E);
2786 return nullptr;
2787 }
2788 case CK_MatrixCast: {
2789 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2790 CE->getExprLoc());
2791 }
2792 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
2793 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
2794 // To perform any necessary Scalar Cast, so this Cast can be handled
2795 // by the regular Vector Splat cast code.
2796 case CK_HLSLAggregateSplatCast:
2797 case CK_VectorSplat: {
2798 llvm::Type *DstTy = ConvertType(DestTy);
2799 Value *Elt = Visit(E);
2800 // Splat the element across to all elements
2801 llvm::ElementCount NumElements =
2802 cast<llvm::VectorType>(DstTy)->getElementCount();
2803 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2804 }
2805
2806 case CK_FixedPointCast:
2807 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2808 CE->getExprLoc());
2809
2810 case CK_FixedPointToBoolean:
2811 assert(E->getType()->isFixedPointType() &&
2812 "Expected src type to be fixed point type");
2813 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2814 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2815 CE->getExprLoc());
2816
2817 case CK_FixedPointToIntegral:
2818 assert(E->getType()->isFixedPointType() &&
2819 "Expected src type to be fixed point type");
2820 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2821 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2822 CE->getExprLoc());
2823
2824 case CK_IntegralToFixedPoint:
2825 assert(E->getType()->isIntegerType() &&
2826 "Expected src type to be an integer");
2827 assert(DestTy->isFixedPointType() &&
2828 "Expected dest type to be fixed point type");
2829 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2830 CE->getExprLoc());
2831
2832 case CK_IntegralCast: {
2833 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2834 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2835 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2837 "conv");
2838 }
2839 ScalarConversionOpts Opts;
2840 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2841 if (!ICE->isPartOfExplicitCast())
2842 Opts = ScalarConversionOpts(CGF.SanOpts);
2843 }
2844 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2845 CE->getExprLoc(), Opts);
2846 }
2847 case CK_IntegralToFloating: {
2848 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2849 // TODO: Support constrained FP intrinsics.
2850 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2851 if (SrcElTy->isSignedIntegerOrEnumerationType())
2852 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2853 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2854 }
2855 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2856 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2857 CE->getExprLoc());
2858 }
2859 case CK_FloatingToIntegral: {
2860 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2861 // TODO: Support constrained FP intrinsics.
2862 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2863 if (DstElTy->isSignedIntegerOrEnumerationType())
2864 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2865 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2866 }
2867 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2868 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2869 CE->getExprLoc());
2870 }
2871 case CK_FloatingCast: {
2872 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2873 // TODO: Support constrained FP intrinsics.
2874 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2875 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2876 if (DstElTy->castAs<BuiltinType>()->getKind() <
2877 SrcElTy->castAs<BuiltinType>()->getKind())
2878 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2879 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2880 }
2881 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2882 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2883 CE->getExprLoc());
2884 }
2885 case CK_FixedPointToFloating:
2886 case CK_FloatingToFixedPoint: {
2887 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2888 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2889 CE->getExprLoc());
2890 }
2891 case CK_BooleanToSignedIntegral: {
2892 ScalarConversionOpts Opts;
2893 Opts.TreatBooleanAsSigned = true;
2894 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2895 CE->getExprLoc(), Opts);
2896 }
2897 case CK_IntegralToBoolean:
2898 return EmitIntToBoolConversion(Visit(E));
2899 case CK_PointerToBoolean:
2900 return EmitPointerToBoolConversion(Visit(E), E->getType());
2901 case CK_FloatingToBoolean: {
2902 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2903 return EmitFloatToBoolConversion(Visit(E));
2904 }
2905 case CK_MemberPointerToBoolean: {
2906 llvm::Value *MemPtr = Visit(E);
2908 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2909 }
2910
2911 case CK_FloatingComplexToReal:
2912 case CK_IntegralComplexToReal:
2913 return CGF.EmitComplexExpr(E, false, true).first;
2914
2915 case CK_FloatingComplexToBoolean:
2916 case CK_IntegralComplexToBoolean: {
2918
2919 // TODO: kill this function off, inline appropriate case here
2920 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2921 CE->getExprLoc());
2922 }
2923
2924 case CK_ZeroToOCLOpaqueType: {
2925 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2926 DestTy->isOCLIntelSubgroupAVCType()) &&
2927 "CK_ZeroToOCLEvent cast on non-event type");
2928 return llvm::Constant::getNullValue(ConvertType(DestTy));
2929 }
2930
2931 case CK_IntToOCLSampler:
2932 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2933
2934 case CK_HLSLVectorTruncation: {
2935 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2936 "Destination type must be a vector or builtin type.");
2937 Value *Vec = Visit(E);
2938 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2939 SmallVector<int> Mask;
2940 unsigned NumElts = VecTy->getNumElements();
2941 for (unsigned I = 0; I != NumElts; ++I)
2942 Mask.push_back(I);
2943
2944 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2945 }
2946 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
2947 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
2948 }
2949 case CK_HLSLElementwiseCast: {
2950 RValue RV = CGF.EmitAnyExpr(E);
2952 QualType SrcTy = E->getType();
2953
2954 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2955 // RHS is an aggregate
2956 Address SrcVal = RV.getAggregateAddress();
2957 return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc);
2958 }
2959 } // end of switch
2960
2961 llvm_unreachable("unknown scalar cast");
2962}
2963
/// Emit a GNU statement expression ("({ stmts; })") whose result is scalar:
/// run the compound statement, then load the resulting temporary, if any.
2964Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  // Emit the compound statement.  When the expression's type is non-void,
  // EmitCompoundStmt returns the address of a temporary holding the value
  // of the last expression-statement.
2966  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2967                                           !E->getType()->isVoidType());
  // An invalid address means there is no value to produce (void-typed
  // statement expression).
2968  if (!RetAlloca.isValid())
2969    return nullptr;
  // Load the scalar result out of the temporary slot.
2970  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2971                              E->getExprLoc());
2972}
2973
2974Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  // NOTE(review): 'Scope' used below is a cleanups scope declared at the top
  // of this function (presumably CodeGenFunction::RunCleanupsScope); its
  // declaration is not visible in this listing — confirm against upstream
  // before editing.
2976  Value *V = Visit(E->getSubExpr());
2977  // Defend against dominance problems caused by jumps out of expression
2978  // evaluation through the shared cleanup block.
2979  Scope.ForceCleanup({&V});
2980  return V;
2981}
2982
2983//===----------------------------------------------------------------------===//
2984// Unary Operators
2985//===----------------------------------------------------------------------===//
2986
2988                                             llvm::Value *InVal, bool IsInc,
2989                                             FPOptions FPFeatures) {
  // Package an increment/decrement as a BinOpInfo describing "InVal +/- 1",
  // so the shared binary-operator emitters (plain or overflow-checked) can
  // be reused for ++/--.
2990  BinOpInfo BinOp;
2991  BinOp.LHS = InVal;
  // RHS is the constant 1 in the operand's type; the opcode below selects
  // add (increment) vs. sub (decrement).
2992  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2993  BinOp.Ty = E->getType();
2994  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2995  BinOp.FPFeatures = FPFeatures;
2996  BinOp.E = E;
2997  return BinOp;
2998}
2999
/// Emit a signed-integer ++/-- while honoring the language's configured
/// signed-overflow behavior and the signed-integer-overflow sanitizer.
3000llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
3001    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // The delta to add: +1 for increment, -1 for decrement, in InVal's type.
3002  llvm::Value *Amount =
3003      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
3004  StringRef Name = IsInc ? "inc" : "dec";
3005  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  // NOTE(review): the three case labels of this switch (the signed-overflow
  // behavior kinds) are not visible in this listing — confirm against
  // upstream.  First branch: plain (wrapping) add, presumably the
  // "overflow is defined" (-fwrapv) mode, unless the sanitizer is on.
3007    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3008      return Builder.CreateAdd(InVal, Amount, Name);
3009    [[fallthrough]];
  // Second branch: presumably the "overflow is undefined" mode — emit the
  // add with the nsw flag, unless the sanitizer wants a checked operation.
3011    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3012      return Builder.CreateNSWAdd(InVal, Amount, Name);
3013    [[fallthrough]];
  // Third branch (also the sanitizer fallthrough target): emit an
  // overflow-checked binop unless the operation provably cannot overflow.
3015    BinOpInfo Info = createBinOpInfoFromIncDec(
3016        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3017    if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
3018      return Builder.CreateNSWAdd(InVal, Amount, Name);
3019    return EmitOverflowCheckedBinOp(Info);
3020  }
3021  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
3022}
3023
3024/// For the purposes of overflow pattern exclusion, does this match the
3025/// "while(i--)" pattern?
3026static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3027                                   bool isPre, ASTContext &Ctx) {
  // Only a post-decrement (x--) can form the "while (i--)" idiom.
3028  if (isInc || isPre)
3029    return false;
3030
3031  // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
  // NOTE(review): the condition guarding this early return (presumably a
  // check that the pattern-exclusion flag above is enabled) is not visible
  // in this listing — confirm against upstream before editing.
3034    return false;
3035
3036  // all Parents (usually just one) must be a WhileStmt
3037  for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
3038    if (!Parent.get<WhileStmt>())
3039      return false;
3040
3041  return true;
3042}
3043
3044namespace {
3045/// Handles check and update for lastprivate conditional variables.
3046class OMPLastprivateConditionalUpdateRAII {
3047private:
  // Function whose OpenMP state is consulted/updated at scope exit.
3048  CodeGenFunction &CGF;
  // The unary operator whose sub-expression may name a lastprivate
  // conditional variable.
3049  const UnaryOperator *E;
3050
3051public:
  // Records the context; all work happens in the destructor so that the
  // update runs after the inc/dec itself has been emitted.
3052  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3053                                      const UnaryOperator *E)
3054      : CGF(CGF), E(E) {}
3055  ~OMPLastprivateConditionalUpdateRAII() {
3056    if (CGF.getLangOpts().OpenMP)
      // NOTE(review): the callee on the line guarded by this 'if' (an OpenMP
      // runtime hook taking CGF and the sub-expression) is not visible in
      // this listing — confirm against upstream.
3058        CGF, E->getSubExpr());
3059  }
3060};
3061} // namespace
3062
3063llvm::Value *
3064ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3065 bool isInc, bool isPre) {
3066 ApplyAtomGroup Grp(CGF.getDebugInfo());
3067 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3068 QualType type = E->getSubExpr()->getType();
3069 llvm::PHINode *atomicPHI = nullptr;
3070 llvm::Value *value;
3071 llvm::Value *input;
3072 llvm::Value *Previous = nullptr;
3073 QualType SrcType = E->getType();
3074
3075 int amount = (isInc ? 1 : -1);
3076 bool isSubtraction = !isInc;
3077
3078 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3079 type = atomicTy->getValueType();
3080 if (isInc && type->isBooleanType()) {
3081 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3082 if (isPre) {
3083 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3084 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3085 return Builder.getTrue();
3086 }
3087 // For atomic bool increment, we just store true and return it for
3088 // preincrement, do an atomic swap with true for postincrement
3089 return Builder.CreateAtomicRMW(
3090 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3091 llvm::AtomicOrdering::SequentiallyConsistent);
3092 }
3093 // Special case for atomic increment / decrement on integers, emit
3094 // atomicrmw instructions. We skip this if we want to be doing overflow
3095 // checking, and fall into the slow path with the atomic cmpxchg loop.
3096 if (!type->isBooleanType() && type->isIntegerType() &&
3097 !(type->isUnsignedIntegerType() &&
3098 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3099 CGF.getLangOpts().getSignedOverflowBehavior() !=
3101 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3102 llvm::AtomicRMWInst::Sub;
3103 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3104 llvm::Instruction::Sub;
3105 llvm::Value *amt = CGF.EmitToMemory(
3106 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3107 llvm::Value *old =
3108 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3109 llvm::AtomicOrdering::SequentiallyConsistent);
3110 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3111 }
3112 // Special case for atomic increment/decrement on floats.
3113 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3114 if (type->isFloatingType()) {
3115 llvm::Type *Ty = ConvertType(type);
3116 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3117 llvm::AtomicRMWInst::BinOp aop =
3118 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3119 llvm::Instruction::BinaryOps op =
3120 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3121 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3122 llvm::AtomicRMWInst *old =
3123 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3124 llvm::AtomicOrdering::SequentiallyConsistent);
3125
3126 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3127 }
3128 }
3129 value = EmitLoadOfLValue(LV, E->getExprLoc());
3130 input = value;
3131 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3132 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3133 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3134 value = CGF.EmitToMemory(value, type);
3135 Builder.CreateBr(opBB);
3136 Builder.SetInsertPoint(opBB);
3137 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3138 atomicPHI->addIncoming(value, startBB);
3139 value = atomicPHI;
3140 } else {
3141 value = EmitLoadOfLValue(LV, E->getExprLoc());
3142 input = value;
3143 }
3144
3145 // Special case of integer increment that we have to check first: bool++.
3146 // Due to promotion rules, we get:
3147 // bool++ -> bool = bool + 1
3148 // -> bool = (int)bool + 1
3149 // -> bool = ((int)bool + 1 != 0)
3150 // An interesting aspect of this is that increment is always true.
3151 // Decrement does not have this property.
3152 if (isInc && type->isBooleanType()) {
3153 value = Builder.getTrue();
3154
3155 // Most common case by far: integer increment.
3156 } else if (type->isIntegerType()) {
3157 QualType promotedType;
3158 bool canPerformLossyDemotionCheck = false;
3159
3160 bool excludeOverflowPattern =
3161 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3162
3164 promotedType = CGF.getContext().getPromotedIntegerType(type);
3165 assert(promotedType != type && "Shouldn't promote to the same type.");
3166 canPerformLossyDemotionCheck = true;
3167 canPerformLossyDemotionCheck &=
3169 CGF.getContext().getCanonicalType(promotedType);
3170 canPerformLossyDemotionCheck &=
3172 type, promotedType);
3173 assert((!canPerformLossyDemotionCheck ||
3174 type->isSignedIntegerOrEnumerationType() ||
3175 promotedType->isSignedIntegerOrEnumerationType() ||
3176 ConvertType(type)->getScalarSizeInBits() ==
3177 ConvertType(promotedType)->getScalarSizeInBits()) &&
3178 "The following check expects that if we do promotion to different "
3179 "underlying canonical type, at least one of the types (either "
3180 "base or promoted) will be signed, or the bitwidths will match.");
3181 }
3182 if (CGF.SanOpts.hasOneOf(
3183 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3184 SanitizerKind::ImplicitBitfieldConversion) &&
3185 canPerformLossyDemotionCheck) {
3186 // While `x += 1` (for `x` with width less than int) is modeled as
3187 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3188 // ease; inc/dec with width less than int can't overflow because of
3189 // promotion rules, so we omit promotion+demotion, which means that we can
3190 // not catch lossy "demotion". Because we still want to catch these cases
3191 // when the sanitizer is enabled, we perform the promotion, then perform
3192 // the increment/decrement in the wider type, and finally
3193 // perform the demotion. This will catch lossy demotions.
3194
3195 // We have a special case for bitfields defined using all the bits of the
3196 // type. In this case we need to do the same trick as for the integer
3197 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3198
3199 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3200 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3201 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3202 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3203 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3204 // checks will take care of the conversion.
3205 ScalarConversionOpts Opts;
3206 if (!LV.isBitField())
3207 Opts = ScalarConversionOpts(CGF.SanOpts);
3208 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3209 Previous = value;
3210 SrcType = promotedType;
3211 }
3212
3213 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3214 Opts);
3215
3216 // Note that signed integer inc/dec with width less than int can't
3217 // overflow because of promotion rules; we're just eliding a few steps
3218 // here.
3219 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3220 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3221 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3222 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3223 !excludeOverflowPattern &&
3225 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3226 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3227 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3228 } else {
3229 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3230 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3231 }
3232
3233 // Next most common: pointer increment.
3234 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3235 QualType type = ptr->getPointeeType();
3236
3237 // VLA types don't have constant size.
3238 if (const VariableArrayType *vla
3240 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3241 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3242 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3243 if (CGF.getLangOpts().PointerOverflowDefined)
3244 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3245 else
3246 value = CGF.EmitCheckedInBoundsGEP(
3247 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3248 E->getExprLoc(), "vla.inc");
3249
3250 // Arithmetic on function pointers (!) is just +-1.
3251 } else if (type->isFunctionType()) {
3252 llvm::Value *amt = Builder.getInt32(amount);
3253
3254 if (CGF.getLangOpts().PointerOverflowDefined)
3255 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3256 else
3257 value =
3258 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3259 /*SignedIndices=*/false, isSubtraction,
3260 E->getExprLoc(), "incdec.funcptr");
3261
3262 // For everything else, we can just do a simple increment.
3263 } else {
3264 llvm::Value *amt = Builder.getInt32(amount);
3265 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3266 if (CGF.getLangOpts().PointerOverflowDefined)
3267 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3268 else
3269 value = CGF.EmitCheckedInBoundsGEP(
3270 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3271 E->getExprLoc(), "incdec.ptr");
3272 }
3273
3274 // Vector increment/decrement.
3275 } else if (type->isVectorType()) {
3276 if (type->hasIntegerRepresentation()) {
3277 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
3278
3279 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3280 } else {
3281 value = Builder.CreateFAdd(
3282 value,
3283 llvm::ConstantFP::get(value->getType(), amount),
3284 isInc ? "inc" : "dec");
3285 }
3286
3287 // Floating point.
3288 } else if (type->isRealFloatingType()) {
3289 // Add the inc/dec to the real part.
3290 llvm::Value *amt;
3291 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3292
3293 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3294 // Another special case: half FP increment should be done via float
3296 value = Builder.CreateCall(
3297 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3298 CGF.CGM.FloatTy),
3299 input, "incdec.conv");
3300 } else {
3301 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3302 }
3303 }
3304
3305 if (value->getType()->isFloatTy())
3306 amt = llvm::ConstantFP::get(VMContext,
3307 llvm::APFloat(static_cast<float>(amount)));
3308 else if (value->getType()->isDoubleTy())
3309 amt = llvm::ConstantFP::get(VMContext,
3310 llvm::APFloat(static_cast<double>(amount)));
3311 else {
3312 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3313 // Convert from float.
3314 llvm::APFloat F(static_cast<float>(amount));
3315 bool ignored;
3316 const llvm::fltSemantics *FS;
3317 // Don't use getFloatTypeSemantics because Half isn't
3318 // necessarily represented using the "half" LLVM type.
3319 if (value->getType()->isFP128Ty())
3320 FS = &CGF.getTarget().getFloat128Format();
3321 else if (value->getType()->isHalfTy())
3322 FS = &CGF.getTarget().getHalfFormat();
3323 else if (value->getType()->isBFloatTy())
3324 FS = &CGF.getTarget().getBFloat16Format();
3325 else if (value->getType()->isPPC_FP128Ty())
3326 FS = &CGF.getTarget().getIbm128Format();
3327 else
3328 FS = &CGF.getTarget().getLongDoubleFormat();
3329 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3330 amt = llvm::ConstantFP::get(VMContext, F);
3331 }
3332 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3333
3334 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3336 value = Builder.CreateCall(
3337 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3338 CGF.CGM.FloatTy),
3339 value, "incdec.conv");
3340 } else {
3341 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3342 }
3343 }
3344
3345 // Fixed-point types.
3346 } else if (type->isFixedPointType()) {
3347 // Fixed-point types are tricky. In some cases, it isn't possible to
3348 // represent a 1 or a -1 in the type at all. Piggyback off of
3349 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3350 BinOpInfo Info;
3351 Info.E = E;
3352 Info.Ty = E->getType();
3353 Info.Opcode = isInc ? BO_Add : BO_Sub;
3354 Info.LHS = value;
3355 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3356 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3357 // since -1 is guaranteed to be representable.
3358 if (type->isSignedFixedPointType()) {
3359 Info.Opcode = isInc ? BO_Sub : BO_Add;
3360 Info.RHS = Builder.CreateNeg(Info.RHS);
3361 }
3362 // Now, convert from our invented integer literal to the type of the unary
3363 // op. This will upscale and saturate if necessary. This value can become
3364 // undef in some cases.
3365 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3366 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3367 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3368 value = EmitFixedPointBinOp(Info);
3369
3370 // Objective-C pointer types.
3371 } else {
3372 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3373
3375 if (!isInc) size = -size;
3376 llvm::Value *sizeValue =
3377 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3378
3379 if (CGF.getLangOpts().PointerOverflowDefined)
3380 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3381 else
3382 value = CGF.EmitCheckedInBoundsGEP(
3383 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3384 E->getExprLoc(), "incdec.objptr");
3385 value = Builder.CreateBitCast(value, input->getType());
3386 }
3387
3388 if (atomicPHI) {
3389 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3390 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3391 auto Pair = CGF.EmitAtomicCompareExchange(
3392 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3393 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3394 llvm::Value *success = Pair.second;
3395 atomicPHI->addIncoming(old, curBlock);
3396 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3397 Builder.SetInsertPoint(contBB);
3398 return isPre ? value : input;
3399 }
3400
3401 // Store the updated result through the lvalue.
3402 if (LV.isBitField()) {
3403 Value *Src = Previous ? Previous : value;
3404 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3405 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3406 LV.getBitFieldInfo(), E->getExprLoc());
3407 } else
3408 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3409
3410 // If this is a postinc, return the value read from memory, otherwise use the
3411 // updated value.
3412 return isPre ? value : input;
3413}
3414
3415
3416Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3417 QualType PromotionType) {
3418 QualType promotionTy = PromotionType.isNull()
3419 ? getPromotionType(E->getSubExpr()->getType())
3420 : PromotionType;
3421 Value *result = VisitPlus(E, promotionTy);
3422 if (result && !promotionTy.isNull())
3423 result = EmitUnPromotedValue(result, E->getType());
3424 return result;
3425}
3426
3427Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3428 QualType PromotionType) {
3429 // This differs from gcc, though, most likely due to a bug in gcc.
3430 TestAndClearIgnoreResultAssign();
3431 if (!PromotionType.isNull())
3432 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3433 return Visit(E->getSubExpr());
3434}
3435
3436Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3437 QualType PromotionType) {
3438 QualType promotionTy = PromotionType.isNull()
3439 ? getPromotionType(E->getSubExpr()->getType())
3440 : PromotionType;
3441 Value *result = VisitMinus(E, promotionTy);
3442 if (result && !promotionTy.isNull())
3443 result = EmitUnPromotedValue(result, E->getType());
3444 return result;
3445}
3446
3447Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3448 QualType PromotionType) {
3449 TestAndClearIgnoreResultAssign();
3450 Value *Op;
3451 if (!PromotionType.isNull())
3452 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3453 else
3454 Op = Visit(E->getSubExpr());
3455
3456 // Generate a unary FNeg for FP ops.
3457 if (Op->getType()->isFPOrFPVectorTy())
3458 return Builder.CreateFNeg(Op, "fneg");
3459
3460 // Emit unary minus with EmitSub so we handle overflow cases etc.
3461 BinOpInfo BinOp;
3462 BinOp.RHS = Op;
3463 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3464 BinOp.Ty = E->getType();
3465 BinOp.Opcode = BO_Sub;
3466 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3467 BinOp.E = E;
3468 return EmitSub(BinOp);
3469}
3470
3471Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3472 TestAndClearIgnoreResultAssign();
3473 Value *Op = Visit(E->getSubExpr());
3474 return Builder.CreateNot(Op, "not");
3475}
3476
// Lower the logical-not operator '!'. Vector operands are compared against a
// zero vector and the i1 mask is sign-extended to the result type; scalar
// operands are evaluated as a bool, inverted, and zero-extended.
// NOTE(review): original lines 3480-3481 (rest of the vector condition) and
// 3486 (the FP-options RAII declaration) are elided in this listing — verify
// against upstream before editing.
3477Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3478 // Perform vector logical not on comparison with zero vector.
3479 if (E->getType()->isVectorType() &&
3482 Value *Oper = Visit(E->getSubExpr());
3483 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3484 Value *Result;
3485 if (Oper->getType()->isFPOrFPVectorTy()) {
3487 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3488 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3489 } else
3490 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
// Sign-extend the comparison mask so each vector lane is all-ones/all-zeros.
3491 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3492 }
3493
3494 // Compare operand to zero.
3495 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3496
3497 // Invert value.
3498 // TODO: Could dynamically modify easy computations here. For example, if
3499 // the operand is an icmp ne, turn into icmp eq.
3500 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3501
3502 // ZExt result to the expr type.
3503 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3504}
3505
// Lower __builtin_offsetof: constant-fold the whole expression when possible,
// otherwise walk the component list (array subscripts, fields, non-virtual
// bases) and accumulate each component's byte offset into Result.
// NOTE(review): original lines 3569 (a case label) and 3578 (start of the
// record-layout declaration) are elided in this listing — verify upstream.
3506Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3507 // Try folding the offsetof to a constant.
3508 Expr::EvalResult EVResult;
3509 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3510 llvm::APSInt Value = EVResult.Val.getInt();
3511 return Builder.getInt(Value);
3512 }
3513
3514 // Loop over the components of the offsetof to compute the value.
3515 unsigned n = E->getNumComponents();
3516 llvm::Type* ResultType = ConvertType(E->getType());
3517 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3518 QualType CurrentType = E->getTypeSourceInfo()->getType();
3519 for (unsigned i = 0; i != n; ++i) {
3520 OffsetOfNode ON = E->getComponent(i);
3521 llvm::Value *Offset = nullptr;
3522 switch (ON.getKind()) {
3523 case OffsetOfNode::Array: {
3524 // Compute the index
3525 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3526 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3527 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3528 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3529
3530 // Save the element type
3531 CurrentType =
3532 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3533
3534 // Compute the element size
3535 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3536 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3537
3538 // Multiply out to compute the result
3539 Offset = Builder.CreateMul(Idx, ElemSize);
3540 break;
3541 }
3542
3543 case OffsetOfNode::Field: {
3544 FieldDecl *MemberDecl = ON.getField();
3545 auto *RD = CurrentType->castAsRecordDecl();
3546 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3547
3548 // Compute the index of the field in its parent.
3549 unsigned i = 0;
3550 // FIXME: It would be nice if we didn't have to loop here!
3551 for (RecordDecl::field_iterator Field = RD->field_begin(),
3552 FieldEnd = RD->field_end();
3553 Field != FieldEnd; ++Field, ++i) {
3554 if (*Field == MemberDecl)
3555 break;
3556 }
3557 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3558
// Field offsets are kept in bits; divide by the char width to get bytes.
3559 // Compute the offset to the field
3560 int64_t OffsetInt = RL.getFieldOffset(i) /
3561 CGF.getContext().getCharWidth();
3562 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3563
3564 // Save the element type.
3565 CurrentType = MemberDecl->getType();
3566 break;
3567 }
3568
3570 llvm_unreachable("dependent __builtin_offsetof");
3571
3572 case OffsetOfNode::Base: {
// Virtual bases have no statically-known offset; report and skip.
3573 if (ON.getBase()->isVirtual()) {
3574 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3575 continue;
3576 }
3577
3579 CurrentType->castAsCanonical<RecordType>()->getOriginalDecl());
3580
3581 // Save the element type.
3582 CurrentType = ON.getBase()->getType();
3583
3584 // Compute the offset to the base.
3585 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3586 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3587 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3588 break;
3589 }
3590 }
3591 Result = Builder.CreateAdd(Result, Offset);
3592 }
3593 return Result;
3594}
3595
3596/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3597/// argument of the sizeof expression as an integer.
// Lower sizeof/__datasizeof/_Countof and related traits. Only variably-
// modified (VLA) cases need runtime code; everything else constant-folds at
// the bottom via EvaluateKnownConstInt.
// NOTE(review): original line 3643 (interior of the OpenMP alignment call) is
// elided in this listing — verify against upstream.
3598Value *
3599ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3600 const UnaryExprOrTypeTraitExpr *E) {
3601 QualType TypeToSize = E->getTypeOfArgument();
3602 if (auto Kind = E->getKind();
3603 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3604 if (const VariableArrayType *VAT =
3605 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3606 // For _Countof, we only want to evaluate if the extent is actually
3607 // variable as opposed to a multi-dimensional array whose extent is
3608 // constant but whose element type is variable.
3609 bool EvaluateExtent = true;
3610 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3611 EvaluateExtent =
3612 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3613 }
3614 if (EvaluateExtent) {
3615 if (E->isArgumentType()) {
3616 // sizeof(type) - make sure to emit the VLA size.
3617 CGF.EmitVariablyModifiedType(TypeToSize);
3618 } else {
3619 // C99 6.5.3.4p2: If the argument is an expression of type
3620 // VLA, it is evaluated.
3621 CGF.EmitIgnoredExpr(E->getArgumentExpr());
3622 }
3623
3624 // For _Countof, we just want to return the size of a single dimension.
3625 if (Kind == UETT_CountOf)
3626 return CGF.getVLAElements1D(VAT).NumElts;
3627
3628 // For sizeof and __datasizeof, we need to scale the number of elements
3629 // by the size of the array element type.
3630 auto VlaSize = CGF.getVLASize(VAT);
3631
3632 // Scale the number of non-VLA elements by the non-VLA element size.
3633 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3634 if (!eltSize.isOne())
3635 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3636 VlaSize.NumElts);
3637 return VlaSize.NumElts;
3638 }
3639 }
3640 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3641 auto Alignment =
3642 CGF.getContext()
3644 E->getTypeOfArgument()->getPointeeType()))
3645 .getQuantity();
3646 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3647 } else if (E->getKind() == UETT_VectorElements) {
// __builtin_vectorelements: element count of the (possibly scalable) vector.
3648 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3649 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3650 }
3651
3652 // If this isn't sizeof(vla), the result must be constant; use the constant
3653 // folding logic so we don't have to duplicate it here.
3654 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3655}
3656
3657Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3658 QualType PromotionType) {
3659 QualType promotionTy = PromotionType.isNull()
3660 ? getPromotionType(E->getSubExpr()->getType())
3661 : PromotionType;
3662 Value *result = VisitReal(E, promotionTy);
3663 if (result && !promotionTy.isNull())
3664 result = EmitUnPromotedValue(result, E->getType());
3665 return result;
3666}
3667
// __real on a complex operand projects its real component; on a scalar it is
// the identity (modulo promotion).
// NOTE(review): original line 3677 (the declaration receiving the complex
// load's result) is elided in this listing — verify against upstream.
3668Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3669 QualType PromotionType) {
3670 Expr *Op = E->getSubExpr();
3671 if (Op->getType()->isAnyComplexType()) {
3672 // If it's an l-value, load through the appropriate subobject l-value.
3673 // Note that we have to ask E because Op might be an l-value that
3674 // this won't work for, e.g. an Obj-C property.
3675 if (E->isGLValue()) {
3676 if (!PromotionType.isNull()) {
3678 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3679 if (result.first)
3680 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3681 return result.first;
3682 } else {
3683 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3684 .getScalarVal();
3685 }
3686 }
3687 // Otherwise, calculate and project.
3688 return CGF.EmitComplexExpr(Op, false, true).first;
3689 }
3690
// Scalar operand: __real x is just x (promoted if requested).
3691 if (!PromotionType.isNull())
3692 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3693 return Visit(Op);
3694}
3695
3696Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3697 QualType PromotionType) {
3698 QualType promotionTy = PromotionType.isNull()
3699 ? getPromotionType(E->getSubExpr()->getType())
3700 : PromotionType;
3701 Value *result = VisitImag(E, promotionTy);
3702 if (result && !promotionTy.isNull())
3703 result = EmitUnPromotedValue(result, E->getType());
3704 return result;
3705}
3706
// __imag on a complex operand projects its imaginary component; on a scalar
// it is zero, though the operand is still evaluated for side effects.
// NOTE(review): original line 3716 (the declaration receiving the complex
// load's result) is elided in this listing — verify against upstream.
3707Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3708 QualType PromotionType) {
3709 Expr *Op = E->getSubExpr();
3710 if (Op->getType()->isAnyComplexType()) {
3711 // If it's an l-value, load through the appropriate subobject l-value.
3712 // Note that we have to ask E because Op might be an l-value that
3713 // this won't work for, e.g. an Obj-C property.
3714 if (Op->isGLValue()) {
3715 if (!PromotionType.isNull()) {
3717 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3718 if (result.second)
3719 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3720 return result.second;
3721 } else {
3722 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3723 .getScalarVal();
3724 }
3725 }
3726 // Otherwise, calculate and project.
3727 return CGF.EmitComplexExpr(Op, true, false).second;
3728 }
3729
3730 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3731 // effects are evaluated, but not the actual value.
3732 if (Op->isGLValue())
3733 CGF.EmitLValue(Op);
3734 else if (!PromotionType.isNull())
3735 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3736 else
3737 CGF.EmitScalarExpr(Op, true);
3738 if (!PromotionType.isNull())
3739 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3740 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3741}
3742
3743//===----------------------------------------------------------------------===//
3744// Binary Operators
3745//===----------------------------------------------------------------------===//
3746
3747Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3748 QualType PromotionType) {
3749 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3750}
3751
3752Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3753 QualType ExprType) {
3754 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3755}
3756
3757Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3758 E = E->IgnoreParens();
3759 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3760 switch (BO->getOpcode()) {
3761#define HANDLE_BINOP(OP) \
3762 case BO_##OP: \
3763 return Emit##OP(EmitBinOps(BO, PromotionType));
3764 HANDLE_BINOP(Add)
3765 HANDLE_BINOP(Sub)
3766 HANDLE_BINOP(Mul)
3767 HANDLE_BINOP(Div)
3768#undef HANDLE_BINOP
3769 default:
3770 break;
3771 }
3772 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3773 switch (UO->getOpcode()) {
3774 case UO_Imag:
3775 return VisitImag(UO, PromotionType);
3776 case UO_Real:
3777 return VisitReal(UO, PromotionType);
3778 case UO_Minus:
3779 return VisitMinus(UO, PromotionType);
3780 case UO_Plus:
3781 return VisitPlus(UO, PromotionType);
3782 default:
3783 break;
3784 }
3785 }
3786 auto result = Visit(const_cast<Expr *>(E));
3787 if (result) {
3788 if (!PromotionType.isNull())
3789 return EmitPromotedValue(result, PromotionType);
3790 else
3791 return EmitUnPromotedValue(result, E->getType());
3792 }
3793 return result;
3794}
3795
3796BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3797 QualType PromotionType) {
3798 TestAndClearIgnoreResultAssign();
3799 BinOpInfo Result;
3800 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3801 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3802 if (!PromotionType.isNull())
3803 Result.Ty = PromotionType;
3804 else
3805 Result.Ty = E->getType();
3806 Result.Opcode = E->getOpcode();
3807 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3808 Result.E = E;
3809 return Result;
3810}
3811
// Emit a compound assignment (+=, -=, etc.) and return the LHS lvalue, with
// the computed r-value passed out through 'Result': evaluate the RHS, load
// and convert the LHS, apply *Func, convert the result back, and store it.
// Atomic LHS types use either a single atomicrmw (when the opcode maps to
// one) or a compare-exchange retry loop built around 'atomicPHI'.
// NOTE(review): original lines 3813, 3820, 3849, 3913, 3957, 3961 and 3964
// are elided in this listing — verify against upstream before editing.
3812LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3814 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3815 Value *&Result) {
3816 QualType LHSTy = E->getLHS()->getType();
3817 BinOpInfo OpInfo;
3818
3819 if (E->getComputationResultType()->isAnyComplexType())
3821
3822 // Emit the RHS first. __block variables need to have the rhs evaluated
3823 // first, plus this should improve codegen a little.
3824
3825 QualType PromotionTypeCR;
3826 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3827 if (PromotionTypeCR.isNull())
3828 PromotionTypeCR = E->getComputationResultType();
3829 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3830 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3831 if (!PromotionTypeRHS.isNull())
3832 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3833 else
3834 OpInfo.RHS = Visit(E->getRHS());
3835 OpInfo.Ty = PromotionTypeCR;
3836 OpInfo.Opcode = E->getOpcode();
3837 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3838 OpInfo.E = E;
3839 // Load/convert the LHS.
3840 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3841
3842 llvm::PHINode *atomicPHI = nullptr;
3843 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3844 QualType type = atomicTy->getValueType();
// Prefer a single atomicrmw for plain integer ops when no overflow
// sanitizer or trapping-overflow behavior needs the computed value checked.
3845 if (!type->isBooleanType() && type->isIntegerType() &&
3846 !(type->isUnsignedIntegerType() &&
3847 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3848 CGF.getLangOpts().getSignedOverflowBehavior() !=
3850 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3851 llvm::Instruction::BinaryOps Op;
3852 switch (OpInfo.Opcode) {
3853 // We don't have atomicrmw operands for *, %, /, <<, >>
3854 case BO_MulAssign: case BO_DivAssign:
3855 case BO_RemAssign:
3856 case BO_ShlAssign:
3857 case BO_ShrAssign:
3858 break;
3859 case BO_AddAssign:
3860 AtomicOp = llvm::AtomicRMWInst::Add;
3861 Op = llvm::Instruction::Add;
3862 break;
3863 case BO_SubAssign:
3864 AtomicOp = llvm::AtomicRMWInst::Sub;
3865 Op = llvm::Instruction::Sub;
3866 break;
3867 case BO_AndAssign:
3868 AtomicOp = llvm::AtomicRMWInst::And;
3869 Op = llvm::Instruction::And;
3870 break;
3871 case BO_XorAssign:
3872 AtomicOp = llvm::AtomicRMWInst::Xor;
3873 Op = llvm::Instruction::Xor;
3874 break;
3875 case BO_OrAssign:
3876 AtomicOp = llvm::AtomicRMWInst::Or;
3877 Op = llvm::Instruction::Or;
3878 break;
3879 default:
3880 llvm_unreachable("Invalid compound assignment type");
3881 }
3882 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3883 llvm::Value *Amt = CGF.EmitToMemory(
3884 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3885 E->getExprLoc()),
3886 LHSTy);
3887
3888 llvm::AtomicRMWInst *OldVal =
3889 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
3890
3891 // Since operation is atomic, the result type is guaranteed to be the
3892 // same as the input in LLVM terms.
3893 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3894 return LHSLV;
3895 }
3896 }
3897 // FIXME: For floating point types, we should be saving and restoring the
3898 // floating point environment in the loop.
// Compare-exchange loop: load the old value, branch into 'atomic_op' where
// a phi merges the initial load with each retry's observed value.
3899 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3900 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3901 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3902 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3903 Builder.CreateBr(opBB);
3904 Builder.SetInsertPoint(opBB);
3905 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3906 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3907 OpInfo.LHS = atomicPHI;
3908 }
3909 else
3910 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3911
3912 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3914 if (!PromotionTypeLHS.isNull())
3915 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
3916 E->getExprLoc());
3917 else
3918 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
3919 E->getComputationLHSType(), Loc);
3920
3921 // Expand the binary operator.
3922 Result = (this->*Func)(OpInfo);
3923
3924 // Convert the result back to the LHS type,
3925 // potentially with Implicit Conversion sanitizer check.
3926 // If LHSLV is a bitfield, use default ScalarConversionOpts
3927 // to avoid emit any implicit integer checks.
3928 Value *Previous = nullptr;
3929 if (LHSLV.isBitField()) {
3930 Previous = Result;
3931 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
3932 } else
3933 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
3934 ScalarConversionOpts(CGF.SanOpts));
3935
3936 if (atomicPHI) {
// Try to publish the new value; on failure, feed the freshly-observed old
// value back into the phi and retry the operation block.
3937 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3938 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3939 auto Pair = CGF.EmitAtomicCompareExchange(
3940 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3941 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3942 llvm::Value *success = Pair.second;
3943 atomicPHI->addIncoming(old, curBlock);
3944 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3945 Builder.SetInsertPoint(contBB);
3946 return LHSLV;
3947 }
3948
3949 // Store the result value into the LHS lvalue. Bit-fields are handled
3950 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3951 // 'An assignment expression has the value of the left operand after the
3952 // assignment...'.
3953 if (LHSLV.isBitField()) {
3954 Value *Src = Previous ? Previous : Result;
3955 QualType SrcType = E->getRHS()->getType();
3956 QualType DstType = E->getLHS()->getType();
3958 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
3959 LHSLV.getBitFieldInfo(), E->getExprLoc());
3960 } else
3962
3963 if (CGF.getLangOpts().OpenMP)
3965 E->getLHS());
3966 return LHSLV;
3967}
3968
3969Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3970 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3971 bool Ignore = TestAndClearIgnoreResultAssign();
3972 Value *RHS = nullptr;
3973 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3974
3975 // If the result is clearly ignored, return now.
3976 if (Ignore)
3977 return nullptr;
3978
3979 // The result of an assignment in C is the assigned r-value.
3980 if (!CGF.getLangOpts().CPlusPlus)
3981 return RHS;
3982
3983 // If the lvalue is non-volatile, return the computed value of the assignment.
3984 if (!LHS.isVolatileQualified())
3985 return RHS;
3986
3987 // Otherwise, reload the value.
3988 return EmitLoadOfLValue(LHS, E->getExprLoc());
3989}
3990
// Emit UBSan checks for integer '/' and '%': divide-by-zero and, for signed
// types, the INT_MIN / -1 overflow case. Conditions are accumulated in
// 'Checks' (each paired with its sanitizer ordinal) and handed to
// EmitBinOpCheck, which branches to the diagnostic handler if any fails.
// NOTE(review): original line 3993 (the declaration whose name is 'Checks')
// is elided in this listing — verify against upstream.
3991void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3992 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3994 Checks;
3995
3996 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3997 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3998 SanitizerKind::SO_IntegerDivideByZero));
3999 }
4000
4001 const auto *BO = cast<BinaryOperator>(Ops.E);
4002 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4003 Ops.Ty->hasSignedIntegerRepresentation() &&
4004 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4005 Ops.mayHaveIntegerOverflow()) {
4006 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4007
4008 llvm::Value *IntMin =
4009 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4010 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4011
// INT_MIN / -1 overflows; the op is safe when LHS != INT_MIN or RHS != -1.
4012 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4013 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4014 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4015 Checks.push_back(
4016 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4017 }
4018
4019 if (Checks.size() > 0)
4020 EmitBinOpCheck(Checks, Ops);
4021}
4022
4023Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
4024 {
4025 SanitizerDebugLocation SanScope(&CGF,
4026 {SanitizerKind::SO_IntegerDivideByZero,
4027 SanitizerKind::SO_SignedIntegerOverflow,
4028 SanitizerKind::SO_FloatDivideByZero},
4029 SanitizerHandler::DivremOverflow);
4030 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4031 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4032 Ops.Ty->isIntegerType() &&
4033 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4034 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4035 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4036 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4037 Ops.Ty->isRealFloatingType() &&
4038 Ops.mayHaveFloatDivisionByZero()) {
4039 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4040 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4041 EmitBinOpCheck(
4042 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4043 }
4044 }
4045
4046 if (Ops.Ty->isConstantMatrixType()) {
4047 llvm::MatrixBuilder MB(Builder);
4048 // We need to check the types of the operands of the operator to get the
4049 // correct matrix dimensions.
4050 auto *BO = cast<BinaryOperator>(Ops.E);
4051 (void)BO;
4052 assert(
4053 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
4054 "first operand must be a matrix");
4055 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4056 "second operand must be an arithmetic type");
4057 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4058 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4059 Ops.Ty->hasUnsignedIntegerRepresentation());
4060 }
4061
4062 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4063 llvm::Value *Val;
4064 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4065 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
4066 CGF.SetDivFPAccuracy(Val);
4067 return Val;
4068 }
4069 else if (Ops.isFixedPointOp())
4070 return EmitFixedPointBinOp(Ops);
4071 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4072 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4073 else
4074 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4075}
4076
4077Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4078 // Rem in C can't be a floating point type: C99 6.5.5p2.
4079 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4080 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4081 Ops.Ty->isIntegerType() &&
4082 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4083 SanitizerDebugLocation SanScope(&CGF,
4084 {SanitizerKind::SO_IntegerDivideByZero,
4085 SanitizerKind::SO_SignedIntegerOverflow},
4086 SanitizerHandler::DivremOverflow);
4087 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4088 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4089 }
4090
4091 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4092 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4093
4094 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4095 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4096
4097 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4098}
4099
// Emit +, -, or * (and their compound forms) with overflow detection via the
// llvm.{s,u}{add,sub,mul}.with.overflow intrinsics. If no custom overflow
// handler is configured, overflow reaches either the UBSan runtime or a trap
// (-ftrapv); otherwise the named handler is called and its truncated return
// value is merged back via a phi.
// NOTE(review): original lines 4149 (initializer of 'handlerName') and 4155
// (declaration of the sanitizer ordinal) are elided in this listing — verify
// against upstream before editing.
4100Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4101 unsigned IID;
4102 unsigned OpID = 0;
4103 SanitizerHandler OverflowKind;
4104
4105 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4106 switch (Ops.Opcode) {
4107 case BO_Add:
4108 case BO_AddAssign:
4109 OpID = 1;
4110 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4111 llvm::Intrinsic::uadd_with_overflow;
4112 OverflowKind = SanitizerHandler::AddOverflow;
4113 break;
4114 case BO_Sub:
4115 case BO_SubAssign:
4116 OpID = 2;
4117 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4118 llvm::Intrinsic::usub_with_overflow;
4119 OverflowKind = SanitizerHandler::SubOverflow;
4120 break;
4121 case BO_Mul:
4122 case BO_MulAssign:
4123 OpID = 3;
4124 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4125 llvm::Intrinsic::umul_with_overflow;
4126 OverflowKind = SanitizerHandler::MulOverflow;
4127 break;
4128 default:
4129 llvm_unreachable("Unsupported operation for overflow detection");
4130 }
// Encode the operation and signedness into one byte for the handler:
// low bit = signedness, upper bits = operation id.
4131 OpID <<= 1;
4132 if (isSigned)
4133 OpID |= 1;
4134
4135 SanitizerDebugLocation SanScope(&CGF,
4136 {SanitizerKind::SO_SignedIntegerOverflow,
4137 SanitizerKind::SO_UnsignedIntegerOverflow},
4138 OverflowKind);
4139 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4140
4141 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4142
// The intrinsic returns {result, i1 overflow-flag}.
4143 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4144 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4145 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4146
4147 // Handle overflow with llvm.trap if no custom handler has been specified.
4148 const std::string *handlerName =
4150 if (handlerName->empty()) {
4151 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
4152 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
4153 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4154 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
4156 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
4157 : SanitizerKind::SO_UnsignedIntegerOverflow;
4158 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
4159 } else
4160 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4161 return result;
4162 }
4163
4164 // Branch in case of overflow.
4165 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4166 llvm::BasicBlock *continueBB =
4167 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4168 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4169
4170 Builder.CreateCondBr(overflow, overflowBB, continueBB);
4171
4172 // If an overflow handler is set, then we want to call it and then use its
4173 // result, if it returns.
4174 Builder.SetInsertPoint(overflowBB);
4175
4176 // Get the overflow handler.
4177 llvm::Type *Int8Ty = CGF.Int8Ty;
4178 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4179 llvm::FunctionType *handlerTy =
4180 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4181 llvm::FunctionCallee handler =
4182 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4183
4184 // Sign extend the args to 64-bit, so that we can use the same handler for
4185 // all types of overflow.
4186 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4187 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4188
4189 // Call the handler with the two arguments, the operation, and the size of
4190 // the result.
4191 llvm::Value *handlerArgs[] = {
4192 lhs,
4193 rhs,
4194 Builder.getInt8(OpID),
4195 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4196 };
4197 llvm::Value *handlerResult =
4198 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4199
4200 // Truncate the result back to the desired size.
4201 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4202 Builder.CreateBr(continueBB);
4203
// Merge the normal result with the handler's result.
4204 Builder.SetInsertPoint(continueBB);
4205 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4206 phi->addIncoming(result, initialBB);
4207 phi->addIncoming(handlerResult, overflowBB);
4208
4209 return phi;
4210}
4211
4212/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4213/// information.
4214/// This function is used for BO_AddAssign/BO_SubAssign.
4215static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4216 bool isSubtraction) {
4217 // Must have binary (not unary) expr here. Unary pointer
4218 // increment/decrement doesn't use this path.
4219 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4220
4221 Value *pointer = op.LHS;
4222 Expr *pointerOperand = expr->getLHS();
4223 Value *index = op.RHS;
4224 Expr *indexOperand = expr->getRHS();
4225
4226 // In a subtraction, the LHS is always the pointer.
4227 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4228 std::swap(pointer, index);
4229 std::swap(pointerOperand, indexOperand);
4230 }
4231
4232 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4233 index, isSubtraction);
4234}
4235
4236/// Emit pointer + index arithmetic.
4238 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4239 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4240 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4241
4242 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4243 auto &DL = CGM.getDataLayout();
4244 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4245
4246 // Some versions of glibc and gcc use idioms (particularly in their malloc
4247 // routines) that add a pointer-sized integer (known to be a pointer value)
4248 // to a null pointer in order to cast the value back to an integer or as
4249 // part of a pointer alignment algorithm. This is undefined behavior, but
4250 // we'd like to be able to compile programs that use it.
4251 //
4252 // Normally, we'd generate a GEP with a null-pointer base here in response
4253 // to that code, but it's also UB to dereference a pointer created that
4254 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4255 // generate a direct cast of the integer value to a pointer.
4256 //
4257 // The idiom (p = nullptr + N) is not met if any of the following are true:
4258 //
4259 // The operation is subtraction.
4260 // The index is not pointer-sized.
4261 // The pointer type is not byte-sized.
4262 //
4263 // Note that we do not suppress the pointer overflow check in this case.
4265 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4266 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4267 if (getLangOpts().PointerOverflowDefined ||
4268 !SanOpts.has(SanitizerKind::PointerOverflow) ||
4269 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4270 PtrTy->getPointerAddressSpace()))
4271 return Ptr;
4272 // The inbounds GEP of null is valid iff the index is zero.
4273 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4274 auto CheckHandler = SanitizerHandler::PointerOverflow;
4275 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4276 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4277 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4278 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4279 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4280 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4281 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4282 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4283 DynamicArgs);
4284 return Ptr;
4285 }
4286
4287 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4288 // Zero-extend or sign-extend the pointer value according to
4289 // whether the index is signed or not.
4290 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4291 "idx.ext");
4292 }
4293
4294 // If this is subtraction, negate the index.
4295 if (isSubtraction)
4296 index = Builder.CreateNeg(index, "idx.neg");
4297
4298 if (SanOpts.has(SanitizerKind::ArrayBounds))
4299 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4300 /*Accessed*/ false);
4301
4302 const PointerType *pointerType =
4303 pointerOperand->getType()->getAs<PointerType>();
4304 if (!pointerType) {
4305 QualType objectType = pointerOperand->getType()
4307 ->getPointeeType();
4308 llvm::Value *objectSize =
4309 CGM.getSize(getContext().getTypeSizeInChars(objectType));
4310
4311 index = Builder.CreateMul(index, objectSize);
4312
4313 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4314 return Builder.CreateBitCast(result, pointer->getType());
4315 }
4316
4317 QualType elementType = pointerType->getPointeeType();
4318 if (const VariableArrayType *vla =
4319 getContext().getAsVariableArrayType(elementType)) {
4320 // The element count here is the total number of non-VLA elements.
4321 llvm::Value *numElements = getVLASize(vla).NumElts;
4322
4323 // Effectively, the multiply by the VLA size is part of the GEP.
4324 // GEP indexes are signed, and scaling an index isn't permitted to
4325 // signed-overflow, so we use the same semantics for our explicit
4326 // multiply. We suppress this if overflow is not undefined behavior.
4327 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4328 if (getLangOpts().PointerOverflowDefined) {
4329 index = Builder.CreateMul(index, numElements, "vla.index");
4330 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4331 } else {
4332 index = Builder.CreateNSWMul(index, numElements, "vla.index");
4333 pointer =
4334 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4335 isSubtraction, BO->getExprLoc(), "add.ptr");
4336 }
4337 return pointer;
4338 }
4339
4340 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4341 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4342 // future proof.
4343 llvm::Type *elemTy;
4344 if (elementType->isVoidType() || elementType->isFunctionType())
4345 elemTy = Int8Ty;
4346 else
4347 elemTy = ConvertTypeForMem(elementType);
4348
4349 if (getLangOpts().PointerOverflowDefined)
4350 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4351
4352 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4353 BO->getExprLoc(), "add.ptr");
4354}
4355
4356// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4357// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4358// the add operand respectively. This allows fmuladd to represent a*b-c, or
4359// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4360// efficient operations.
4361static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4362 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4363 bool negMul, bool negAdd) {
4364 Value *MulOp0 = MulOp->getOperand(0);
4365 Value *MulOp1 = MulOp->getOperand(1);
4366 if (negMul)
4367 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4368 if (negAdd)
4369 Addend = Builder.CreateFNeg(Addend, "neg");
4370
4371 Value *FMulAdd = nullptr;
4372 if (Builder.getIsFPConstrained()) {
4373 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4374 "Only constrained operation should be created when Builder is in FP "
4375 "constrained mode");
4376 FMulAdd = Builder.CreateConstrainedFPCall(
4377 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4378 Addend->getType()),
4379 {MulOp0, MulOp1, Addend});
4380 } else {
4381 FMulAdd = Builder.CreateCall(
4382 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4383 {MulOp0, MulOp1, Addend});
4384 }
4385 MulOp->eraseFromParent();
4386
4387 return FMulAdd;
4388}
4389
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
//
// Returns the fused value, or nullptr if no fusion was performed (the caller
// then emits a plain fadd/fsub).
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  //
  // Note the ordering below: when we looked through an fneg, the dead fneg is
  // erased BEFORE buildFMulAdd erases the fmul, since the fneg is the fmul's
  // remaining user.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // a + (-b*c) and a - (b*c) both negate the product: XOR the two flags.
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // In strict-FP mode the multiply is represented by the constrained fmul
  // intrinsic rather than a plain fmul instruction; handle that form too.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // No fusable multiply found.
  return nullptr;
}
4476
4477Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4478 if (op.LHS->getType()->isPointerTy() ||
4479 op.RHS->getType()->isPointerTy())
4481
4482 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4483 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4485 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4486 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4487 [[fallthrough]];
4489 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4490 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4491 [[fallthrough]];
4493 if (CanElideOverflowCheck(CGF.getContext(), op))
4494 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4495 return EmitOverflowCheckedBinOp(op);
4496 }
4497 }
4498
4499 // For vector and matrix adds, try to fold into a fmuladd.
4500 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4501 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4502 // Try to form an fmuladd.
4503 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4504 return FMulAdd;
4505 }
4506
4507 if (op.Ty->isConstantMatrixType()) {
4508 llvm::MatrixBuilder MB(Builder);
4509 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4510 return MB.CreateAdd(op.LHS, op.RHS);
4511 }
4512
4513 if (op.Ty->isUnsignedIntegerType() &&
4514 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4515 !CanElideOverflowCheck(CGF.getContext(), op))
4516 return EmitOverflowCheckedBinOp(op);
4517
4518 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4519 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4520 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4521 }
4522
4523 if (op.isFixedPointOp())
4524 return EmitFixedPointBinOp(op);
4525
4526 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4527}
4528
4529/// The resulting value must be calculated with exact precision, so the operands
4530/// may not be the same type.
4531Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4532 using llvm::APSInt;
4533 using llvm::ConstantInt;
4534
4535 // This is either a binary operation where at least one of the operands is
4536 // a fixed-point type, or a unary operation where the operand is a fixed-point
4537 // type. The result type of a binary operation is determined by
4538 // Sema::handleFixedPointConversions().
4539 QualType ResultTy = op.Ty;
4540 QualType LHSTy, RHSTy;
4541 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4542 RHSTy = BinOp->getRHS()->getType();
4543 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4544 // For compound assignment, the effective type of the LHS at this point
4545 // is the computation LHS type, not the actual LHS type, and the final
4546 // result type is not the type of the expression but rather the
4547 // computation result type.
4548 LHSTy = CAO->getComputationLHSType();
4549 ResultTy = CAO->getComputationResultType();
4550 } else
4551 LHSTy = BinOp->getLHS()->getType();
4552 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4553 LHSTy = UnOp->getSubExpr()->getType();
4554 RHSTy = UnOp->getSubExpr()->getType();
4555 }
4556 ASTContext &Ctx = CGF.getContext();
4557 Value *LHS = op.LHS;
4558 Value *RHS = op.RHS;
4559
4560 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4561 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4562 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4563 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4564
4565 // Perform the actual operation.
4566 Value *Result;
4567 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4568 switch (op.Opcode) {
4569 case BO_AddAssign:
4570 case BO_Add:
4571 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4572 break;
4573 case BO_SubAssign:
4574 case BO_Sub:
4575 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4576 break;
4577 case BO_MulAssign:
4578 case BO_Mul:
4579 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4580 break;
4581 case BO_DivAssign:
4582 case BO_Div:
4583 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4584 break;
4585 case BO_ShlAssign:
4586 case BO_Shl:
4587 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4588 break;
4589 case BO_ShrAssign:
4590 case BO_Shr:
4591 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4592 break;
4593 case BO_LT:
4594 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4595 case BO_GT:
4596 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4597 case BO_LE:
4598 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4599 case BO_GE:
4600 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4601 case BO_EQ:
4602 // For equality operations, we assume any padding bits on unsigned types are
4603 // zero'd out. They could be overwritten through non-saturating operations
4604 // that cause overflow, but this leads to undefined behavior.
4605 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4606 case BO_NE:
4607 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4608 case BO_Cmp:
4609 case BO_LAnd:
4610 case BO_LOr:
4611 llvm_unreachable("Found unimplemented fixed point binary operation");
4612 case BO_PtrMemD:
4613 case BO_PtrMemI:
4614 case BO_Rem:
4615 case BO_Xor:
4616 case BO_And:
4617 case BO_Or:
4618 case BO_Assign:
4619 case BO_RemAssign:
4620 case BO_AndAssign:
4621 case BO_XorAssign:
4622 case BO_OrAssign:
4623 case BO_Comma:
4624 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4625 }
4626
4627 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4629 // Convert to the result type.
4630 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4631 : CommonFixedSema,
4632 ResultFixedSema);
4633}
4634
4635Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4636 // The LHS is always a pointer if either side is.
4637 if (!op.LHS->getType()->isPointerTy()) {
4638 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4639 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4641 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4642 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4643 [[fallthrough]];
4645 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4646 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4647 [[fallthrough]];
4649 if (CanElideOverflowCheck(CGF.getContext(), op))
4650 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4651 return EmitOverflowCheckedBinOp(op);
4652 }
4653 }
4654
4655 // For vector and matrix subs, try to fold into a fmuladd.
4656 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4657 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4658 // Try to form an fmuladd.
4659 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4660 return FMulAdd;
4661 }
4662
4663 if (op.Ty->isConstantMatrixType()) {
4664 llvm::MatrixBuilder MB(Builder);
4665 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4666 return MB.CreateSub(op.LHS, op.RHS);
4667 }
4668
4669 if (op.Ty->isUnsignedIntegerType() &&
4670 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4671 !CanElideOverflowCheck(CGF.getContext(), op))
4672 return EmitOverflowCheckedBinOp(op);
4673
4674 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4675 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4676 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4677 }
4678
4679 if (op.isFixedPointOp())
4680 return EmitFixedPointBinOp(op);
4681
4682 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4683 }
4684
4685 // If the RHS is not a pointer, then we have normal pointer
4686 // arithmetic.
4687 if (!op.RHS->getType()->isPointerTy())
4689
4690 // Otherwise, this is a pointer subtraction.
4691
4692 // Do the raw subtraction part.
4693 llvm::Value *LHS
4694 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4695 llvm::Value *RHS
4696 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4697 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4698
4699 // Okay, figure out the element size.
4700 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4701 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4702
4703 llvm::Value *divisor = nullptr;
4704
4705 // For a variable-length array, this is going to be non-constant.
4706 if (const VariableArrayType *vla
4707 = CGF.getContext().getAsVariableArrayType(elementType)) {
4708 auto VlaSize = CGF.getVLASize(vla);
4709 elementType = VlaSize.Type;
4710 divisor = VlaSize.NumElts;
4711
4712 // Scale the number of non-VLA elements by the non-VLA element size.
4713 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4714 if (!eltSize.isOne())
4715 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4716
4717 // For everything elese, we can just compute it, safe in the
4718 // assumption that Sema won't let anything through that we can't
4719 // safely compute the size of.
4720 } else {
4721 CharUnits elementSize;
4722 // Handle GCC extension for pointer arithmetic on void* and
4723 // function pointer types.
4724 if (elementType->isVoidType() || elementType->isFunctionType())
4725 elementSize = CharUnits::One();
4726 else
4727 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4728
4729 // Don't even emit the divide for element size of 1.
4730 if (elementSize.isOne())
4731 return diffInChars;
4732
4733 divisor = CGF.CGM.getSize(elementSize);
4734 }
4735
4736 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4737 // pointer difference in C is only defined in the case where both operands
4738 // are pointing to elements of an array.
4739 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4740}
4741
4742Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4743 bool RHSIsSigned) {
4744 llvm::IntegerType *Ty;
4745 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4746 Ty = cast<llvm::IntegerType>(VT->getElementType());
4747 else
4748 Ty = cast<llvm::IntegerType>(LHS->getType());
4749 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4750 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4751 // this in ConstantInt::get, this results in the value getting truncated.
4752 // Constrain the return value to be max(RHS) in this case.
4753 llvm::Type *RHSTy = RHS->getType();
4754 llvm::APInt RHSMax =
4755 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4756 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4757 if (RHSMax.ult(Ty->getBitWidth()))
4758 return llvm::ConstantInt::get(RHSTy, RHSMax);
4759 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4760}
4761
4762Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4763 const Twine &Name) {
4764 llvm::IntegerType *Ty;
4765 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4766 Ty = cast<llvm::IntegerType>(VT->getElementType());
4767 else
4768 Ty = cast<llvm::IntegerType>(LHS->getType());
4769
4770 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4771 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4772
4773 return Builder.CreateURem(
4774 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4775}
4776
4777Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4778 // TODO: This misses out on the sanitizer check below.
4779 if (Ops.isFixedPointOp())
4780 return EmitFixedPointBinOp(Ops);
4781
4782 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4783 // RHS to the same size as the LHS.
4784 Value *RHS = Ops.RHS;
4785 if (Ops.LHS->getType() != RHS->getType())
4786 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4787
4788 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4789 Ops.Ty->hasSignedIntegerRepresentation() &&
4791 !CGF.getLangOpts().CPlusPlus20;
4792 bool SanitizeUnsignedBase =
4793 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4794 Ops.Ty->hasUnsignedIntegerRepresentation();
4795 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4796 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4797 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4798 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4799 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4800 else if ((SanitizeBase || SanitizeExponent) &&
4801 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4803 if (SanitizeSignedBase)
4804 Ordinals.push_back(SanitizerKind::SO_ShiftBase);
4805 if (SanitizeUnsignedBase)
4806 Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
4807 if (SanitizeExponent)
4808 Ordinals.push_back(SanitizerKind::SO_ShiftExponent);
4809
4810 SanitizerDebugLocation SanScope(&CGF, Ordinals,
4811 SanitizerHandler::ShiftOutOfBounds);
4813 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4814 llvm::Value *WidthMinusOne =
4815 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4816 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4817
4818 if (SanitizeExponent) {
4819 Checks.push_back(
4820 std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
4821 }
4822
4823 if (SanitizeBase) {
4824 // Check whether we are shifting any non-zero bits off the top of the
4825 // integer. We only emit this check if exponent is valid - otherwise
4826 // instructions below will have undefined behavior themselves.
4827 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4828 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4829 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4830 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4831 llvm::Value *PromotedWidthMinusOne =
4832 (RHS == Ops.RHS) ? WidthMinusOne
4833 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4834 CGF.EmitBlock(CheckShiftBase);
4835 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4836 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4837 /*NUW*/ true, /*NSW*/ true),
4838 "shl.check");
4839 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4840 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4841 // Under C++11's rules, shifting a 1 bit into the sign bit is
4842 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4843 // define signed left shifts, so we use the C99 and C++11 rules there).
4844 // Unsigned shifts can always shift into the top bit.
4845 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4846 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4847 }
4848 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4849 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4850 CGF.EmitBlock(Cont);
4851 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4852 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4853 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4854 Checks.push_back(std::make_pair(
4855 BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
4856 : SanitizerKind::SO_UnsignedShiftBase));
4857 }
4858
4859 assert(!Checks.empty());
4860 EmitBinOpCheck(Checks, Ops);
4861 }
4862
4863 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4864}
4865
4866Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4867 // TODO: This misses out on the sanitizer check below.
4868 if (Ops.isFixedPointOp())
4869 return EmitFixedPointBinOp(Ops);
4870
4871 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4872 // RHS to the same size as the LHS.
4873 Value *RHS = Ops.RHS;
4874 if (Ops.LHS->getType() != RHS->getType())
4875 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4876
4877 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4878 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4879 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4880 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4881 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4882 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4883 SanitizerHandler::ShiftOutOfBounds);
4884 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4885 llvm::Value *Valid = Builder.CreateICmpULE(
4886 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4887 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4888 }
4889
4890 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4891 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4892 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4893}
4894
4896// return corresponding comparison intrinsic for given vector type
4897static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4898 BuiltinType::Kind ElemKind) {
4899 switch (ElemKind) {
4900 default: llvm_unreachable("unexpected element type");
4901 case BuiltinType::Char_U:
4902 case BuiltinType::UChar:
4903 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4904 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4905 case BuiltinType::Char_S:
4906 case BuiltinType::SChar:
4907 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4908 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4909 case BuiltinType::UShort:
4910 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4911 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4912 case BuiltinType::Short:
4913 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4914 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4915 case BuiltinType::UInt:
4916 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4917 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4918 case BuiltinType::Int:
4919 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4920 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4921 case BuiltinType::ULong:
4922 case BuiltinType::ULongLong:
4923 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4924 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4925 case BuiltinType::Long:
4926 case BuiltinType::LongLong:
4927 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4928 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4929 case BuiltinType::Float:
4930 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4931 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4932 case BuiltinType::Double:
4933 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4934 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4935 case BuiltinType::UInt128:
4936 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4937 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4938 case BuiltinType::Int128:
4939 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4940 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4941 }
4942}
4943
// Shared emitter for all six scalar comparison operators (==, !=, <, >,
// <=, >=). The caller supplies the predicate to use for each operand
// class: UICmpOpc for unsigned integers and pointers, SICmpOpc for
// signed integers, FCmpOpc for floating point; IsSignaling selects the
// signaling form of the FP compare (used for the ordering operators
// under strict FP semantics). The result is converted to E->getType().
4944Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4945                                      llvm::CmpInst::Predicate UICmpOpc,
4946                                      llvm::CmpInst::Predicate SICmpOpc,
4947                                      llvm::CmpInst::Predicate FCmpOpc,
4948                                      bool IsSignaling) {
4949  TestAndClearIgnoreResultAssign();
4950  Value *Result;
4951  QualType LHSTy = E->getLHS()->getType();
4952  QualType RHSTy = E->getRHS()->getType();
  // Member pointers only support equality/inequality, and their layout
  // is ABI-specific, so the comparison is delegated to the C++ ABI.
  // NOTE(review): the line containing the ABI call itself (original line
  // 4958) is missing from this extracted listing.
4953  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4954    assert(E->getOpcode() == BO_EQ ||
4955           E->getOpcode() == BO_NE);
4956    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4957    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4959        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4960  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4961    BinOpInfo BOInfo = EmitBinOps(E);
4962    Value *LHS = BOInfo.LHS;
4963    Value *RHS = BOInfo.RHS;
4964
4965    // If AltiVec, the comparison results in a numeric type, so we use
4966    // intrinsics comparing vectors and giving 0 or 1 as a result
4967    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4968      // constants for mapping CR6 register bits to predicate result
4969      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4970
4971      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4972
4973      // in several cases vector arguments order will be reversed
4974      Value *FirstVecArg = LHS,
4975            *SecondVecArg = RHS;
4976
4977      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4978      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4979
      // Only "equal" and "greater-than" predicate intrinsics exist; the
      // other four comparisons are synthesized by swapping the operands
      // and/or testing the complementary CR6 bit.
4980      switch(E->getOpcode()) {
4981      default: llvm_unreachable("is not a comparison operation");
4982      case BO_EQ:
4983        CR6 = CR6_LT;
4984        ID = GetIntrinsic(VCMPEQ, ElementKind);
4985        break;
4986      case BO_NE:
4987        CR6 = CR6_EQ;
4988        ID = GetIntrinsic(VCMPEQ, ElementKind);
4989        break;
4990      case BO_LT:
4991        CR6 = CR6_LT;
4992        ID = GetIntrinsic(VCMPGT, ElementKind);
4993        std::swap(FirstVecArg, SecondVecArg);
4994        break;
4995      case BO_GT:
4996        CR6 = CR6_LT;
4997        ID = GetIntrinsic(VCMPGT, ElementKind);
4998        break;
4999      case BO_LE:
5000        if (ElementKind == BuiltinType::Float) {
5001          CR6 = CR6_LT;
5002          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5003          std::swap(FirstVecArg, SecondVecArg);
5004        }
5005        else {
5006          CR6 = CR6_EQ;
5007          ID = GetIntrinsic(VCMPGT, ElementKind);
5008        }
5009        break;
5010      case BO_GE:
5011        if (ElementKind == BuiltinType::Float) {
5012          CR6 = CR6_LT;
5013          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5014        }
5015        else {
5016          CR6 = CR6_EQ;
5017          ID = GetIntrinsic(VCMPGT, ElementKind);
5018          std::swap(FirstVecArg, SecondVecArg);
5019        }
5020        break;
5021      }
5022
5023      Value *CR6Param = Builder.getInt32(CR6);
5024      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
5025      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
5026
5027      // The result type of intrinsic may not be same as E->getType().
5028      // If E->getType() is not BoolTy, EmitScalarConversion will do the
5029      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
5030      // do nothing, if ResultTy is not i1 at the same time, it will cause
5031      // crash later.
5032      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
5033      if (ResultTy->getBitWidth() > 1 &&
5034          E->getType() == CGF.getContext().BoolTy)
5035        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
5036      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5037                                  E->getExprLoc());
5038    }
5039
    // Scalar (and ordinary vector) compares: pick the instruction by the
    // operand representation — fixed-point, FP, signed int, or unsigned
    // int/pointer.
5040    if (BOInfo.isFixedPointOp()) {
5041      Result = EmitFixedPointBinOp(BOInfo);
5042    } else if (LHS->getType()->isFPOrFPVectorTy()) {
5043      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5044      if (!IsSignaling)
5045        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
5046      else
5047        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
5048    } else if (LHSTy->hasSignedIntegerRepresentation()) {
5049      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
5050    } else {
5051      // Unsigned integers and pointers.
5052
5053      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5054          !isa<llvm::ConstantPointerNull>(LHS) &&
5055          !isa<llvm::ConstantPointerNull>(RHS)) {
5056
5057        // Dynamic information is required to be stripped for comparisons,
5058        // because it could leak the dynamic information.  Based on comparisons
5059        // of pointers to dynamic objects, the optimizer can replace one pointer
5060        // with another, which might be incorrect in presence of invariant
5061        // groups. Comparison with null is safe because null does not carry any
5062        // dynamic information.
5063        if (LHSTy.mayBeDynamicClass())
5064          LHS = Builder.CreateStripInvariantGroup(LHS);
5065        if (RHSTy.mayBeDynamicClass())
5066          RHS = Builder.CreateStripInvariantGroup(RHS);
5067      }
5068
5069      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
5070    }
5071
5072  // If this is a vector comparison, sign extend the result to the appropriate
5073  // vector integer type and return it (don't convert to bool).
5074  if (LHSTy->isVectorType())
5075    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
5076
5077  } else {
5078    // Complex Comparison: can only be an equality comparison.
    // NOTE(review): the declaration of the complex pair temporaries
    // (original line 5079) is missing from this extracted listing.
5080    QualType CETy;
    // A real operand compared against a complex one is promoted to a
    // complex value with a zero imaginary part.
5081    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5082      LHS = CGF.EmitComplexExpr(E->getLHS());
5083      CETy = CTy->getElementType();
5084    } else {
5085      LHS.first = Visit(E->getLHS());
5086      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
5087      CETy = LHSTy;
5088    }
5089    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5090      RHS = CGF.EmitComplexExpr(E->getRHS());
5091      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5092                                                     CTy->getElementType()) &&
5093             "The element types must always match.");
5094      (void)CTy;
5095    } else {
5096      RHS.first = Visit(E->getRHS());
5097      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
5098      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5099             "The element types must always match.");
5100    }
5101
5102    Value *ResultR, *ResultI;
5103    if (CETy->isRealFloatingType()) {
5104      // As complex comparisons can only be equality comparisons, they
5105      // are never signaling comparisons.
5106      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
5107      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
5108    } else {
5109      // Complex comparisons can only be equality comparisons.  As such, signed
5110      // and unsigned opcodes are the same.
5111      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
5112      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
5113    }
5114
    // a == b requires both parts equal; a != b requires either unequal.
5115    if (E->getOpcode() == BO_EQ) {
5116      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
5117    } else {
5118      assert(E->getOpcode() == BO_NE &&
5119             "Complex comparison other than == or != ?");
5120      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
5121    }
5122  }
5123
5124  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5125                              E->getExprLoc());
5126}
5127
// (Continuation of CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment;
// the opening signature line — original line 5128 — is missing from this
// extracted listing.)
// Emits the RHS of a bit-field assignment. When the RHS is a simple
// integral or lvalue-to-rvalue implicit cast, also returns the
// pre-conversion value via *Previous and its type via *SrcType so the
// bit-field sanitizer can later check the implicit truncation itself.
5129    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
5130  // In case we have the integer or bitfield sanitizer checks enabled
5131  // we want to get the expression before scalar conversion.
5132  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
5133    CastKind Kind = ICE->getCastKind();
5134    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
5135      *SrcType = ICE->getSubExpr()->getType();
5136      *Previous = EmitScalarExpr(ICE->getSubExpr());
5137      // Pass default ScalarConversionOpts to avoid emitting
5138      // integer sanitizer checks as E refers to bitfield.
5139      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
5140                                  ICE->getExprLoc());
5141    }
5142  }
  // Not a recognized cast shape: emit the RHS normally; *Previous and
  // *SrcType keep whatever the caller initialized them to.
5143  return EmitScalarExpr(E->getRHS());
5144}
5145
// Emit a simple assignment (BO_Assign). Handles, in order: pointer-auth
// qualified LHS, ObjC ownership-qualified LHS (dispatched to the matching
// ARC store helper), and the ordinary case including bit-field stores.
// NOTE(review): several lines are missing from this extracted listing —
// notably the LV declaration in the pointer-auth branch (5154-5155), the
// store following the nullability check (5159), and the ObjC lifetime
// `case` labels of the switch (5169/5173/5177/5181/5187).
5146Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
5147  ApplyAtomGroup Grp(CGF.getDebugInfo());
5148  bool Ignore = TestAndClearIgnoreResultAssign();
5149
5150  Value *RHS;
5151  LValue LHS;
5152
  // Pointer-auth qualified l-values: the RHS is signed for the LHS's
  // qualifier on store, and re-signed back to an unqualified value when
  // the assignment's result is used.
5153  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
5156    llvm::Value *RV =
5157        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
5158    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());
5160
5161    if (Ignore)
5162      return nullptr;
5163    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
5164                                      LV.getAddress(), /*nonnull*/ false);
5165    return RV;
5166  }
5167
  // Each ARC ownership qualifier has its own store entry point; the
  // helper names below identify which lifetime each (label-less) case
  // handles.
5168  switch (E->getLHS()->getType().getObjCLifetime()) {
5170    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
5171    break;
5172
5174    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
5175    break;
5176
5178    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
5179    break;
5180
5182    RHS = Visit(E->getRHS());
5183    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5184    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
5185    break;
5186
5188    // __block variables need to have the rhs evaluated first, plus
5189    // this should improve codegen just a little.
5190    Value *Previous = nullptr;
5191    QualType SrcType = E->getRHS()->getType();
5192    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5193    // we want to extract that value and potentially (if the bitfield sanitizer
5194    // is enabled) use it to check for an implicit conversion.
5195    if (E->getLHS()->refersToBitField())
5196      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5197    else
5198      RHS = Visit(E->getRHS());
5199
5200    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5201
5202    // Store the value into the LHS.  Bit-fields are handled specially
5203    // because the result is altered by the store, i.e., [C99 6.5.16p1]
5204    // 'An assignment expression has the value of the left operand after
5205    // the assignment...'.
5206    if (LHS.isBitField()) {
5207      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
5208      // If the expression contained an implicit conversion, make sure
5209      // to use the value before the scalar conversion.
5210      Value *Src = Previous ? Previous : RHS;
5211      QualType DstType = E->getLHS()->getType();
5212      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
5213                                      LHS.getBitFieldInfo(), E->getExprLoc());
5214    } else {
5215      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
5216      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
5217    }
5218  }
5219
5220  // If the result is clearly ignored, return now.
5221  if (Ignore)
5222    return nullptr;
5223
5224  // The result of an assignment in C is the assigned r-value.
5225  if (!CGF.getLangOpts().CPlusPlus)
5226    return RHS;
5227
5228  // If the lvalue is non-volatile, return the computed value of the assignment.
5229  if (!LHS.isVolatileQualified())
5230    return RHS;
5231
  // C++ volatile assignment: the result is the LHS after the store, so a
5232  // Otherwise, reload the value.
5233  return EmitLoadOfLValue(LHS, E->getExprLoc());
5234}
5235
// Emit `&&`. Vector operands get an element-wise (non-short-circuiting)
// logical AND; otherwise the usual short-circuit CFG is built, with
// constant-folding fast paths, profile-counter instrumentation, and MC/DC
// condition-bitmap bookkeeping.
// NOTE(review): several single lines are missing from this extracted
// listing (e.g. the FPOptsRAII declaration head at 5245, the
// maybeResetMCDCCondBitmap / maybeUpdateMCDCTestVectorBitmap calls, the
// isInstrumentedCondition() half of the `if` at 5280/5345, and the
// ConditionalEvaluation declaration at 5317).
5236Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
5237  // Perform vector logical and on comparisons with zero vectors.
5238  if (E->getType()->isVectorType()) {
5240
5241    Value *LHS = Visit(E->getLHS());
5242    Value *RHS = Visit(E->getRHS());
    // Each operand is turned into a boolean mask by comparing against
    // zero, then the masks are AND'ed and sign-extended to the result
    // vector type (-1 for true, 0 for false).
5243    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5244    if (LHS->getType()->isFPOrFPVectorTy()) {
5246          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5247      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5248      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5249    } else {
5250      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5251      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5252    }
5253    Value *And = Builder.CreateAnd(LHS, RHS);
5254    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
5255  }
5256
5257  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5258  llvm::Type *ResTy = ConvertType(E->getType());
5259
5260  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
5261  // If we have 1 && X, just emit X without inserting the control flow.
5262  bool LHSCondVal;
5263  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5264    if (LHSCondVal) { // If we have 1 && X, just emit X.
5266
5267      // If the top of the logical operator nest, reset the MCDC temp to 0.
5268      if (CGF.MCDCLogOpStack.empty())
5270
5271      CGF.MCDCLogOpStack.push_back(E);
5272
5273      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5274
5275      // If we're generating for profiling or coverage, generate a branch to a
5276      // block that increments the RHS counter needed to track branch condition
5277      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5278      // "FalseBlock" after the increment is done.
5279      if (InstrumentRegions &&
5281        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5282        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
5283        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5284        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
5285        CGF.EmitBlock(RHSBlockCnt);
5286        CGF.incrementProfileCounter(E->getRHS());
5287        CGF.EmitBranch(FBlock);
5288        CGF.EmitBlock(FBlock);
5289      } else
5290        CGF.markStmtMaybeUsed(E->getRHS());
5291
5292      CGF.MCDCLogOpStack.pop_back();
5293      // If the top of the logical operator nest, update the MCDC bitmap.
5294      if (CGF.MCDCLogOpStack.empty())
5296
5297      // ZExt result to int or bool.
5298      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
5299    }
5300
5301    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
5302    if (!CGF.ContainsLabel(E->getRHS())) {
5303      CGF.markStmtMaybeUsed(E->getRHS());
5304      return llvm::Constant::getNullValue(ResTy);
5305    }
5306  }
5307
  // General case: branch to land.rhs only when the LHS is true; all
  // edges that reach land.end directly from the LHS contribute `false`.
5308  // If the top of the logical operator nest, reset the MCDC temp to 0.
5309  if (CGF.MCDCLogOpStack.empty())
5311
5312  CGF.MCDCLogOpStack.push_back(E);
5313
5314  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
5315  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
5316
5318
5319  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
5320  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
5321                           CGF.getProfileCount(E->getRHS()));
5322
5323  // Any edges into the ContBlock are now from an (indeterminate number of)
5324  // edges from this first condition.  All of these values will be false.  Start
5325  // setting up the PHI node in the Cont Block for this.
5326  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5327                                            "", ContBlock);
5328  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5329       PI != PE; ++PI)
5330    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
5331
5332  eval.begin(CGF);
5333  CGF.EmitBlock(RHSBlock);
5335  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5336  eval.end(CGF);
5337
5338  // Reaquire the RHS block, as there may be subblocks inserted.
5339  RHSBlock = Builder.GetInsertBlock();
5340
5341  // If we're generating for profiling or coverage, generate a branch on the
5342  // RHS to a block that increments the RHS true counter needed to track branch
5343  // condition coverage.
5344  if (InstrumentRegions &&
5346    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5347    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5348    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
5349    CGF.EmitBlock(RHSBlockCnt);
5350    CGF.incrementProfileCounter(E->getRHS());
5351    CGF.EmitBranch(ContBlock);
5352    PN->addIncoming(RHSCond, RHSBlockCnt);
5353  }
5354
5355  // Emit an unconditional branch from this block to ContBlock.
5356  {
5357    // There is no need to emit line number for unconditional branch.
5358    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
5359    CGF.EmitBlock(ContBlock);
5360  }
5361  // Insert an entry into the phi node for the edge with the value of RHSCond.
5362  PN->addIncoming(RHSCond, RHSBlock);
5363
5364  CGF.MCDCLogOpStack.pop_back();
5365  // If the top of the logical operator nest, update the MCDC bitmap.
5366  if (CGF.MCDCLogOpStack.empty())
5368
5369  // Artificial location to preserve the scope information
5370  {
5372    PN->setDebugLoc(Builder.getCurrentDebugLocation());
5373  }
5374
5375  // ZExt result to int.
5376  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
5377}
5378
// Emit `||`. Mirror image of VisitBinLAnd: vector operands get an
// element-wise OR of zero-compares; otherwise a short-circuit CFG is
// built where edges reaching lor.end directly from the LHS contribute
// `true`. The same constant-folding, profiling, and MC/DC bookkeeping
// applies, and the same extraction gaps exist in this listing (missing
// single lines such as the isInstrumentedCondition() half of the `if`
// conditions and the MC/DC reset/update calls).
5379Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5380  // Perform vector logical or on comparisons with zero vectors.
5381  if (E->getType()->isVectorType()) {
5383
5384    Value *LHS = Visit(E->getLHS());
5385    Value *RHS = Visit(E->getRHS());
5386    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5387    if (LHS->getType()->isFPOrFPVectorTy()) {
5389          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5390      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5391      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5392    } else {
5393      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5394      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5395    }
5396    Value *Or = Builder.CreateOr(LHS, RHS);
5397    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
5398  }
5399
5400  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5401  llvm::Type *ResTy = ConvertType(E->getType());
5402
5403  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5404  // If we have 0 || X, just emit X without inserting the control flow.
5405  bool LHSCondVal;
5406  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5407    if (!LHSCondVal) { // If we have 0 || X, just emit X.
5409
5410      // If the top of the logical operator nest, reset the MCDC temp to 0.
5411      if (CGF.MCDCLogOpStack.empty())
5413
5414      CGF.MCDCLogOpStack.push_back(E);
5415
5416      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5417
5418      // If we're generating for profiling or coverage, generate a branch to a
5419      // block that increments the RHS counter need to track branch condition
5420      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5421      // "FalseBlock" after the increment is done.
5422      if (InstrumentRegions &&
5424        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5425        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5426        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5427        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
5428        CGF.EmitBlock(RHSBlockCnt);
5429        CGF.incrementProfileCounter(E->getRHS());
5430        CGF.EmitBranch(FBlock);
5431        CGF.EmitBlock(FBlock);
5432      } else
5433        CGF.markStmtMaybeUsed(E->getRHS());
5434
5435      CGF.MCDCLogOpStack.pop_back();
5436      // If the top of the logical operator nest, update the MCDC bitmap.
5437      if (CGF.MCDCLogOpStack.empty())
5439
5440      // ZExt result to int or bool.
5441      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5442    }
5443
5444    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5445    if (!CGF.ContainsLabel(E->getRHS())) {
5446      CGF.markStmtMaybeUsed(E->getRHS());
5447      return llvm::ConstantInt::get(ResTy, 1);
5448    }
5449  }
5450
5451  // If the top of the logical operator nest, reset the MCDC temp to 0.
5452  if (CGF.MCDCLogOpStack.empty())
5454
5455  CGF.MCDCLogOpStack.push_back(E);
5456
5457  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5458  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5459
5461
5462  // Branch on the LHS first.  If it is true, go to the success (cont) block.
5463  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
5465                           CGF.getProfileCount(E->getRHS()));
5466
5467  // Any edges into the ContBlock are now from an (indeterminate number of)
5468  // edges from this first condition.  All of these values will be true.  Start
5469  // setting up the PHI node in the Cont Block for this.
5470  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5471                                            "", ContBlock);
5472  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5473       PI != PE; ++PI)
5474    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5475
5476  eval.begin(CGF);
5477
5478  // Emit the RHS condition as a bool value.
5479  CGF.EmitBlock(RHSBlock);
5481  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5482
5483  eval.end(CGF);
5484
5485  // Reaquire the RHS block, as there may be subblocks inserted.
5486  RHSBlock = Builder.GetInsertBlock();
5487
5488  // If we're generating for profiling or coverage, generate a branch on the
5489  // RHS to a block that increments the RHS true counter needed to track branch
5490  // condition coverage.
5491  if (InstrumentRegions &&
5493    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5494    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5495    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
5496    CGF.EmitBlock(RHSBlockCnt);
5497    CGF.incrementProfileCounter(E->getRHS());
5498    CGF.EmitBranch(ContBlock);
5499    PN->addIncoming(RHSCond, RHSBlockCnt);
5500  }
5501
5502  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
5503  // into the phi node for the edge with the value of RHSCond.
5504  CGF.EmitBlock(ContBlock);
5505  PN->addIncoming(RHSCond, RHSBlock);
5506
5507  CGF.MCDCLogOpStack.pop_back();
5508  // If the top of the logical operator nest, update the MCDC bitmap.
5509  if (CGF.MCDCLogOpStack.empty())
5511
5512  // ZExt result to int.
5513  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5514}
5515
// Comma operator: evaluate the LHS purely for its side effects, then
// produce the RHS as the expression's value.
5516Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5517  CGF.EmitIgnoredExpr(E->getLHS());
  // The LHS may have left the builder without a valid insertion point
  // (e.g. if it ended the current basic block); restore one before
  // emitting the RHS.
5518  CGF.EnsureInsertPoint();
5519  return Visit(E->getRHS());
5520}
5521
5522//===----------------------------------------------------------------------===//
5523// Other Operators
5524//===----------------------------------------------------------------------===//
5525
5526/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5527/// expression is cheap enough and side-effect-free enough to evaluate
5528/// unconditionally instead of conditionally. This is used to convert control
5529/// flow into selects in some cases.
// (Continuation of isCheapEnoughToEvaluateUnconditionally; the opening
// signature line — original line 5530 — is missing from this extracted
// listing. See the doc comment above for the overall contract.)
5531                                                   CodeGenFunction &CGF) {
5532  // Anything that is an integer or floating point constant is fine.
5533  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5534
  // The commentary below is intentionally placed after the return: it
  // records why non-constant expressions (even plain variable
  // references) are deliberately NOT accepted here.
5535  // Even non-volatile automatic variables can't be evaluated unconditionally.
5536  // Referencing a thread_local may cause non-trivial initialization work to
5537  // occur. If we're inside a lambda and one of the variables is from the scope
5538  // outside the lambda, that function may have returned already. Reading its
5539  // locals is a bad idea. Also, these reads may introduce races there didn't
5540  // exist in the source-level program.
5541}
5542
5543
// Emit `cond ? lhs : rhs` (and GNU `?:`). Strategies, in order:
//   1. constant-folded condition with no needed labels in the dead arm —
//      emit only the live arm;
//   2. OpenCL vector condition — bitwise select on the mask's sign bit;
//   3. other vector / SVE conditions — an IR `select`;
//   4. both arms cheap and side-effect free — evaluate both and `select`;
//   5. otherwise — full control flow with a PHI at cond.end.
// NOTE(review): this extracted listing is missing scattered single lines
// (the OpaqueValueMapping binding at 5549, several
// llvm::EnableSingleByteCoverage guards and their else-branches, the
// second isCheapEnoughToEvaluateUnconditionally operand at 5653, and the
// ConditionalEvaluation declaration at 5682).
5544Value *ScalarExprEmitter::
5545VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5546  TestAndClearIgnoreResultAssign();
5547
5548  // Bind the common expression if necessary.
5550
5551  Expr *condExpr = E->getCond();
5552  Expr *lhsExpr = E->getTrueExpr();
5553  Expr *rhsExpr = E->getFalseExpr();
5554
5555  // If the condition constant folds and can be elided, try to avoid emitting
5556  // the condition and the dead arm.
5557  bool CondExprBool;
5558  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5559    Expr *live = lhsExpr, *dead = rhsExpr;
5560    if (!CondExprBool) std::swap(live, dead);
5561
5562    // If the dead side doesn't have labels we need, just emit the Live part.
5563    if (!CGF.ContainsLabel(dead)) {
5564      if (CondExprBool) {
5566          CGF.incrementProfileCounter(lhsExpr);
5567          CGF.incrementProfileCounter(rhsExpr);
5568        }
5570      }
5571      Value *Result = Visit(live);
5572      CGF.markStmtMaybeUsed(dead);
5573
5574      // If the live part is a throw expression, it acts like it has a void
5575      // type, so evaluating it returns a null Value*.  However, a conditional
5576      // with non-void type must return a non-null Value*.
5577      if (!Result && !E->getType()->isVoidType())
5578        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5579
5580      return Result;
5581    }
5582  }
5583
5584  // OpenCL: If the condition is a vector, we can treat this condition like
5585  // the select function.
5586  if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5587                                   condExpr->getType()->isExtVectorType())) {
5589
5590    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5591    llvm::Value *LHS = Visit(lhsExpr);
5592    llvm::Value *RHS = Visit(rhsExpr);
5593
5594    llvm::Type *condType = ConvertType(condExpr->getType());
5595    auto *vecTy = cast<llvm::FixedVectorType>(condType);
5596
5597    unsigned numElem = vecTy->getNumElements();
5598    llvm::Type *elemType = vecTy->getElementType();
5599
    // Build a full-width mask from the sign bit of each condition lane:
    // tmp = mask (lane selected => all ones), tmp2 = ~mask.
5600    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5601    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5602    llvm::Value *tmp = Builder.CreateSExt(
5603        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5604    llvm::Value *tmp2 = Builder.CreateNot(tmp);
5605
5606    // Cast float to int to perform ANDs if necessary.
5607    llvm::Value *RHSTmp = RHS;
5608    llvm::Value *LHSTmp = LHS;
5609    bool wasCast = false;
5610    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5611    if (rhsVTy->getElementType()->isFloatingPointTy()) {
5612      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5613      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5614      wasCast = true;
5615    }
5616
    // result = (RHS & ~mask) | (LHS & mask), i.e. a bitwise select.
5617    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5618    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5619    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5620    if (wasCast)
5621      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5622
5623    return tmp5;
5624  }
5625
5626  if (condExpr->getType()->isVectorType() ||
5627      condExpr->getType()->isSveVLSBuiltinType()) {
5629
5630    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5631    llvm::Value *LHS = Visit(lhsExpr);
5632    llvm::Value *RHS = Visit(rhsExpr);
5633
5634    llvm::Type *CondType = ConvertType(condExpr->getType());
5635    auto *VecTy = cast<llvm::VectorType>(CondType);
5636
    // An i1 condition vector can feed `select` directly; wider element
    // types are first reduced to a boolean mask.
5637    if (VecTy->getElementType()->isIntegerTy(1))
5638      return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5639
5640    // OpenCL uses the MSB of the mask vector.
5641    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5642    if (condExpr->getType()->isExtVectorType())
5643      CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5644    else
5645      CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5646    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5647  }
5648
5649  // If this is a really simple expression (like x ? 4 : 5), emit this as a
5650  // select instead of as control flow.  We can only do this if it is cheap and
5651  // safe to evaluate the LHS and RHS unconditionally.
5652  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
5654    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    // StepV feeds the profile counter so the counter advances only when
    // the true arm was taken.
5655    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5656
5658      CGF.incrementProfileCounter(lhsExpr);
5659      CGF.incrementProfileCounter(rhsExpr);
5661    } else
5662      CGF.incrementProfileCounter(E, StepV);
5663
5664    llvm::Value *LHS = Visit(lhsExpr);
5665    llvm::Value *RHS = Visit(rhsExpr);
5666    if (!LHS) {
5667      // If the conditional has void type, make sure we return a null Value*.
5668      assert(!RHS && "LHS and RHS types must match");
5669      return nullptr;
5670    }
5671    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5672  }
5673
5674  // If the top of the logical operator nest, reset the MCDC temp to 0.
5675  if (CGF.MCDCLogOpStack.empty())
5676    CGF.maybeResetMCDCCondBitmap(condExpr);
5677
5678  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5679  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5680  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5681
5683  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5684                           CGF.getProfileCount(lhsExpr));
5685
5686  CGF.EmitBlock(LHSBlock);
5687
5688  // If the top of the logical operator nest, update the MCDC bitmap for the
5689  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5690  // may also contain a boolean expression.
5691  if (CGF.MCDCLogOpStack.empty())
5692    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5693
5695    CGF.incrementProfileCounter(lhsExpr);
5696  else
5698
5699  eval.begin(CGF);
5700  Value *LHS = Visit(lhsExpr);
5701  eval.end(CGF);
5702
  // Re-read the insertion block: visiting the arm may have created
  // sub-blocks, and the PHI below needs the actual predecessor.
5703  LHSBlock = Builder.GetInsertBlock();
5704  Builder.CreateBr(ContBlock);
5705
5706  CGF.EmitBlock(RHSBlock);
5707
5708  // If the top of the logical operator nest, update the MCDC bitmap for the
5709  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5710  // may also contain a boolean expression.
5711  if (CGF.MCDCLogOpStack.empty())
5712    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5713
5715    CGF.incrementProfileCounter(rhsExpr);
5716
5717  eval.begin(CGF);
5718  Value *RHS = Visit(rhsExpr);
5719  eval.end(CGF);
5720
5721  RHSBlock = Builder.GetInsertBlock();
5722  CGF.EmitBlock(ContBlock);
5723
5724  // If the LHS or RHS is a throw expression, it will be legitimately null.
5725  if (!LHS)
5726    return RHS;
5727  if (!RHS)
5728    return LHS;
5729
5730  // Create a PHI node for the real part.
5731  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5732  PN->addIncoming(LHS, LHSBlock);
5733  PN->addIncoming(RHS, RHSBlock);
5734
5735  // When single byte coverage mode is enabled, add a counter to continuation
5736  // block.
5739
5740  return PN;
5741}
5742
// __builtin_choose_expr: the winning arm was already determined during
// semantic analysis; just emit it.
5743Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5744  return Visit(E->getChosenSubExpr());
5745}
5746
// va_arg with a scalar result type: delegate to the target-specific
// va_arg lowering and return the value as a scalar.
5747Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5748  Address ArgValue = Address::invalid();
5749  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5750
5751  return ArgPtr.getScalarVal();
5752}
5753
// A block literal (^{...}) evaluates to its block pointer.
5754Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5755  return CGF.EmitBlockLiteral(block);
5756}
5757
5758// Convert a vec3 to vec4, or vice versa.
// (Continuation of the static ConvertVec3AndVec4 helper; the opening
// signature line — original line 5759 — is missing from this extracted
// listing.) Shuffles between vec3 and vec4: the first three lanes are
// carried over, and when widening to four lanes (NumElementsDst == 4)
// the mask's trailing -1 selects a poison fourth lane.
5760                              Value *Src, unsigned NumElementsDst) {
5761  static constexpr int Mask[] = {0, 1, 2, -1};
5762  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5763}
5764
5765// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5766// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5767// but could be scalar or vectors of different lengths, and either can be
5768// pointer.
5769// There are 4 cases:
5770// 1. non-pointer -> non-pointer : needs 1 bitcast
5771// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5772// 3. pointer -> non-pointer
5773// a) pointer -> intptr_t : needs 1 ptrtoint
5774// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5775// 4. non-pointer -> pointer
5776// a) intptr_t -> pointer : needs 1 inttoptr
5777// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5778// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5779// allow casting directly between pointer types and non-integer non-pointer
5780// types.
// (Continuation of the static createCastsForTypeOfSameSize helper; the
// opening signature line — original line 5781 — is missing from this
// extracted listing. The four supported case shapes are enumerated in
// the comment block above the function.)
5782                                         const llvm::DataLayout &DL,
5783                                         Value *Src, llvm::Type *DstTy,
5784                                         StringRef Name = "") {
5785  auto SrcTy = Src->getType();
5786
5787  // Case 1.
5788  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5789    return Builder.CreateBitCast(Src, DstTy, Name);
5790
5791  // Case 2.
5792  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5793    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5794
5795  // Case 3.
5796  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5797    // Case 3b.
5798    if (!DstTy->isIntegerTy())
5799      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5800    // Cases 3a and 3b.
5801    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5802  }
5803
5804  // Case 4b.
5805  if (!SrcTy->isIntegerTy())
5806    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5807  // Cases 4a and 4b.
5808  return Builder.CreateIntToPtr(Src, DstTy, Name);
5809}
5810
5811Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5812 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5813 llvm::Type *DstTy = ConvertType(E->getType());
5814
5815 llvm::Type *SrcTy = Src->getType();
5816 unsigned NumElementsSrc =
5817 isa<llvm::VectorType>(SrcTy)
5818 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5819 : 0;
5820 unsigned NumElementsDst =
5821 isa<llvm::VectorType>(DstTy)
5822 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5823 : 0;
5824
5825 // Use bit vector expansion for ext_vector_type boolean vectors.
5826 if (E->getType()->isExtVectorBoolType())
5827 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5828
5829 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5830 // vector to get a vec4, then a bitcast if the target type is different.
5831 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5832 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5833 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5834 DstTy);
5835
5836 Src->setName("astype");
5837 return Src;
5838 }
5839
5840 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5841 // to vec4 if the original type is not vec4, then a shuffle vector to
5842 // get a vec3.
5843 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5844 auto *Vec4Ty = llvm::FixedVectorType::get(
5845 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5846 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5847 Vec4Ty);
5848
5849 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5850 Src->setName("astype");
5851 return Src;
5852 }
5853
5854 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5855 Src, DstTy, "astype");
5856}
5857
5858Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5859 return CGF.EmitAtomicExpr(E).getScalarVal();
5860}
5861
5862//===----------------------------------------------------------------------===//
5863// Entry Point into this File
5864//===----------------------------------------------------------------------===//
5865
5866/// Emit the computation of the specified expression of scalar type, ignoring
5867/// the result.
5868Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5869 assert(E && hasScalarEvaluationKind(E->getType()) &&
5870 "Invalid scalar expression to emit");
5871
5872 return ScalarExprEmitter(*this, IgnoreResultAssign)
5873 .Visit(const_cast<Expr *>(E));
5874}
5875
5876/// Emit a conversion from the specified type to the specified destination type,
5877/// both of which are LLVM scalar types.
5879 QualType DstTy,
5881 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5882 "Invalid scalar expression to emit");
5883 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5884}
5885
5886/// Emit a conversion from the specified complex type to the specified
5887/// destination type, where the destination type is an LLVM scalar type.
5889 QualType SrcTy,
5890 QualType DstTy,
5892 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5893 "Invalid complex -> scalar conversion");
5894 return ScalarExprEmitter(*this)
5895 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5896}
5897
5898
5899Value *
5901 QualType PromotionType) {
5902 if (!PromotionType.isNull())
5903 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5904 else
5905 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5906}
5907
5908
5911 bool isInc, bool isPre) {
5912 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5913}
5914
5916 // object->isa or (*object).isa
5917 // Generate code as for: *(Class*)object
5918
5919 Expr *BaseExpr = E->getBase();
5921 if (BaseExpr->isPRValue()) {
5922 llvm::Type *BaseTy =
5924 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5925 } else {
5926 Addr = EmitLValue(BaseExpr).getAddress();
5927 }
5928
5929 // Cast the address to Class*.
5930 Addr = Addr.withElementType(ConvertType(E->getType()));
5931 return MakeAddrLValue(Addr, E->getType());
5932}
5933
5934
5936 const CompoundAssignOperator *E) {
5938 ScalarExprEmitter Scalar(*this);
5939 Value *Result = nullptr;
5940 switch (E->getOpcode()) {
5941#define COMPOUND_OP(Op) \
5942 case BO_##Op##Assign: \
5943 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5944 Result)
5945 COMPOUND_OP(Mul);
5946 COMPOUND_OP(Div);
5947 COMPOUND_OP(Rem);
5948 COMPOUND_OP(Add);
5949 COMPOUND_OP(Sub);
5950 COMPOUND_OP(Shl);
5951 COMPOUND_OP(Shr);
5953 COMPOUND_OP(Xor);
5954 COMPOUND_OP(Or);
5955#undef COMPOUND_OP
5956
5957 case BO_PtrMemD:
5958 case BO_PtrMemI:
5959 case BO_Mul:
5960 case BO_Div:
5961 case BO_Rem:
5962 case BO_Add:
5963 case BO_Sub:
5964 case BO_Shl:
5965 case BO_Shr:
5966 case BO_LT:
5967 case BO_GT:
5968 case BO_LE:
5969 case BO_GE:
5970 case BO_EQ:
5971 case BO_NE:
5972 case BO_Cmp:
5973 case BO_And:
5974 case BO_Xor:
5975 case BO_Or:
5976 case BO_LAnd:
5977 case BO_LOr:
5978 case BO_Assign:
5979 case BO_Comma:
5980 llvm_unreachable("Not valid compound assignment operators");
5981 }
5982
5983 llvm_unreachable("Unhandled compound assignment operator");
5984}
5985
5987 // The total (signed) byte offset for the GEP.
5988 llvm::Value *TotalOffset;
5989 // The offset overflow flag - true if the total offset overflows.
5990 llvm::Value *OffsetOverflows;
5991};
5992
5993/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
5994 /// and compute the total offset it applies from its base pointer BasePtr.
5995/// Returns offset in bytes and a boolean flag whether an overflow happened
5996/// during evaluation.
5998 llvm::LLVMContext &VMContext,
5999 CodeGenModule &CGM,
6000 CGBuilderTy &Builder) {
6001 const auto &DL = CGM.getDataLayout();
6002
6003 // The total (signed) byte offset for the GEP.
6004 llvm::Value *TotalOffset = nullptr;
6005
6006 // Was the GEP already reduced to a constant?
6007 if (isa<llvm::Constant>(GEPVal)) {
6008 // Compute the offset by casting both pointers to integers and subtracting:
6009 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6010 Value *BasePtr_int =
6011 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6012 Value *GEPVal_int =
6013 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6014 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6015 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6016 }
6017
6018 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6019 assert(GEP->getPointerOperand() == BasePtr &&
6020 "BasePtr must be the base of the GEP.");
6021 assert(GEP->isInBounds() && "Expected inbounds GEP");
6022
6023 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6024
6025 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6026 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6027 auto *SAddIntrinsic =
6028 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6029 auto *SMulIntrinsic =
6030 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6031
6032 // The offset overflow flag - true if the total offset overflows.
6033 llvm::Value *OffsetOverflows = Builder.getFalse();
6034
6035 /// Return the result of the given binary operation.
6036 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6037 llvm::Value *RHS) -> llvm::Value * {
6038 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6039
6040 // If the operands are constants, return a constant result.
6041 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6042 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6043 llvm::APInt N;
6044 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6045 /*Signed=*/true, N);
6046 if (HasOverflow)
6047 OffsetOverflows = Builder.getTrue();
6048 return llvm::ConstantInt::get(VMContext, N);
6049 }
6050 }
6051
6052 // Otherwise, compute the result with checked arithmetic.
6053 auto *ResultAndOverflow = Builder.CreateCall(
6054 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6055 OffsetOverflows = Builder.CreateOr(
6056 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6057 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6058 };
6059
6060 // Determine the total byte offset by looking at each GEP operand.
6061 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6062 GTI != GTE; ++GTI) {
6063 llvm::Value *LocalOffset;
6064 auto *Index = GTI.getOperand();
6065 // Compute the local offset contributed by this indexing step:
6066 if (auto *STy = GTI.getStructTypeOrNull()) {
6067 // For struct indexing, the local offset is the byte position of the
6068 // specified field.
6069 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6070 LocalOffset = llvm::ConstantInt::get(
6071 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6072 } else {
6073 // Otherwise this is array-like indexing. The local offset is the index
6074 // multiplied by the element size.
6075 auto *ElementSize =
6076 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6077 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6078 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6079 }
6080
6081 // If this is the first offset, set it as the total offset. Otherwise, add
6082 // the local offset into the running total.
6083 if (!TotalOffset || TotalOffset == Zero)
6084 TotalOffset = LocalOffset;
6085 else
6086 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6087 }
6088
6089 return {TotalOffset, OffsetOverflows};
6090}
6091
6092Value *
6093CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6094 ArrayRef<Value *> IdxList,
6095 bool SignedIndices, bool IsSubtraction,
6096 SourceLocation Loc, const Twine &Name) {
6097 llvm::Type *PtrTy = Ptr->getType();
6098
6099 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6100 if (!SignedIndices && !IsSubtraction)
6101 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6102
6103 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6104
6105 // If the pointer overflow sanitizer isn't enabled, do nothing.
6106 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6107 return GEPVal;
6108
6109 // Perform nullptr-and-offset check unless the nullptr is defined.
6110 bool PerformNullCheck = !NullPointerIsDefined(
6111 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6112 // Check for overflows unless the GEP got constant-folded,
6113 // and only in the default address space
6114 bool PerformOverflowCheck =
6115 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6116
6117 if (!(PerformNullCheck || PerformOverflowCheck))
6118 return GEPVal;
6119
6120 const auto &DL = CGM.getDataLayout();
6121
6122 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6123 auto CheckHandler = SanitizerHandler::PointerOverflow;
6124 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6125 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6126
6127 GEPOffsetAndOverflow EvaluatedGEP =
6129
6130 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6131 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6132 "If the offset got constant-folded, we don't expect that there was an "
6133 "overflow.");
6134
6135 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6136
6137 // Common case: if the total offset is zero, don't emit a check.
6138 if (EvaluatedGEP.TotalOffset == Zero)
6139 return GEPVal;
6140
6141 // Now that we've computed the total offset, add it to the base pointer (with
6142 // wrapping semantics).
6143 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6144 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6145
6147 2>
6148 Checks;
6149
6150 if (PerformNullCheck) {
6151 // If the base pointer evaluates to a null pointer value,
6152 // the only valid pointer this inbounds GEP can produce is also
6153 // a null pointer, so the offset must also evaluate to zero.
6154 // Likewise, if we have non-zero base pointer, we can not get null pointer
6155 // as a result, so the offset can not be -intptr_t(BasePtr).
6156 // In other words, both pointers are either null, or both are non-null,
6157 // or the behaviour is undefined.
6158 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6159 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6160 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6161 Checks.emplace_back(Valid, CheckOrdinal);
6162 }
6163
6164 if (PerformOverflowCheck) {
6165 // The GEP is valid if:
6166 // 1) The total offset doesn't overflow, and
6167 // 2) The sign of the difference between the computed address and the base
6168 // pointer matches the sign of the total offset.
6169 llvm::Value *ValidGEP;
6170 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6171 if (SignedIndices) {
6172 // GEP is computed as `unsigned base + signed offset`, therefore:
6173 // * If offset was positive, then the computed pointer can not be
6174 // [unsigned] less than the base pointer, unless it overflowed.
6175 // * If offset was negative, then the computed pointer can not be
6176 // [unsigned] greater than the base pointer, unless it overflowed.
6177 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6178 auto *PosOrZeroOffset =
6179 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6180 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6181 ValidGEP =
6182 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6183 } else if (!IsSubtraction) {
6184 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6185 // computed pointer can not be [unsigned] less than base pointer,
6186 // unless there was an overflow.
6187 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6188 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6189 } else {
6190 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6191 // computed pointer can not be [unsigned] greater than base pointer,
6192 // unless there was an overflow.
6193 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6194 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6195 }
6196 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6197 Checks.emplace_back(ValidGEP, CheckOrdinal);
6198 }
6199
6200 assert(!Checks.empty() && "Should have produced some checks.");
6201
6202 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6203 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6204 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6205 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6206
6207 return GEPVal;
6208}
6209
6211 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6212 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6213 const Twine &Name) {
6214 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6215 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6216 if (!SignedIndices && !IsSubtraction)
6217 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6218
6219 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6220 }
6221
6222 return RawAddress(
6223 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6224 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6225 elementType, Align);
6226}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3597
NodeId Parent
Definition: ASTDiff.cpp:191
ASTImporterLookupTable & LT
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, QualType DestTy, Address SrcVal, QualType SrcTy, SourceLocation Loc)
Definition: CGExprAgg.cpp:550
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:1121
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
const Decl * D
Expr * E
llvm::APSInt APSInt
Definition: Compiler.cpp:23
static Decl::Kind getKind(const Decl *D)
Definition: DeclBase.cpp:1192
SanitizerHandler
SourceLocation Loc
Definition: SemaObjC.cpp:754
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
StateNode * Previous
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
APSInt & getInt()
Definition: APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
Definition: ASTContext.cpp:917
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
Definition: ASTContext.h:1234
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2851
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition: ASTContext.h:894
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
Definition: ASTContext.cpp:878
CanQualType BoolTy
Definition: ASTContext.h:1223
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2898
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2625
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:3059
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:859
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2629
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:197
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:201
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:250
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4289
AddrLabelExpr - The GNU address of label extension, representing &&label.
Definition: Expr.h:4486
Represents the index of the current element of an array being initialized by an ArrayInitLoopExpr.
Definition: Expr.h:5957
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2723
An Embarcadero array type trait, as used in the implementation of __array_rank and __array_extent.
Definition: ExprCXX.h:2990
QualType getElementType() const
Definition: TypeBase.h:3750
AsTypeExpr - Clang builtin function __builtin_astype [OpenCL 6.2.4.2] This AST node provides support ...
Definition: Expr.h:6621
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6816
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3974
Expr * getLHS() const
Definition: Expr.h:4024
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition: Expr.h:4121
bool isCompoundAssignmentOp() const
Definition: Expr.h:4118
SourceLocation getExprLoc() const
Definition: Expr.h:4015
bool isShiftOp() const
Definition: Expr.h:4063
Expr * getRHS() const
Definition: Expr.h:4026
bool isShiftAssignOp() const
Definition: Expr.h:4132
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition: Expr.cpp:2200
Opcode getOpcode() const
Definition: Expr.h:4019
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6560
This class is used for builtin types like 'int'.
Definition: TypeBase.h:3182
Kind getKind() const
Definition: TypeBase.h:3230
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
A boolean literal, per ([C++ lex.bool] Boolean literals).
Definition: ExprCXX.h:723
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1378
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1105
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2620
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:481
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2349
Represents a C++11 noexcept expression (C++ [expr.unary.noexcept]).
Definition: ExprCXX.h:4303
The null pointer literal (C++11 [lex.nullptr])
Definition: ExprCXX.h:768
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2739
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:286
An expression "T()" which creates an rvalue of a non-class type T.
Definition: ExprCXX.h:2198
Represents the this expression in C++.
Definition: ExprCXX.h:1155
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1209
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2879
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3612
path_iterator path_begin()
Definition: Expr.h:3682
CastKind getCastKind() const
Definition: Expr.h:3656
bool changesVolatileQualification() const
Return.
Definition: Expr.h:3746
path_iterator path_end()
Definition: Expr.h:3683
Expr * getSubExpr()
Definition: Expr.h:3662
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4784
Represents a 'co_await' expression.
Definition: ExprCXX.h:5363
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:276
bool isValid() const
Definition: Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:906
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:946
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:963
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Definition: CGBuilder.h:360
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:296
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:103
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:95
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:85
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition: CGCXXABI.cpp:72
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
An object to manage conditionally-evaluated expressions.
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition: CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition: CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition: CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition: CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition: CGExpr.cpp:6682
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition: CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition: CGExpr.cpp:181
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition: CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition: CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition: CGExpr.cpp:2680
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:3649
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:6263
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition: CGExpr.cpp:1236
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition: CGExpr.cpp:6782
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
Definition: CGExprCXX.cpp:2117
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition: CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition: CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition: CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition: CGDecl.cpp:764
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
Definition: CGAtomic.cpp:2063
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition: CGClass.cpp:2815
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
Definition: CGExprCXX.cpp:1602
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition: CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:3539
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:174
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition: CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:242
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition: CGExpr.cpp:5932
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:2336
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition: CGObjC.cpp:64
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:223
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:3789
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition: CGExpr.cpp:5885
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition: CGExpr.cpp:1967
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition: CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition: CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition: CGExpr.cpp:2153
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition: CGExpr.cpp:5871
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:2533
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition: CGExpr.cpp:4229
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition: CGStmt.cpp:566
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
Definition: CGAtomic.cpp:2082
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:264
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition: CGAtomic.cpp:854
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1515
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition: CGStmt.cpp:672
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition: CGExpr.cpp:1596
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition: CGExpr.cpp:734
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition: CGObjC.cpp:4033
void FlattenAccessAndType(Address Addr, QualType AddrTy, SmallVectorImpl< std::pair< Address, llvm::Value * > > &AccessList, SmallVectorImpl< QualType > &FlatTypes)
Definition: CGExpr.cpp:6791
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition: CGExpr.cpp:4769
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition: CGExpr.cpp:4141
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition: CGExpr.cpp:2183
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition: CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition: CGExpr.cpp:1864
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition: CGExpr.cpp:1631
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition: CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition: CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
Definition: CGExprCXX.cpp:2246
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1311
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
LValue - This represents an lvalue references.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isVolatileQualified() const
Definition: CGValue.h:285
void setTBAAInfo(TBAAAccessInfo Info)
Definition: CGValue.h:336
const Qualifiers & getQuals() const
Definition: CGValue.h:338
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
bool isAggregate() const
Definition: CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
An abstract representation of an aligned address.
Definition: Address.h:42
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Complex values, per C99 6.2.5p11.
Definition: TypeBase.h:3293
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4236
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3541
Represents the specialization of a concept - evaluates to a prvalue of type bool.
Definition: ExprConcepts.h:42
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1084
Represents a concrete matrix type with constant number of rows and columns.
Definition: TypeBase.h:4389
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: TypeBase.h:4407
ConvertVectorExpr - Clang builtin function __builtin_convertvector This AST node provides support for...
Definition: Expr.h:4655
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5444
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2393
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1272
T * getAttr() const
Definition: DeclBase.h:573
Represents a reference to #emded data.
Definition: Expr.h:5062
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3864
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3655
This represents one expression.
Definition: Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition: Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition: Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition: Expr.cpp:3922
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3069
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition: Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3053
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:476
QualType getType() const
Definition: Expr.h:144
An expression trait intrinsic.
Definition: ExprCXX.h:3063
ExtVectorType - Extended vector type.
Definition: TypeBase.h:4283
Represents a member of a struct/union/class.
Definition: Decl.h:3157
GNUNullExpr - Implements the GNU __null extension, which is a name for a null pointer constant that h...
Definition: Expr.h:4859
Represents a C11 generic selection.
Definition: Expr.h:6114
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition: Expr.h:3789
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5993
Describes an C or C++ initializer list.
Definition: Expr.h:5235
@ PostDecrInWhile
while (count–)
Definition: LangOptions.h:329
bool isSignedOverflowDefined() const
Definition: LangOptions.h:610
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
Definition: LangOptions.h:623
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Definition: LangOptions.h:479
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4914
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2801
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition: TypeBase.h:4353
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3300
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: TypeBase.h:3669
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:192
A runtime availability query.
Definition: ExprObjC.h:1703
ObjCBoolLiteralExpr - Objective-C Boolean Literal.
Definition: ExprObjC.h:88
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:128
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:308
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1498
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:548
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:940
Represents a pointer to an Objective C object.
Definition: TypeBase.h:7961
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition: TypeBase.h:7998
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:504
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:454
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:52
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2529
Helper class for OffsetOfExpr.
Definition: Expr.h:2423
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition: Expr.h:2481
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition: Expr.h:2487
@ Array
An index into an array.
Definition: Expr.h:2428
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2432
@ Field
A field.
Definition: Expr.h:2430
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2435
Kind getKind() const
Determine what kind of offsetof node this is.
Definition: Expr.h:2477
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition: Expr.h:2497
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1180
This expression type represents an asterisk in an OpenACC Size-Expr, used in the 'tile' and 'gang' cl...
Definition: Expr.h:2092
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2184
const Expr * getSubExpr() const
Definition: Expr.h:2201
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition: TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: TypeBase.h:3346
QualType getPointeeType() const
Definition: TypeBase.h:3356
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6692
A (possibly-)qualified type.
Definition: TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition: TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition: Type.cpp:130
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: TypeBase.h:8343
LangAS getAddressSpace() const
Return the address space of this type.
Definition: TypeBase.h:8469
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: TypeBase.h:8528
QualType getCanonicalType() const
Definition: TypeBase.h:8395
bool UseExcessPrecision(const ASTContext &Ctx)
Definition: Type.cpp:1612
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition: Type.cpp:135
bool isCanonical() const
Definition: TypeBase.h:8400
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition: TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: TypeBase.h:367
void removePointerAuth()
Definition: TypeBase.h:610
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: TypeBase.h:6502
RecordDecl * getOriginalDecl() const
Definition: TypeBase.h:6509
C++2a [expr.prim.req]: A requires-expression provides a concise way to express requirements on templa...
Definition: ExprConcepts.h:505
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition: Sanitizers.h:59
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Sema - This implements semantic analysis and AST building for C.
Definition: Sema.h:850
ShuffleVectorExpr - clang-specific builtin-in function __builtin_shufflevector.
Definition: Expr.h:4579
Represents an expression that computes the length of a parameter pack.
Definition: ExprCXX.h:4435
Represents a function call to one of __builtin_LINE(), __builtin_COLUMN(), __builtin_FUNCTION(),...
Definition: Expr.h:4953
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition: Expr.cpp:2277
SourceLocation getLocation() const
Definition: Expr.h:4997
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4531
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:45
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:186
Stmt - This represents one statement.
Definition: Stmt.h:85
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:346
Represents a reference to a non-type template parameter that has been substituted with a template arg...
Definition: ExprCXX.h:4658
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
Definition: TargetInfo.h:1015
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
Definition: TargetInfo.h:1703
const llvm::fltSemantics & getHalfFormat() const
Definition: TargetInfo.h:783
const llvm::fltSemantics & getBFloat16Format() const
Definition: TargetInfo.h:793
const llvm::fltSemantics & getLongDoubleFormat() const
Definition: TargetInfo.h:804
const llvm::fltSemantics & getFloat128Format() const
Definition: TargetInfo.h:812
const llvm::fltSemantics & getIbm128Format() const
Definition: TargetInfo.h:820
A type trait used in the implementation of various C++11 and Library TR1 trait templates.
Definition: ExprCXX.h:2890
bool isVoidType() const
Definition: TypeBase.h:8936
bool isBooleanType() const
Definition: TypeBase.h:9066
bool isSignableType(const ASTContext &Ctx) const
Definition: TypeBase.h:8592
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2229
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:2277
CXXRecordDecl * castAsCXXRecordDecl() const
Definition: Type.h:36
bool isArithmeticType() const
Definition: Type.cpp:2341
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: TypeBase.h:8980
const T * castAs() const
Member-template castAs<specific type>.
Definition: TypeBase.h:9226
bool isReferenceType() const
Definition: TypeBase.h:8604
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition: Type.cpp:1909
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition: Type.cpp:2612
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:752
bool isExtVectorType() const
Definition: TypeBase.h:8723
bool isExtVectorBoolType() const
Definition: TypeBase.h:8727
bool isOCLIntelSubgroupAVCType() const
Definition: TypeBase.h:8855
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition: TypeBase.h:8703
RecordDecl * castAsRecordDecl() const
Definition: Type.h:48
bool isAnyComplexType() const
Definition: TypeBase.h:8715
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition: TypeBase.h:8992
bool isHalfType() const
Definition: TypeBase.h:8940
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition: Type.cpp:2247
bool isQueueT() const
Definition: TypeBase.h:8826
bool isMatrixType() const
Definition: TypeBase.h:8737
bool isEventT() const
Definition: TypeBase.h:8818
bool isFunctionType() const
Definition: TypeBase.h:8576
bool isVectorType() const
Definition: TypeBase.h:8719
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2324
bool isFloatingType() const
Definition: Type.cpp:2308
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition: Type.cpp:2257
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition: TypeBase.h:2946
const T * getAs() const
Member-template getAs<specific type>'.
Definition: TypeBase.h:9159
bool isNullPtrType() const
Definition: TypeBase.h:8973
UnaryExprOrTypeTraitExpr - expression with either a type or (unevaluated) expression operand.
Definition: Expr.h:2627
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2246
Expr * getSubExpr() const
Definition: Expr.h:2287
Opcode getOpcode() const
Definition: Expr.h:2282
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition: Expr.h:2300
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4893
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:711
QualType getType() const
Definition: Decl.h:722
QualType getType() const
Definition: Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: TypeBase.h:3982
Represents a GCC generic vector type.
Definition: TypeBase.h:4191
VectorKind getVectorKind() const
Definition: TypeBase.h:4211
QualType getElementType() const
Definition: TypeBase.h:4205
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2697
Defines the clang::TargetInfo interface.
const AstTypeMatcher< PointerType > pointerType
Matches pointer types, but does not match Objective-C object pointer types.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition: Floating.h:27
llvm::APInt APInt
Definition: FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1274
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1948
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1289
The JSON file list parser is used to communicate input to InstallAPI.
BinaryOperatorKind
@ Result
The result type of a method or function.
CastKind
CastKind - The kind of operation required for a conversion.
const FunctionProtoType * T
@ Generic
not a target-specific vector type
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:184