InterpBuiltin.cpp
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "../ExprConstShared.h"
9#include "Boolean.h"
10#include "EvalEmitter.h"
11#include "Interp.h"
13#include "PrimType.h"
14#include "clang/AST/OSLog.h"
19#include "llvm/ADT/StringExtras.h"
20#include "llvm/Support/ErrorHandling.h"
21#include "llvm/Support/SipHash.h"
22
23namespace clang {
24namespace interp {
25
26LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
27 switch (ID) {
28 case Builtin::BIas_const:
29 case Builtin::BIforward:
30 case Builtin::BIforward_like:
31 case Builtin::BImove:
32 case Builtin::BImove_if_noexcept:
33 case Builtin::BIaddressof:
34 case Builtin::BI__addressof:
35 case Builtin::BI__builtin_addressof:
36 case Builtin::BI__builtin_launder:
37 return true;
38 default:
39 return false;
40 }
41 return false;
42}
43
44static void discard(InterpStack &Stk, PrimType T) {
45 TYPE_SWITCH(T, { Stk.discard<T>(); });
46}
47
48static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
49 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
50}
51
52/// Pushes \p Val on the stack as the type given by \p QT.
53static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
56 OptPrimType T = S.getContext().classify(QT);
57 assert(T);
58
59 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
60
61 if (T == PT_IntAPS) {
62 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
63 Result.copy(Val);
64 S.Stk.push<IntegralAP<true>>(Result);
65 return;
66 }
67
68 if (T == PT_IntAP) {
69 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
70 Result.copy(Val);
71 S.Stk.push<IntegralAP<false>>(Result);
72 return;
73 }
74
75 if (QT->isSignedIntegerOrEnumerationType()) {
76 int64_t V = Val.getSExtValue();
77 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
78 } else {
80 uint64_t V = Val.getZExtValue();
81 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
82 }
83}
84
85template <typename T>
86static void pushInteger(InterpState &S, T Val, QualType QT) {
87 if constexpr (std::is_same_v<T, APInt>)
88 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
89 else if constexpr (std::is_same_v<T, APSInt>)
90 pushInteger(S, Val, QT);
91 else
92 pushInteger(S,
93 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
94 std::is_signed_v<T>),
95 !std::is_signed_v<T>),
96 QT);
97}
98
99static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
100 const APSInt &Value) {
101
102 if (ValueT == PT_IntAPS) {
103 Dest.deref<IntegralAP<true>>() =
104 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
105 Dest.deref<IntegralAP<true>>().copy(Value);
106 } else if (ValueT == PT_IntAP) {
107 Dest.deref<IntegralAP<false>>() =
108 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
109 Dest.deref<IntegralAP<false>>().copy(Value);
110 } else {
111 INT_TYPE_SWITCH_NO_BOOL(
112 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
113 }
114}
115
116static QualType getElemType(const Pointer &P) {
117 const Descriptor *Desc = P.getFieldDesc();
118 QualType T = Desc->getType();
119 if (Desc->isPrimitive())
120 return T;
121 if (T->isPointerType())
122 return T->getAs<PointerType>()->getPointeeType();
123 if (Desc->isArray())
124 return Desc->getElemQualType();
125 if (const auto *AT = T->getAsArrayTypeUnsafe())
126 return AT->getElementType();
127 return T;
128}
129
130static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
131 unsigned ID) {
132 if (!S.diagnosing())
133 return;
134
135 auto Loc = S.Current->getSource(OpPC);
136 if (S.getLangOpts().CPlusPlus11)
137 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
138 << /*isConstexpr=*/0 << /*isConstructor=*/0
139 << S.getASTContext().BuiltinInfo.getQuotedName(ID);
140 else
141 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
142}
143
144static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
145 assert(Val.getFieldDesc()->isPrimitiveArray() &&
147 "Not a boolean vector");
148 unsigned NumElems = Val.getNumElems();
149
150 // Each element is one bit, so create an integer with NumElems bits.
151 llvm::APSInt Result(NumElems, 0);
152 for (unsigned I = 0; I != NumElems; ++I) {
153 if (Val.elem<bool>(I))
154 Result.setBit(I);
155 }
156
157 return Result;
158}
159
160static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
161 const InterpFrame *Frame,
162 const CallExpr *Call) {
163 unsigned Depth = S.Current->getDepth();
164 auto isStdCall = [](const FunctionDecl *F) -> bool {
165 return F && F->isInStdNamespace() && F->getIdentifier() &&
166 F->getIdentifier()->isStr("is_constant_evaluated");
167 };
168 const InterpFrame *Caller = Frame->Caller;
169 // The current frame is the one for __builtin_is_constant_evaluated.
170 // The one above that, potentially the one for std::is_constant_evaluated().
171 if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
172 S.getEvalStatus().Diag &&
173 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
174 if (Caller && isStdCall(Frame->getCallee())) {
175 const Expr *E = Caller->getExpr(Caller->getRetPC());
176 S.report(E->getExprLoc(),
177 diag::warn_is_constant_evaluated_always_true_constexpr)
178 << "std::is_constant_evaluated" << E->getSourceRange();
179 } else {
180 S.report(Call->getExprLoc(),
181 diag::warn_is_constant_evaluated_always_true_constexpr)
182 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
183 }
184 }
185
186 S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
187 return true;
188}
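// A minimal usage sketch (assuming a constant-evaluated context such as a
// static_assert): the handler above pushes true because evaluation happens in
// a constant context, e.g.
//   constexpr int pick() { return __builtin_is_constant_evaluated() ? 1 : 2; }
//   static_assert(pick() == 1);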
189
190// __builtin_assume(int)
191static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
192 const InterpFrame *Frame,
193 const CallExpr *Call) {
194 assert(Call->getNumArgs() == 1);
195 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
196 return true;
197}
198
199static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
200 const InterpFrame *Frame,
201 const CallExpr *Call, unsigned ID) {
202 uint64_t Limit = ~static_cast<uint64_t>(0);
203 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
204 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
205 Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
206 .getZExtValue();
207
208 const Pointer &B = S.Stk.pop<Pointer>();
209 const Pointer &A = S.Stk.pop<Pointer>();
210 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
211 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
212 diagnoseNonConstexprBuiltin(S, OpPC, ID);
213
214 if (Limit == 0) {
215 pushInteger(S, 0, Call->getType());
216 return true;
217 }
218
219 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
220 return false;
221
222 if (A.isDummy() || B.isDummy())
223 return false;
224 if (!A.isBlockPointer() || !B.isBlockPointer())
225 return false;
226
227 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
228 ID == Builtin::BI__builtin_wcscmp ||
229 ID == Builtin::BI__builtin_wcsncmp;
230 assert(A.getFieldDesc()->isPrimitiveArray());
231 assert(B.getFieldDesc()->isPrimitiveArray());
232
233 // Different element types shouldn't happen, but with casts they can.
234 if (getElemType(A) != getElemType(B))
235 return false;
236
237 PrimType ElemT = *S.getContext().classify(getElemType(A));
238
239 auto returnResult = [&](int V) -> bool {
240 pushInteger(S, V, Call->getType());
241 return true;
242 };
243
244 unsigned IndexA = A.getIndex();
245 unsigned IndexB = B.getIndex();
246 uint64_t Steps = 0;
247 for (;; ++IndexA, ++IndexB, ++Steps) {
248
249 if (Steps >= Limit)
250 break;
251 const Pointer &PA = A.atIndex(IndexA);
252 const Pointer &PB = B.atIndex(IndexB);
253 if (!CheckRange(S, OpPC, PA, AK_Read) ||
254 !CheckRange(S, OpPC, PB, AK_Read)) {
255 return false;
256 }
257
258 if (IsWide) {
259 INT_TYPE_SWITCH(ElemT, {
260 T CA = PA.deref<T>();
261 T CB = PB.deref<T>();
262 if (CA > CB)
263 return returnResult(1);
264 if (CA < CB)
265 return returnResult(-1);
266 if (CA.isZero() || CB.isZero())
267 return returnResult(0);
268 });
269 continue;
270 }
271
272 uint8_t CA = PA.deref<uint8_t>();
273 uint8_t CB = PB.deref<uint8_t>();
274
275 if (CA > CB)
276 return returnResult(1);
277 if (CA < CB)
278 return returnResult(-1);
279 if (CA == 0 || CB == 0)
280 return returnResult(0);
281 }
282
283 return returnResult(0);
284}
285
286static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
287 const InterpFrame *Frame,
288 const CallExpr *Call, unsigned ID) {
289 const Pointer &StrPtr = S.Stk.pop<Pointer>();
290
291 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
292 diagnoseNonConstexprBuiltin(S, OpPC, ID);
293
294 if (!CheckArray(S, OpPC, StrPtr))
295 return false;
296
297 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
298 return false;
299
300 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
301 return false;
302
303 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
304 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
305
306 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
307 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
308 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
309 }
310
311 size_t Len = 0;
312 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
313 const Pointer &ElemPtr = StrPtr.atIndex(I);
314
315 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
316 return false;
317
318 uint32_t Val;
319 switch (ElemSize) {
320 case 1:
321 Val = ElemPtr.deref<uint8_t>();
322 break;
323 case 2:
324 Val = ElemPtr.deref<uint16_t>();
325 break;
326 case 4:
327 Val = ElemPtr.deref<uint32_t>();
328 break;
329 default:
330 llvm_unreachable("Unsupported char size");
331 }
332 if (Val == 0)
333 break;
334 }
335
336 pushInteger(S, Len, Call->getType());
337
338 return true;
339}
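// For example, __builtin_strlen("abc") folds to 3: the loop above walks the
// string's array elements and stops at the first zero element.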
340
341static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
342 const InterpFrame *Frame, const CallExpr *Call,
343 bool Signaling) {
344 const Pointer &Arg = S.Stk.pop<Pointer>();
345
346 if (!CheckLoad(S, OpPC, Arg))
347 return false;
348
349 assert(Arg.getFieldDesc()->isPrimitiveArray());
350
351 // Convert the given string to an integer using StringRef's API.
352 llvm::APInt Fill;
353 std::string Str;
354 assert(Arg.getNumElems() >= 1);
355 for (unsigned I = 0;; ++I) {
356 const Pointer &Elem = Arg.atIndex(I);
357
358 if (!CheckLoad(S, OpPC, Elem))
359 return false;
360
361 if (Elem.deref<int8_t>() == 0)
362 break;
363
364 Str += Elem.deref<char>();
365 }
366
367 // Treat empty strings as if they were zero.
368 if (Str.empty())
369 Fill = llvm::APInt(32, 0);
370 else if (StringRef(Str).getAsInteger(0, Fill))
371 return false;
372
373 const llvm::fltSemantics &TargetSemantics =
374 S.getASTContext().getFloatTypeSemantics(
375 Call->getDirectCallee()->getReturnType());
376
377 Floating Result = S.allocFloat(TargetSemantics);
378 if (S.getASTContext().getTargetInfo().isNan2008()) {
379 if (Signaling)
380 Result.copy(
381 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
382 else
383 Result.copy(
384 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
385 } else {
386 // Prior to IEEE 754-2008, architectures were allowed to choose whether
387 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
388 // a different encoding to what became a standard in 2008, and for pre-
389 // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
390 // sNaN. This is now known as "legacy NaN" encoding.
391 if (Signaling)
392 Result.copy(
393 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
394 else
395 Result.copy(
396 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
397 }
398
399 S.Stk.push<Floating>(Result);
400 return true;
401}
402
403static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
404 const InterpFrame *Frame,
405 const CallExpr *Call) {
406 const llvm::fltSemantics &TargetSemantics =
407 S.getASTContext().getFloatTypeSemantics(
408 Call->getDirectCallee()->getReturnType());
409
410 Floating Result = S.allocFloat(TargetSemantics);
411 Result.copy(APFloat::getInf(TargetSemantics));
412 S.Stk.push<Floating>(Result);
413 return true;
414}
415
416static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
417 const InterpFrame *Frame) {
418 const Floating &Arg2 = S.Stk.pop<Floating>();
419 const Floating &Arg1 = S.Stk.pop<Floating>();
420 Floating Result = S.allocFloat(Arg1.getSemantics());
421
422 APFloat Copy = Arg1.getAPFloat();
423 Copy.copySign(Arg2.getAPFloat());
424 Result.copy(Copy);
425 S.Stk.push<Floating>(Result);
426
427 return true;
428}
429
430static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
431 const InterpFrame *Frame, bool IsNumBuiltin) {
432 const Floating &RHS = S.Stk.pop<Floating>();
433 const Floating &LHS = S.Stk.pop<Floating>();
434 Floating Result = S.allocFloat(LHS.getSemantics());
435
436 if (IsNumBuiltin)
437 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
438 else
439 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
440 S.Stk.push<Floating>(Result);
441 return true;
442}
443
444static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
445 const InterpFrame *Frame, bool IsNumBuiltin) {
446 const Floating &RHS = S.Stk.pop<Floating>();
447 const Floating &LHS = S.Stk.pop<Floating>();
448 Floating Result = S.allocFloat(LHS.getSemantics());
449
450 if (IsNumBuiltin)
451 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
452 else
453 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
454 S.Stk.push<Floating>(Result);
455 return true;
456}
457
458/// Defined as __builtin_isnan(...), to accommodate the fact that it can
459/// take a float, double, long double, etc.
460/// But for us, that's all a Floating anyway.
461static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
462 const InterpFrame *Frame,
463 const CallExpr *Call) {
464 const Floating &Arg = S.Stk.pop<Floating>();
465
466 pushInteger(S, Arg.isNan(), Call->getType());
467 return true;
468}
469
470static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
471 const InterpFrame *Frame,
472 const CallExpr *Call) {
473 const Floating &Arg = S.Stk.pop<Floating>();
474
475 pushInteger(S, Arg.isSignaling(), Call->getType());
476 return true;
477}
478
479static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
480 const InterpFrame *Frame, bool CheckSign,
481 const CallExpr *Call) {
482 const Floating &Arg = S.Stk.pop<Floating>();
483 APFloat F = Arg.getAPFloat();
484 bool IsInf = F.isInfinity();
485
486 if (CheckSign)
487 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
488 else
489 pushInteger(S, IsInf, Call->getType());
490 return true;
491}
492
493static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
494 const InterpFrame *Frame,
495 const CallExpr *Call) {
496 const Floating &Arg = S.Stk.pop<Floating>();
497
498 pushInteger(S, Arg.isFinite(), Call->getType());
499 return true;
500}
501
502static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
503 const InterpFrame *Frame,
504 const CallExpr *Call) {
505 const Floating &Arg = S.Stk.pop<Floating>();
506
507 pushInteger(S, Arg.isNormal(), Call->getType());
508 return true;
509}
510
511static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
512 const InterpFrame *Frame,
513 const CallExpr *Call) {
514 const Floating &Arg = S.Stk.pop<Floating>();
515
516 pushInteger(S, Arg.isDenormal(), Call->getType());
517 return true;
518}
519
520static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
521 const InterpFrame *Frame,
522 const CallExpr *Call) {
523 const Floating &Arg = S.Stk.pop<Floating>();
524
525 pushInteger(S, Arg.isZero(), Call->getType());
526 return true;
527}
528
529static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
530 const InterpFrame *Frame,
531 const CallExpr *Call) {
532 const Floating &Arg = S.Stk.pop<Floating>();
533
534 pushInteger(S, Arg.isNegative(), Call->getType());
535 return true;
536}
537
538static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
539 const CallExpr *Call, unsigned ID) {
540 const Floating &RHS = S.Stk.pop<Floating>();
541 const Floating &LHS = S.Stk.pop<Floating>();
542
543 pushInteger(
544 S,
545 [&] {
546 switch (ID) {
547 case Builtin::BI__builtin_isgreater:
548 return LHS > RHS;
549 case Builtin::BI__builtin_isgreaterequal:
550 return LHS >= RHS;
551 case Builtin::BI__builtin_isless:
552 return LHS < RHS;
553 case Builtin::BI__builtin_islessequal:
554 return LHS <= RHS;
555 case Builtin::BI__builtin_islessgreater: {
556 ComparisonCategoryResult cmp = LHS.compare(RHS);
557 return cmp == ComparisonCategoryResult::Less ||
558 cmp == ComparisonCategoryResult::Greater;
559 }
560 case Builtin::BI__builtin_isunordered:
561 return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
562 default:
563 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
564 "comparison function");
565 }
566 }(),
567 Call->getType());
568 return true;
569}
570
571/// First parameter to __builtin_isfpclass is the floating value, the
572/// second one is an integral value.
573static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
574 const InterpFrame *Frame,
575 const CallExpr *Call) {
576 PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
577 APSInt FPClassArg = popToAPSInt(S.Stk, FPClassArgT);
578 const Floating &F = S.Stk.pop<Floating>();
579
580 int32_t Result = static_cast<int32_t>(
581 (F.classify() & std::move(FPClassArg)).getZExtValue());
582 pushInteger(S, Result, Call->getType());
583
584 return true;
585}
586
587/// Five int values followed by one floating value.
588/// __builtin_fpclassify(int, int, int, int, int, float)
589static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
590 const InterpFrame *Frame,
591 const CallExpr *Call) {
592 const Floating &Val = S.Stk.pop<Floating>();
593
594 PrimType IntT = *S.getContext().classify(Call->getArg(0));
595 APSInt Values[5];
596 for (unsigned I = 0; I != 5; ++I)
597 Values[4 - I] = popToAPSInt(S.Stk, IntT);
598
599 unsigned Index;
600 switch (Val.getCategory()) {
601 case APFloat::fcNaN:
602 Index = 0;
603 break;
604 case APFloat::fcInfinity:
605 Index = 1;
606 break;
607 case APFloat::fcNormal:
608 Index = Val.isDenormal() ? 3 : 2;
609 break;
610 case APFloat::fcZero:
611 Index = 4;
612 break;
613 }
614
615 // The last argument is first on the stack.
616 assert(Index <= 4);
617
618 pushInteger(S, Values[Index], Call->getType());
619 return true;
620}
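// For example, __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
// FP_SUBNORMAL, FP_ZERO, 1.0) selects Index 2 above (fcNormal, not denormal)
// and so folds to FP_NORMAL (assuming the usual <math.h> argument order).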
621
622static inline Floating abs(InterpState &S, const Floating &In) {
623 if (!In.isNegative())
624 return In;
625
626 Floating Output = S.allocFloat(In.getSemantics());
627 APFloat New = In.getAPFloat();
628 New.changeSign();
629 Output.copy(New);
630 return Output;
631}
632
633// The C standard says "fabs raises no floating-point exceptions,
634// even if x is a signaling NaN. The returned value is independent of
635// the current rounding direction mode." Therefore constant folding can
636// proceed without regard to the floating point settings.
637// Reference, WG14 N2478 F.10.4.3
638static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
639 const InterpFrame *Frame) {
640 const Floating &Val = S.Stk.pop<Floating>();
641 S.Stk.push<Floating>(abs(S, Val));
642 return true;
643}
644
645static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
646 const InterpFrame *Frame,
647 const CallExpr *Call) {
648 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
649 APSInt Val = popToAPSInt(S.Stk, ArgT);
650 if (Val ==
651 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
652 return false;
653 if (Val.isNegative())
654 Val.negate();
655 pushInteger(S, Val, Call->getType());
656 return true;
657}
658
659static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
660 const InterpFrame *Frame,
661 const CallExpr *Call) {
662 APSInt Val;
663 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
664 const Pointer &Arg = S.Stk.pop<Pointer>();
665 Val = convertBoolVectorToInt(Arg);
666 } else {
667 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
668 Val = popToAPSInt(S.Stk, ArgT);
669 }
670 pushInteger(S, Val.popcount(), Call->getType());
671 return true;
672}
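// For example, __builtin_popcount(0xF0F0u) folds to 8 (eight set bits).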
673
674static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
675 const InterpFrame *Frame,
676 const CallExpr *Call) {
677 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
678 APSInt Val = popToAPSInt(S.Stk, ArgT);
679 pushInteger(S, Val.popcount() % 2, Call->getType());
680 return true;
681}
682
683static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
684 const InterpFrame *Frame,
685 const CallExpr *Call) {
686 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
687 APSInt Val = popToAPSInt(S.Stk, ArgT);
688 pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
689 return true;
690}
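// __builtin_clrsb counts redundant sign bits. Assuming a 32-bit int,
// __builtin_clrsb(0) and __builtin_clrsb(-1) both fold to 31, matching
// Val.getBitWidth() - Val.getSignificantBits() above.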
691
692static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
693 const InterpFrame *Frame,
694 const CallExpr *Call) {
695 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
696 APSInt Val = popToAPSInt(S.Stk, ArgT);
697 pushInteger(S, Val.reverseBits(), Call->getType());
698 return true;
699}
700
701static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
702 const InterpFrame *Frame,
703 const CallExpr *Call) {
704 // This is an unevaluated call, so there are no arguments on the stack.
705 assert(Call->getNumArgs() == 1);
706 const Expr *Arg = Call->getArg(0);
707
708 GCCTypeClass ResultClass =
709 EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
710 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
711 pushInteger(S, ReturnVal, Call->getType());
712 return true;
713}
714
715// __builtin_expect(long, long)
716// __builtin_expect_with_probability(long, long, double)
717static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
718 const InterpFrame *Frame,
719 const CallExpr *Call) {
720 // The return value is simply the value of the first parameter.
721 // We ignore the probability.
722 unsigned NumArgs = Call->getNumArgs();
723 assert(NumArgs == 2 || NumArgs == 3);
724
725 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
726 if (NumArgs == 3)
727 S.Stk.discard<Floating>();
728 discard(S.Stk, ArgT);
729
730 APSInt Val = popToAPSInt(S.Stk, ArgT);
731 pushInteger(S, Val, Call->getType());
732 return true;
733}
734
735/// rotateleft(value, amount)
736static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
737 const InterpFrame *Frame,
738 const CallExpr *Call, bool Right) {
739 PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
740 PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
741
742 APSInt Amount = popToAPSInt(S.Stk, AmountT);
743 APSInt Value = popToAPSInt(S.Stk, ValueT);
744
746 if (Right)
747 Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
748 /*IsUnsigned=*/true);
749 else // Left.
750 Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
751 /*IsUnsigned=*/true);
752
753 pushInteger(S, Result, Call->getType());
754 return true;
755}
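// For example, __builtin_rotateleft32(0x12345678u, 8) folds to 0x34567812
// and __builtin_rotateright32(0x12345678u, 8) folds to 0x78123456.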
756
757static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
758 const InterpFrame *Frame,
759 const CallExpr *Call) {
760 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
761 APSInt Value = popToAPSInt(S.Stk, ArgT);
762
763 uint64_t N = Value.countr_zero();
764 pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
765 return true;
766}
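// __builtin_ffs returns the 1-based index of the lowest set bit, or 0 for a
// zero input: __builtin_ffs(0) folds to 0 and __builtin_ffs(8) folds to 4,
// i.e. countr_zero() + 1 for nonzero values.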
767
768static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
769 const InterpFrame *Frame,
770 const CallExpr *Call) {
771#ifndef NDEBUG
772 assert(Call->getArg(0)->isLValue());
773 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
774 assert(PtrT == PT_Ptr &&
775 "Unsupported pointer type passed to __builtin_addressof()");
776#endif
777 return true;
778}
779
780static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
781 const InterpFrame *Frame,
782 const CallExpr *Call) {
783 return Call->getDirectCallee()->isConstexpr();
784}
785
786static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
787 const InterpFrame *Frame,
788 const CallExpr *Call) {
789 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
790 APSInt Arg = popToAPSInt(S.Stk, ArgT);
791
792 int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
793 Arg.getZExtValue());
794 pushInteger(S, Result, Call->getType());
795 return true;
796}
797
798// Two integral values followed by a pointer (lhs, rhs, resultOut)
799static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
800 const CallExpr *Call,
801 unsigned BuiltinOp) {
802 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
803 if (ResultPtr.isDummy())
804 return false;
805
806 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
807 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
808 APSInt RHS = popToAPSInt(S.Stk, RHST);
809 APSInt LHS = popToAPSInt(S.Stk, LHST);
810 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
811 PrimType ResultT = *S.getContext().classify(ResultType);
812 bool Overflow;
813
815 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
816 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
817 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
818 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
819 ResultType->isSignedIntegerOrEnumerationType();
820 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
821 ResultType->isSignedIntegerOrEnumerationType();
822 uint64_t LHSSize = LHS.getBitWidth();
823 uint64_t RHSSize = RHS.getBitWidth();
824 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
825 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
826
827 // Add an additional bit if the signedness isn't uniformly agreed to. We
828 // could do this ONLY if there is a signed and an unsigned that both have
829 // MaxBits, but the code to check that is pretty nasty. The issue will be
830 // caught in the shrink-to-result later anyway.
831 if (IsSigned && !AllSigned)
832 ++MaxBits;
833
834 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
835 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
836 Result = APSInt(MaxBits, !IsSigned);
837 }
838
839 // Find largest int.
840 switch (BuiltinOp) {
841 default:
842 llvm_unreachable("Invalid value for BuiltinOp");
843 case Builtin::BI__builtin_add_overflow:
844 case Builtin::BI__builtin_sadd_overflow:
845 case Builtin::BI__builtin_saddl_overflow:
846 case Builtin::BI__builtin_saddll_overflow:
847 case Builtin::BI__builtin_uadd_overflow:
848 case Builtin::BI__builtin_uaddl_overflow:
849 case Builtin::BI__builtin_uaddll_overflow:
850 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
851 : LHS.uadd_ov(RHS, Overflow);
852 break;
853 case Builtin::BI__builtin_sub_overflow:
854 case Builtin::BI__builtin_ssub_overflow:
855 case Builtin::BI__builtin_ssubl_overflow:
856 case Builtin::BI__builtin_ssubll_overflow:
857 case Builtin::BI__builtin_usub_overflow:
858 case Builtin::BI__builtin_usubl_overflow:
859 case Builtin::BI__builtin_usubll_overflow:
860 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
861 : LHS.usub_ov(RHS, Overflow);
862 break;
863 case Builtin::BI__builtin_mul_overflow:
864 case Builtin::BI__builtin_smul_overflow:
865 case Builtin::BI__builtin_smull_overflow:
866 case Builtin::BI__builtin_smulll_overflow:
867 case Builtin::BI__builtin_umul_overflow:
868 case Builtin::BI__builtin_umull_overflow:
869 case Builtin::BI__builtin_umulll_overflow:
870 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
871 : LHS.umul_ov(RHS, Overflow);
872 break;
873 }
874
875 // In the case where multiple sizes are allowed, truncate and see if
876 // the values are the same.
877 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
878 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
879 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
880 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
881 // since it will give us the behavior of a TruncOrSelf in the case where
882 // its parameter <= its size. We previously set Result to be at least the
883 // type-size of the result, so getTypeSize(ResultType) <= Resu
884 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
885 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
886
887 if (!APSInt::isSameValue(Temp, Result))
888 Overflow = true;
889 Result = std::move(Temp);
890 }
891
892 // Write Result to ResultPtr and put Overflow on the stack.
893 assignInteger(S, ResultPtr, ResultT, Result);
894 if (ResultPtr.canBeInitialized())
895 ResultPtr.initialize();
896
897 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
898 S.Stk.push<Boolean>(Overflow);
899 return true;
900}
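// A minimal usage sketch (assuming a 32-bit int and <climits>):
//   constexpr bool overflows(int A, int B) {
//     int Res = 0;
//     return __builtin_add_overflow(A, B, &Res);
//   }
//   static_assert(overflows(INT_MAX, 1));
// The truncated sum is written through ResultPtr and the overflow flag is
// pushed as the call's result.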
901
902/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
903static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
904 const InterpFrame *Frame,
905 const CallExpr *Call, unsigned BuiltinOp) {
906 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
907 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
908 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
909 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
910 APSInt RHS = popToAPSInt(S.Stk, RHST);
911 APSInt LHS = popToAPSInt(S.Stk, LHST);
912
913 APSInt CarryOut;
914
915 APSInt Result;
916 // Copy the number of bits and sign.
917 Result = LHS;
918 CarryOut = LHS;
919
920 bool FirstOverflowed = false;
921 bool SecondOverflowed = false;
922 switch (BuiltinOp) {
923 default:
924 llvm_unreachable("Invalid value for BuiltinOp");
925 case Builtin::BI__builtin_addcb:
926 case Builtin::BI__builtin_addcs:
927 case Builtin::BI__builtin_addc:
928 case Builtin::BI__builtin_addcl:
929 case Builtin::BI__builtin_addcll:
930 Result =
931 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
932 break;
933 case Builtin::BI__builtin_subcb:
934 case Builtin::BI__builtin_subcs:
935 case Builtin::BI__builtin_subc:
936 case Builtin::BI__builtin_subcl:
937 case Builtin::BI__builtin_subcll:
938 Result =
939 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
940 break;
941 }
942 // It is possible for both overflows to happen but CGBuiltin uses an OR so
943 // this is consistent.
944 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
945
946 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
947 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
948 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
949 CarryOutPtr.initialize();
950
951 assert(Call->getType() == Call->getArg(0)->getType());
952 pushInteger(S, Result, Call->getType());
953 return true;
954}
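// For example, __builtin_addcb(0xFF, 0x01, 0, &CarryOut) folds to 0 with
// CarryOut == 1: either the LHS+RHS add or the subsequent +CarryIn add may
// overflow, and the two carries are OR'd together above.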
955
956static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
957 const InterpFrame *Frame, const CallExpr *Call,
958 unsigned BuiltinOp) {
959
960 std::optional<APSInt> Fallback;
961 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
962 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
963 Fallback = popToAPSInt(S.Stk, FallbackT);
964 }
965 APSInt Val;
966 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
967 const Pointer &Arg = S.Stk.pop<Pointer>();
968 Val = convertBoolVectorToInt(Arg);
969 } else {
970 PrimType ValT = *S.getContext().classify(Call->getArg(0));
971 Val = popToAPSInt(S.Stk, ValT);
972 }
973
974 // When the argument is 0, the result of GCC builtins is undefined, whereas
975 // for Microsoft intrinsics, the result is the bit-width of the argument.
976 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
977 BuiltinOp != Builtin::BI__lzcnt &&
978 BuiltinOp != Builtin::BI__lzcnt64;
979
980 if (Val == 0) {
981 if (Fallback) {
982 pushInteger(S, *Fallback, Call->getType());
983 return true;
984 }
985
986 if (ZeroIsUndefined)
987 return false;
988 }
989
990 pushInteger(S, Val.countl_zero(), Call->getType());
991 return true;
992}
993
994static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
995 const InterpFrame *Frame, const CallExpr *Call,
996 unsigned BuiltinID) {
997 std::optional<APSInt> Fallback;
998 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
999 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
1000 Fallback = popToAPSInt(S.Stk, FallbackT);
1001 }
1002 APSInt Val;
1003 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
1004 const Pointer &Arg = S.Stk.pop<Pointer>();
1005 Val = convertBoolVectorToInt(Arg);
1006 } else {
1007 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1008 Val = popToAPSInt(S.Stk, ValT);
1009 }
1010
1011 if (Val == 0) {
1012 if (Fallback) {
1013 pushInteger(S, *Fallback, Call->getType());
1014 return true;
1015 }
1016 return false;
1017 }
1018
1019 pushInteger(S, Val.countr_zero(), Call->getType());
1020 return true;
1021}
1022
1023static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
1024 const InterpFrame *Frame,
1025 const CallExpr *Call) {
1026 PrimType ReturnT = *S.getContext().classify(Call->getType());
1027 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1028 const APSInt &Val = popToAPSInt(S.Stk, ValT);
1029 assert(Val.getActiveBits() <= 64);
1030
1031 INT_TYPE_SWITCH(ReturnT,
1032 { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
1033 return true;
1034}
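// For example, __builtin_bswap32(0x12345678u) folds to 0x78563412.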
1035
1036/// bool __atomic_always_lock_free(size_t, void const volatile*)
1037/// bool __atomic_is_lock_free(size_t, void const volatile*)
1038static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
1039 const InterpFrame *Frame,
1040 const CallExpr *Call,
1041 unsigned BuiltinOp) {
1042 auto returnBool = [&S](bool Value) -> bool {
1043 S.Stk.push<Boolean>(Value);
1044 return true;
1045 };
1046
1047 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1048 const Pointer &Ptr = S.Stk.pop<Pointer>();
1049 const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
1050
1051 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1052 // of two less than or equal to the maximum inline atomic width, we know it
1053 // is lock-free. If the size isn't a power of two, or greater than the
1054 // maximum alignment where we promote atomics, we know it is not lock-free
1055 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1056 // the answer can only be determined at runtime; for example, 16-byte
1057 // atomics have lock-free implementations on some, but not all,
1058 // x86-64 processors.
1059
1060 // Check power-of-two.
1061 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1062 if (Size.isPowerOfTwo()) {
1063 // Check against inlining width.
1064 unsigned InlineWidthBits =
1065 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1066 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1067
1068 // OK, we will inline appropriately-aligned operations of this size,
1069 // and _Atomic(T) is appropriately-aligned.
1070 if (Size == CharUnits::One())
1071 return returnBool(true);
1072
1073 // Same for null pointers.
1074 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1075 if (Ptr.isZero())
1076 return returnBool(true);
1077
1078 if (Ptr.isIntegralPointer()) {
1079 uint64_t IntVal = Ptr.getIntegerRepresentation();
1080 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1081 return returnBool(true);
1082 }
1083
1084 const Expr *PtrArg = Call->getArg(1);
1085 // Otherwise, check the type's alignment against Size.
1086 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1087 // Drop the potential implicit-cast to 'const volatile void*', getting
1088 // the underlying type.
1089 if (ICE->getCastKind() == CK_BitCast)
1090 PtrArg = ICE->getSubExpr();
1091 }
1092
1093 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1094 QualType PointeeType = PtrTy->getPointeeType();
1095 if (!PointeeType->isIncompleteType() &&
1096 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1097 // OK, we will inline operations on this object.
1098 return returnBool(true);
1099 }
1100 }
1101 }
1102 }
1103
1104 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1105 return returnBool(false);
1106
1107 return false;
1108}
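// For example, __atomic_always_lock_free(sizeof(int), nullptr) folds to true
// on targets whose inline atomic width covers 4 bytes (the null-pointer case
// handled above).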
1109
1110/// bool __c11_atomic_is_lock_free(size_t)
1111static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
1112 CodePtr OpPC,
1113 const InterpFrame *Frame,
1114 const CallExpr *Call) {
1115 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1116 const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
1117
1118 auto returnBool = [&S](bool Value) -> bool {
1119 S.Stk.push<Boolean>(Value);
1120 return true;
1121 };
1122
1123 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1124 if (Size.isPowerOfTwo()) {
1125 // Check against inlining width.
1126 unsigned InlineWidthBits =
1127 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1128 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits))
1129 return returnBool(true);
1130 }
1131
1132 return false; // returnBool(false);
1133}
1134
1135/// __builtin_complex(float A, float B);
1136static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
1137 const InterpFrame *Frame,
1138 const CallExpr *Call) {
1139 const Floating &Arg2 = S.Stk.pop<Floating>();
1140 const Floating &Arg1 = S.Stk.pop<Floating>();
1141 Pointer &Result = S.Stk.peek<Pointer>();
1142
1143 Result.elem<Floating>(0) = Arg1;
1144 Result.elem<Floating>(1) = Arg2;
1145 Result.initializeAllElements();
1146
1147 return true;
1148}
1149
1150/// __builtin_is_aligned()
1151/// __builtin_align_up()
1152/// __builtin_align_down()
1153/// The first parameter is either an integer or a pointer.
1154/// The second parameter is the requested alignment as an integer.
1155static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
1156 const InterpFrame *Frame,
1157 const CallExpr *Call,
1158 unsigned BuiltinOp) {
1159 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1160 const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT);
1161
1162 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1163 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1164 return false;
1165 }
1166 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1167 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1168 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1169 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1170 << MaxValue << Call->getArg(0)->getType() << Alignment;
1171 return false;
1172 }
1173
1174 // The first parameter is either an integer or a pointer (but not a function
1175 // pointer).
1176 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1177
1178 if (isIntegralType(FirstArgT)) {
1179 const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
1180 APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
1181 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1182 APSInt AlignedVal =
1183 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
1184 pushInteger(S, AlignedVal, Call->getType());
1185 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1186 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
1187 pushInteger(S, AlignedVal, Call->getType());
1188 } else {
1189 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1190 S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
1191 }
1192 return true;
1193 }
1194
1195 assert(FirstArgT == PT_Ptr);
1196 const Pointer &Ptr = S.Stk.pop<Pointer>();
1197
1198 unsigned PtrOffset = Ptr.getByteOffset();
1199 PtrOffset = Ptr.getIndex();
1200 CharUnits BaseAlignment =
1202 CharUnits PtrAlign =
1203 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1204
1205 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1206 if (PtrAlign.getQuantity() >= Alignment) {
1207 S.Stk.push<Boolean>(true);
1208 return true;
1209 }
1210 // If the alignment is not known to be sufficient, some cases could still
1211 // be aligned at run time. However, if the requested alignment is less than
1212 // or equal to the base alignment and the offset is not aligned, we know that
1213 // the run-time value can never be aligned.
1214 if (BaseAlignment.getQuantity() >= Alignment &&
1215 PtrAlign.getQuantity() < Alignment) {
1216 S.Stk.push<Boolean>(false);
1217 return true;
1218 }
1219
1220 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1221 << Alignment;
1222 return false;
1223 }
1224
1225 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1226 BuiltinOp == Builtin::BI__builtin_align_up);
1227
1228 // For align_up/align_down, we can return the same value if the alignment
1229 // is known to be greater or equal to the requested value.
1230 if (PtrAlign.getQuantity() >= Alignment) {
1231 S.Stk.push<Pointer>(Ptr);
1232 return true;
1233 }
1234
1235 // The alignment could be greater than the minimum at run-time, so we cannot
1236 // infer much about the resulting pointer value. One case is possible:
1237 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1238 // can infer the correct index if the requested alignment is smaller than
1239 // the base alignment so we can perform the computation on the offset.
1240 if (BaseAlignment.getQuantity() >= Alignment) {
1241 assert(Alignment.getBitWidth() <= 64 &&
1242 "Cannot handle > 64-bit address-space");
1243 uint64_t Alignment64 = Alignment.getZExtValue();
1244 CharUnits NewOffset =
1245 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1246 ? llvm::alignDown(PtrOffset, Alignment64)
1247 : llvm::alignTo(PtrOffset, Alignment64));
1248
1249 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1250 return true;
1251 }
1252
1253 // Otherwise, we cannot constant-evaluate the result.
1254 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1255 return false;
1256}
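// Integer examples of the three builtins handled above (assuming int
// arguments):
//   static_assert(__builtin_is_aligned(16, 8));
//   static_assert(__builtin_align_up(13, 8) == 16);
//   static_assert(__builtin_align_down(13, 8) == 8);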
1257
1258/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1259static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
1260 const InterpFrame *Frame,
1261 const CallExpr *Call) {
1262 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1263
1264 std::optional<APSInt> ExtraOffset;
1265 if (Call->getNumArgs() == 3)
1266 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1267
1268 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1269 const Pointer &Ptr = S.Stk.pop<Pointer>();
1270
1271 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1272
1273 // If there is a base object, then it must have the correct alignment.
1274 if (Ptr.isBlockPointer()) {
1275 CharUnits BaseAlignment;
1276 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1277 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1278 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1279 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1280
1281 if (BaseAlignment < Align) {
1282 S.CCEDiag(Call->getArg(0),
1283 diag::note_constexpr_baa_insufficient_alignment)
1284 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1285 return false;
1286 }
1287 }
1288
1289 APValue AV = Ptr.toAPValue(S.getASTContext());
1290 CharUnits AVOffset = AV.getLValueOffset();
1291 if (ExtraOffset)
1292 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1293 if (AVOffset.alignTo(Align) != AVOffset) {
1294 if (Ptr.isBlockPointer())
1295 S.CCEDiag(Call->getArg(0),
1296 diag::note_constexpr_baa_insufficient_alignment)
1297 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1298 else
1299 S.CCEDiag(Call->getArg(0),
1300 diag::note_constexpr_baa_value_insufficient_alignment)
1301 << AVOffset.getQuantity() << Align.getQuantity();
1302 return false;
1303 }
1304
1305 S.Stk.push<Pointer>(Ptr);
1306 return true;
1307}
1308
1309static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
1310 const InterpFrame *Frame,
1311 const CallExpr *Call) {
1312 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1313 !Call->getArg(1)->getType()->isIntegerType())
1314 return false;
1315
1316 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1317 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1318 APSInt Index = popToAPSInt(S.Stk, IndexT);
1319 APSInt Val = popToAPSInt(S.Stk, ValT);
1320
1321 unsigned BitWidth = Val.getBitWidth();
1322 uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1323 uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1324 Length = Length > BitWidth ? BitWidth : Length;
1325
1326 // Handle out of bounds cases.
1327 if (Length == 0 || Shift >= BitWidth) {
1328 pushInteger(S, 0, Call->getType());
1329 return true;
1330 }
1331
1332 uint64_t Result = Val.getZExtValue() >> Shift;
1333 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1334 pushInteger(S, Result, Call->getType());
1335 return true;
1336}
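// The control word packs the start bit in bits [7:0] and the field length in
// bits [15:8]; e.g. extracting 8 bits starting at bit 4 of 0x12345678u yields
// (0x12345678u >> 4) & 0xFF == 0x67.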
1337
1338static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1339 const InterpFrame *Frame,
1340 const CallExpr *Call) {
1341 QualType CallType = Call->getType();
1342 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1343 !Call->getArg(1)->getType()->isIntegerType() ||
1344 !CallType->isIntegerType())
1345 return false;
1346
1347 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1348 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1349
1350 APSInt Idx = popToAPSInt(S.Stk, IndexT);
1351 APSInt Val = popToAPSInt(S.Stk, ValT);
1352
1353 unsigned BitWidth = Val.getBitWidth();
1354 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1355
1356 if (Index < BitWidth)
1357 Val.clearHighBits(BitWidth - Index);
1358
1359 pushInteger(S, Val, CallType);
1360 return true;
1361}
1362
1363static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
1364 const InterpFrame *Frame,
1365 const CallExpr *Call) {
1366 QualType CallType = Call->getType();
1367 if (!CallType->isIntegerType() ||
1368 !Call->getArg(0)->getType()->isIntegerType())
1369 return false;
1370
1371 APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1372 pushInteger(S, Val.countLeadingZeros(), CallType);
1373 return true;
1374}
1375
1376static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
1377 const InterpFrame *Frame,
1378 const CallExpr *Call) {
1379 QualType CallType = Call->getType();
1380 if (!CallType->isIntegerType() ||
1381 !Call->getArg(0)->getType()->isIntegerType())
1382 return false;
1383
1384 APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1385 pushInteger(S, Val.countTrailingZeros(), CallType);
1386 return true;
1387}
1388
1389static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1390 const InterpFrame *Frame,
1391 const CallExpr *Call) {
1392 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1393 !Call->getArg(1)->getType()->isIntegerType())
1394 return false;
1395
1396 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1397 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1398
1399 APSInt Mask = popToAPSInt(S.Stk, MaskT);
1400 APSInt Val = popToAPSInt(S.Stk, ValT);
1401
1402 unsigned BitWidth = Val.getBitWidth();
1403 APInt Result = APInt::getZero(BitWidth);
1404 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1405 if (Mask[I])
1406 Result.setBitVal(I, Val[P++]);
1407 }
1408 pushInteger(S, std::move(Result), Call->getType());
1409 return true;
1410}
1411
1412static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1413 const InterpFrame *Frame,
1414 const CallExpr *Call) {
1415 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1416 !Call->getArg(1)->getType()->isIntegerType())
1417 return false;
1418
1419 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1420 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1421
1422 APSInt Mask = popToAPSInt(S.Stk, MaskT);
1423 APSInt Val = popToAPSInt(S.Stk, ValT);
1424
1425 unsigned BitWidth = Val.getBitWidth();
1426 APInt Result = APInt::getZero(BitWidth);
1427 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1428 if (Mask[I])
1429 Result.setBitVal(P++, Val[I]);
1430 }
1431 pushInteger(S, std::move(Result), Call->getType());
1432 return true;
1433}
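// For example, with mask 0b11010: pdep scatters the low bits of the value
// into the set mask positions, so pdep(0b101, 0b11010) folds to 0b10010,
// while pext gathers them back, so pext(0b10010, 0b11010) folds to 0b101.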
1434
1435/// (CarryIn, LHS, RHS, Result)
1436static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
1437 CodePtr OpPC,
1438 const InterpFrame *Frame,
1439 const CallExpr *Call,
1440 unsigned BuiltinOp) {
1441 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1442 !Call->getArg(1)->getType()->isIntegerType() ||
1443 !Call->getArg(2)->getType()->isIntegerType())
1444 return false;
1445
1446 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
1447
1448 PrimType CarryInT = *S.getContext().classify(Call->getArg(0));
1449 PrimType LHST = *S.getContext().classify(Call->getArg(1));
1450 PrimType RHST = *S.getContext().classify(Call->getArg(2));
1451 APSInt RHS = popToAPSInt(S.Stk, RHST);
1452 APSInt LHS = popToAPSInt(S.Stk, LHST);
1453 APSInt CarryIn = popToAPSInt(S.Stk, CarryInT);
1454
1455 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1456 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1457
1458 unsigned BitWidth = LHS.getBitWidth();
1459 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1460 APInt ExResult =
1461 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1462 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1463
1464 APInt Result = ExResult.extractBits(BitWidth, 0);
1465 APSInt CarryOut =
1466 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1467
1468 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1469 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1470 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
1471
1472 pushInteger(S, CarryOut, Call->getType());
1473
1474 return true;
1475}
1476
1477static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1478 CodePtr OpPC,
1479 const InterpFrame *Frame,
1480 const CallExpr *Call) {
1481 analyze_os_log::OSLogBufferLayout Layout;
1482 analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
1483 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1484 return true;
1485}
1486
1487static bool
1488interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
1489 const InterpFrame *Frame,
1490 const CallExpr *Call) {
1491 const auto &Ptr = S.Stk.pop<Pointer>();
1492 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1493
1494 // This should be created for a StringLiteral, so it should always hold at
1495 // least one array element.
1496 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1497 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1498 uint64_t Result = getPointerAuthStableSipHash(R);
1499 pushInteger(S, Result, Call->getType());
1500 return true;
1501}
1502
1503static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1504 const InterpFrame *Frame,
1505 const CallExpr *Call) {
1506 // A call to __operator_new is only valid within std::allocate<>::allocate.
1507 // Walk up the call stack to find the appropriate caller and get the
1508 // element type from it.
1509 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
1510
1511 if (ElemType.isNull()) {
1512 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1513 ? diag::note_constexpr_new_untyped
1514 : diag::note_constexpr_new);
1515 return false;
1516 }
1517 assert(NewCall);
1518
1519 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1520 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1521 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1522 return false;
1523 }
1524
1525 // We only care about the first parameter (the size), so discard all the
1526 // others.
1527 {
1528 unsigned NumArgs = Call->getNumArgs();
1529 assert(NumArgs >= 1);
1530
1531 // The std::nothrow_t arg never gets put on the stack.
1532 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
1533 --NumArgs;
1534 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
1535 // First arg is needed.
1536 Args = Args.drop_front();
1537
1538 // Discard the rest.
1539 for (const Expr *Arg : Args)
1540 discard(S.Stk, *S.getContext().classify(Arg));
1541 }
1542
1543 APSInt Bytes = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1544 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1545 assert(!ElemSize.isZero());
1546 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1547 // elements we should allocate.
1548 APInt NumElems, Remainder;
1549 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1550 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1551 if (Remainder != 0) {
1552 // This likely indicates a bug in the implementation of 'std::allocator'.
1553 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1554 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1555 return false;
1556 }
1557
1558 // NB: The same check we're using in CheckArraySize()
1559 if (NumElems.getActiveBits() >
1560 ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1561 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1562 // FIXME: NoThrow check?
1563 const SourceInfo &Loc = S.Current->getSource(OpPC);
1564 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1565 << NumElems.getZExtValue();
1566 return false;
1567 }
1568
1569 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
1570 return false;
1571
1572 bool IsArray = NumElems.ugt(1);
1573 OptPrimType ElemT = S.getContext().classify(ElemType);
1574 DynamicAllocator &Allocator = S.getAllocator();
1575 if (ElemT) {
1576 Block *B =
1577 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1578 S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1579 assert(B);
1580 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1581 return true;
1582 }
1583
1584 assert(!ElemT);
1585
1586 // Composite arrays
1587 if (IsArray) {
1588 const Descriptor *Desc =
1589 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
1590 Block *B =
1591 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1592 DynamicAllocator::Form::Operator);
1593 assert(B);
1594 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1595 return true;
1596 }
1597
1598 // Records. Still allocate them as single-element arrays.
1599 QualType AllocType = S.getASTContext().getConstantArrayType(
1600 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
1601
1602 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
1604 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1605 DynamicAllocator::Form::Operator);
1606 assert(B);
1607 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
1608 return true;
1609}
1610
1611static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1612 const InterpFrame *Frame,
1613 const CallExpr *Call) {
1614 const Expr *Source = nullptr;
1615 const Block *BlockToDelete = nullptr;
1616
1617 if (S.checkingPotentialConstantExpression()) {
1618 S.Stk.discard<Pointer>();
1619 return false;
1620 }
1621
1622 // This is permitted only within a call to std::allocator<T>::deallocate.
1623 if (!S.getStdAllocatorCaller("deallocate")) {
1624 S.FFDiag(Call);
1625 S.Stk.discard<Pointer>();
1626 return true;
1627 }
1628
1629 {
1630 const Pointer &Ptr = S.Stk.pop<Pointer>();
1631
1632 if (Ptr.isZero()) {
1633 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1634 return true;
1635 }
1636
1637 Source = Ptr.getDeclDesc()->asExpr();
1638 BlockToDelete = Ptr.block();
1639
1640 if (!BlockToDelete->isDynamic()) {
1641 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
1643 if (const auto *D = Ptr.getFieldDesc()->asDecl())
1644 S.Note(D->getLocation(), diag::note_declared_at);
1645 }
1646 }
1647 assert(BlockToDelete);
1648
1649 DynamicAllocator &Allocator = S.getAllocator();
1650 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1651 std::optional<DynamicAllocator::Form> AllocForm =
1652 Allocator.getAllocationForm(Source);
1653
1654 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1655 // Nothing has been deallocated, this must be a double-delete.
1656 const SourceInfo &Loc = S.Current->getSource(OpPC);
1657 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1658 return false;
1659 }
1660 assert(AllocForm);
1661
1662 return CheckNewDeleteForms(
1663 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1664}
1665
1666static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
1667 const InterpFrame *Frame,
1668 const CallExpr *Call) {
1669 const Floating &Arg0 = S.Stk.pop<Floating>();
1670 S.Stk.push<Floating>(Arg0);
1671 return true;
1672}
1673
1674static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
1675 const CallExpr *Call, unsigned ID) {
1676 const Pointer &Arg = S.Stk.pop<Pointer>();
1677 assert(Arg.getFieldDesc()->isPrimitiveArray());
1678
1679 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1680 assert(Call->getType() == ElemType);
1681 PrimType ElemT = *S.getContext().classify(ElemType);
1682 unsigned NumElems = Arg.getNumElems();
1683
1684 INT_TYPE_SWITCH(ElemT, {
1685 T Result = Arg.elem<T>(0);
1686 unsigned BitWidth = Result.bitWidth();
1687 for (unsigned I = 1; I != NumElems; ++I) {
1688 T Elem = Arg.elem<T>(I);
1689 T PrevResult = Result;
1690
1691 if (ID == Builtin::BI__builtin_reduce_add) {
1692 if (T::add(Result, Elem, BitWidth, &Result)) {
1693 unsigned OverflowBits = BitWidth + 1;
1694 (void)handleOverflow(S, OpPC,
1695 (PrevResult.toAPSInt(OverflowBits) +
1696 Elem.toAPSInt(OverflowBits)));
1697 return false;
1698 }
1699 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1700 if (T::mul(Result, Elem, BitWidth, &Result)) {
1701 unsigned OverflowBits = BitWidth * 2;
1702 (void)handleOverflow(S, OpPC,
1703 (PrevResult.toAPSInt(OverflowBits) *
1704 Elem.toAPSInt(OverflowBits)));
1705 return false;
1706 }
1707
1708 } else if (ID == Builtin::BI__builtin_reduce_and) {
1709 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1710 } else if (ID == Builtin::BI__builtin_reduce_or) {
1711 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1712 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1713 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1714 } else if (ID == Builtin::BI__builtin_reduce_min) {
1715 if (Elem < Result)
1716 Result = Elem;
1717 } else if (ID == Builtin::BI__builtin_reduce_max) {
1718 if (Elem > Result)
1719 Result = Elem;
1720 } else {
1721 llvm_unreachable("Unhandled vector reduce builtin");
1722 }
1723 }
1724 pushInteger(S, Result.toAPSInt(), Call->getType());
1725 });
1726
1727 return true;
1728}
1729
1730static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
1731 const InterpFrame *Frame,
1732 const CallExpr *Call,
1733 unsigned BuiltinID) {
1734 assert(Call->getNumArgs() == 1);
1735 QualType Ty = Call->getArg(0)->getType();
1736 if (Ty->isIntegerType()) {
1737 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1738 APSInt Val = popToAPSInt(S.Stk, ArgT);
1739
1740 pushInteger(S, Val.abs(), Call->getType());
1741 return true;
1742 }
1743
1744 if (Ty->isFloatingType()) {
1745 Floating Val = S.Stk.pop<Floating>();
1746 Floating Result = abs(S, Val);
1747 S.Stk.push<Floating>(Result);
1748 return true;
1749 }
1750
1751 // Otherwise, the argument must be a vector.
1752 assert(Call->getArg(0)->getType()->isVectorType());
1753 const Pointer &Arg = S.Stk.pop<Pointer>();
1754 assert(Arg.getFieldDesc()->isPrimitiveArray());
1755 const Pointer &Dst = S.Stk.peek<Pointer>();
1756 assert(Dst.getFieldDesc()->isPrimitiveArray());
1757 assert(Arg.getFieldDesc()->getNumElems() ==
1758 Dst.getFieldDesc()->getNumElems());
1759
1760 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1761 PrimType ElemT = *S.getContext().classify(ElemType);
1762 unsigned NumElems = Arg.getNumElems();
1763 // The vector elements are either integers or floating-point values.
1764 for (unsigned I = 0; I != NumElems; ++I) {
1765 if (ElemType->isIntegerType()) {
1767 Dst.elem<T>(I) = T::from(static_cast<T>(
1768 APSInt(Arg.elem<T>(I).toAPSInt().abs(),
1770 });
1771 } else {
1772 Floating Val = Arg.elem<Floating>(I);
1773 Dst.elem<Floating>(I) = abs(S, Val);
1774 }
1775 }
1777
1778 return true;
1779}
1780
1781/// Can be called with an integer or vector as the first and only parameter.
1782static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
1783 const InterpFrame *Frame,
1784 const CallExpr *Call,
1785 unsigned BuiltinID) {
1786 assert(Call->getNumArgs() == 1);
1787 if (Call->getArg(0)->getType()->isIntegerType()) {
1788 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1789 APSInt Val = popToAPSInt(S.Stk, ArgT);
1790
1791 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1792 pushInteger(S, Val.popcount(), Call->getType());
1793 } else {
1794 pushInteger(S, Val.reverseBits(), Call->getType());
1795 }
1796 return true;
1797 }
1798 // Otherwise, the argument must be a vector.
1799 assert(Call->getArg(0)->getType()->isVectorType());
1800 const Pointer &Arg = S.Stk.pop<Pointer>();
1801 assert(Arg.getFieldDesc()->isPrimitiveArray());
1802 const Pointer &Dst = S.Stk.peek<Pointer>();
1803 assert(Dst.getFieldDesc()->isPrimitiveArray());
1804 assert(Arg.getFieldDesc()->getNumElems() ==
1805 Dst.getFieldDesc()->getNumElems());
1806
1807 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1808 PrimType ElemT = *S.getContext().classify(ElemType);
1809 unsigned NumElems = Arg.getNumElems();
1810
1811 // FIXME: Reading from uninitialized vector elements?
1812 for (unsigned I = 0; I != NumElems; ++I) {
1813 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1814 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1815 Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
1816 } else {
1817 Dst.elem<T>(I) =
1818 T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
1819 }
1820 });
1821 }
1822 Dst.initializeAllElements();
1823
1824 return true;
1825}
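// Illustrative examples for the handler above (a sketch):
//   static_assert(__builtin_elementwise_popcount(0b1011u) == 3);
//   static_assert(__builtin_elementwise_bitreverse((unsigned char)1) == 0x80);
// Vector arguments are processed lane by lane into the destination vector.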
1826
1827/// Can be called with an integer or vector as the first and only parameter.
1828static bool interp__builtin_elementwise_countzeroes(InterpState &S,
1829 CodePtr OpPC,
1830 const InterpFrame *Frame,
1831 const CallExpr *Call,
1832 unsigned BuiltinID) {
1833 const bool HasZeroArg = Call->getNumArgs() == 2;
1834 const bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_cttz;
1835 assert(Call->getNumArgs() == 1 || HasZeroArg);
1836 if (Call->getArg(0)->getType()->isIntegerType()) {
1837 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1838 APSInt Val = popToAPSInt(S.Stk, ArgT);
1839 std::optional<APSInt> ZeroVal;
1840 if (HasZeroArg) {
1841 ZeroVal = Val;
1842 Val = popToAPSInt(S.Stk, ArgT);
1843 }
1844
1845 if (Val.isZero()) {
1846 if (ZeroVal) {
1847 pushInteger(S, *ZeroVal, Call->getType());
1848 return true;
1849 }
1850 // If we haven't been provided the second argument, the result is
1851 // undefined
1852 S.FFDiag(S.Current->getSource(OpPC),
1853 diag::note_constexpr_countzeroes_zero)
1854 << /*IsTrailing=*/IsCTTZ;
1855 return false;
1856 }
1857
1858 if (BuiltinID == Builtin::BI__builtin_elementwise_ctlz) {
1859 pushInteger(S, Val.countLeadingZeros(), Call->getType());
1860 } else {
1861 pushInteger(S, Val.countTrailingZeros(), Call->getType());
1862 }
1863 return true;
1864 }
1865 // Otherwise, the argument must be a vector.
1866 const ASTContext &ASTCtx = S.getASTContext();
1867 Pointer ZeroArg;
1868 if (HasZeroArg) {
1869 assert(Call->getArg(1)->getType()->isVectorType() &&
1870 ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
1871 Call->getArg(1)->getType()));
1872 (void)ASTCtx;
1873 ZeroArg = S.Stk.pop<Pointer>();
1874 assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
1875 }
1876 assert(Call->getArg(0)->getType()->isVectorType());
1877 const Pointer &Arg = S.Stk.pop<Pointer>();
1878 assert(Arg.getFieldDesc()->isPrimitiveArray());
1879 const Pointer &Dst = S.Stk.peek<Pointer>();
1880 assert(Dst.getFieldDesc()->isPrimitiveArray());
1881 assert(Arg.getFieldDesc()->getNumElems() ==
1882 Dst.getFieldDesc()->getNumElems());
1883
1884 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1885 PrimType ElemT = *S.getContext().classify(ElemType);
1886 unsigned NumElems = Arg.getNumElems();
1887
1888 // FIXME: Reading from uninitialized vector elements?
1889 for (unsigned I = 0; I != NumElems; ++I) {
1890 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1891 APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
1892 if (EltVal.isZero()) {
1893 if (HasZeroArg) {
1894 Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
1895 } else {
1896 // If we haven't been provided the second argument, the result is
1897 // undefined
1898 S.FFDiag(S.Current->getSource(OpPC),
1899 diag::note_constexpr_countzeroes_zero)
1900 << /*IsTrailing=*/IsCTTZ;
1901 return false;
1902 }
1903 } else if (IsCTTZ) {
1904 Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
1905 } else {
1906 Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
1907 }
1908 Dst.atIndex(I).initialize();
1909 });
1910 }
1911
1912 return true;
1913}
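// Illustrative examples for the handler above (a sketch, assuming 32-bit unsigned):
//   static_assert(__builtin_elementwise_ctlz(1u) == 31);
//   static_assert(__builtin_elementwise_cttz(8u) == 3);
//   static_assert(__builtin_elementwise_ctlz(0u, 32u) == 32); // second argument selects the zero result
// Without the second argument, a zero input is diagnosed and the call is not a
// constant expression.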
1914
1915static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1916 const InterpFrame *Frame,
1917 const CallExpr *Call, unsigned ID) {
1918 assert(Call->getNumArgs() == 3);
1919 const ASTContext &ASTCtx = S.getASTContext();
1920 PrimType SizeT = *S.getContext().classify(Call->getArg(2));
1921 APSInt Size = popToAPSInt(S.Stk, SizeT);
1922 const Pointer SrcPtr = S.Stk.pop<Pointer>();
1923 const Pointer DestPtr = S.Stk.pop<Pointer>();
1924
1925 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1926
1927 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1928 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1929
1930 bool Move =
1931 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
1932 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
1933 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
1934 ID == Builtin::BI__builtin_wmemcpy ||
1935 ID == Builtin::BI__builtin_wmemmove;
1936
1937 // If the size is zero, we treat this as always being a valid no-op.
1938 if (Size.isZero()) {
1939 S.Stk.push<Pointer>(DestPtr);
1940 return true;
1941 }
1942
1943 if (SrcPtr.isZero() || DestPtr.isZero()) {
1944 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1945 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1946 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
1947 << DiagPtr.toDiagnosticString(ASTCtx);
1948 return false;
1949 }
1950
1951 // Diagnose integral src/dest pointers specially.
1952 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
1953 std::string DiagVal = "(void *)";
1954 DiagVal += SrcPtr.isIntegralPointer()
1955 ? std::to_string(SrcPtr.getIntegerRepresentation())
1956 : std::to_string(DestPtr.getIntegerRepresentation());
1957 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1958 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
1959 return false;
1960 }
1961
1962 // Can't read from dummy pointers.
1963 if (DestPtr.isDummy() || SrcPtr.isDummy())
1964 return false;
1965
1966 if (DestPtr.getType()->isIncompleteType()) {
1967 S.FFDiag(S.Current->getSource(OpPC),
1968 diag::note_constexpr_memcpy_incomplete_type)
1969 << Move << DestPtr.getType();
1970 return false;
1971 }
1972 if (SrcPtr.getType()->isIncompleteType()) {
1973 S.FFDiag(S.Current->getSource(OpPC),
1974 diag::note_constexpr_memcpy_incomplete_type)
1975 << Move << SrcPtr.getType();
1976 return false;
1977 }
1978
1979 QualType DestElemType = getElemType(DestPtr);
1980 if (DestElemType->isIncompleteType()) {
1981 S.FFDiag(S.Current->getSource(OpPC),
1982 diag::note_constexpr_memcpy_incomplete_type)
1983 << Move << DestElemType;
1984 return false;
1985 }
1986
1987 size_t RemainingDestElems;
1988 if (DestPtr.getFieldDesc()->isArray()) {
1989 RemainingDestElems = DestPtr.isUnknownSizeArray()
1990 ? 0
1991 : (DestPtr.getNumElems() - DestPtr.getIndex());
1992 } else {
1993 RemainingDestElems = 1;
1994 }
1995 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1996
1997 if (WChar) {
1998 uint64_t WCharSize =
1999 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2000 Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
2001 /*IsUnsigned=*/true);
2002 }
2003
2004 if (Size.urem(DestElemSize) != 0) {
2005 S.FFDiag(S.Current->getSource(OpPC),
2006 diag::note_constexpr_memcpy_unsupported)
2007 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
2008 return false;
2009 }
2010
2011 QualType SrcElemType = getElemType(SrcPtr);
2012 size_t RemainingSrcElems;
2013 if (SrcPtr.getFieldDesc()->isArray()) {
2014 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
2015 ? 0
2016 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
2017 } else {
2018 RemainingSrcElems = 1;
2019 }
2020 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
2021
2022 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
2023 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
2024 << Move << SrcElemType << DestElemType;
2025 return false;
2026 }
2027
2028 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
2029 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
2030 << Move << DestElemType;
2031 return false;
2032 }
2033
2034 // Check if we have enough elements to read from and write to.
2035 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
2036 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
2037 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
2038 APInt N = Size.udiv(DestElemSize);
2039 S.FFDiag(S.Current->getSource(OpPC),
2040 diag::note_constexpr_memcpy_unsupported)
2041 << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
2042 << DestElemType << toString(N, 10, /*Signed=*/false);
2043 return false;
2044 }
2045
2046 // Check for overlapping memory regions.
2047 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
2048 // Remove base casts.
2049 Pointer SrcP = SrcPtr;
2050 while (SrcP.isBaseClass())
2051 SrcP = SrcP.getBase();
2052
2053 Pointer DestP = DestPtr;
2054 while (DestP.isBaseClass())
2055 DestP = DestP.getBase();
2056
2057 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
2058 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
2059 unsigned N = Size.getZExtValue();
2060
2061 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
2062 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
2063 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
2064 << /*IsWChar=*/false;
2065 return false;
2066 }
2067 }
2068
2069 assert(Size.getZExtValue() % DestElemSize == 0);
2070 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
2071 return false;
2072
2073 S.Stk.push<Pointer>(DestPtr);
2074 return true;
2075}
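// Illustrative constant-evaluation example for the handler above (a sketch):
//   constexpr int CopiedSecond() {
//     int Src[2] = {1, 2}, Dst[2] = {};
//     __builtin_memcpy(Dst, Src, sizeof(Src)); // same element type, size is a multiple of it
//     return Dst[1];                           // 2
//   }
// Overlapping regions are rejected for memcpy but permitted for memmove, and the
// wide variants scale the requested size by sizeof(wchar_t).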
2076
2077/// Determine if T is a character type for which we guarantee that
2078/// sizeof(T) == 1.
2079static bool isOneByteCharacterType(QualType T) {
2080 return T->isCharType() || T->isChar8Type();
2081}
2082
2083static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
2084 const InterpFrame *Frame,
2085 const CallExpr *Call, unsigned ID) {
2086 assert(Call->getNumArgs() == 3);
2087 PrimType SizeT = *S.getContext().classify(Call->getArg(2));
2088 const APSInt &Size = popToAPSInt(S.Stk, SizeT);
2089 const Pointer &PtrB = S.Stk.pop<Pointer>();
2090 const Pointer &PtrA = S.Stk.pop<Pointer>();
2091
2092 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
2093 ID == Builtin::BIwmemcmp)
2094 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2095
2096 if (Size.isZero()) {
2097 pushInteger(S, 0, Call->getType());
2098 return true;
2099 }
2100
2101 bool IsWide =
2102 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
2103
2104 const ASTContext &ASTCtx = S.getASTContext();
2105 QualType ElemTypeA = getElemType(PtrA);
2106 QualType ElemTypeB = getElemType(PtrB);
2107 // FIXME: This is an arbitrary limitation that the current constant
2108 // interpreter had. We could remove it.
2109 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
2110 !isOneByteCharacterType(ElemTypeB))) {
2111 S.FFDiag(S.Current->getSource(OpPC),
2112 diag::note_constexpr_memcmp_unsupported)
2113 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
2114 << PtrB.getType();
2115 return false;
2116 }
2117
2118 if (PtrA.isDummy() || PtrB.isDummy())
2119 return false;
2120
2121 // Now, read both pointers to a buffer and compare those.
2122 BitcastBuffer BufferA(
2123 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
2124 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
2125 // FIXME: The swapping here is UNDOING something we do when reading the
2126 // data into the buffer.
2127 if (ASTCtx.getTargetInfo().isBigEndian())
2128 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
2129
2130 BitcastBuffer BufferB(
2131 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
2132 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
2133 // FIXME: The swapping here is UNDOING something we do when reading the
2134 // data into the buffer.
2135 if (ASTCtx.getTargetInfo().isBigEndian())
2136 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
2137
2138 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
2139 BufferB.byteSize().getQuantity());
2140
2141 unsigned ElemSize = 1;
2142 if (IsWide)
2143 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2144 // The Size given for the wide variants is in wide-char units. Convert it
2145 // to bytes.
2146 size_t ByteSize = Size.getZExtValue() * ElemSize;
2147 size_t CmpSize = std::min(MinBufferSize, ByteSize);
2148
2149 for (size_t I = 0; I != CmpSize; I += ElemSize) {
2150 if (IsWide) {
2151 INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
2152 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
2153 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2154 if (A < B) {
2155 pushInteger(S, -1, Call->getType());
2156 return true;
2157 }
2158 if (A > B) {
2159 pushInteger(S, 1, Call->getType());
2160 return true;
2161 }
2162 });
2163 } else {
2164 std::byte A = BufferA.Data[I];
2165 std::byte B = BufferB.Data[I];
2166
2167 if (A < B) {
2168 pushInteger(S, -1, Call->getType());
2169 return true;
2170 }
2171 if (A > B) {
2172 pushInteger(S, 1, Call->getType());
2173 return true;
2174 }
2175 }
2176 }
2177
2178 // We compared CmpSize bytes above. If the limiting factor was the Size
2179 // passed, we're done and the result is equality (0).
2180 if (ByteSize <= CmpSize) {
2181 pushInteger(S, 0, Call->getType());
2182 return true;
2183 }
2184
2185 // However, if we read all the available bytes but were instructed to read
2186 // even more, diagnose this as a "read of dereferenced one-past-the-end
2187 // pointer". This is what would happen if we called CheckLoad() on every array
2188 // element.
2189 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2190 << AK_Read << S.Current->getRange(OpPC);
2191 return false;
2192}
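// Illustrative examples for the handler above (a sketch):
//   static_assert(__builtin_memcmp("ab", "ac", 2) < 0);  // bytes compared as unsigned
//   static_assert(__builtin_memcmp("ab", "ac", 1) == 0); // only the first byte is inspected
//   static_assert(__builtin_wmemcmp(L"ab", L"ab", 2) == 0);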
2193
2194// __builtin_memchr(ptr, int, int)
2195// __builtin_strchr(ptr, int)
2196static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
2197 const CallExpr *Call, unsigned ID) {
2198 if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2199 ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2200 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2201
2202 std::optional<APSInt> MaxLength;
2203 PrimType DesiredT = *S.getContext().classify(Call->getArg(1));
2204 if (Call->getNumArgs() == 3) {
2205 PrimType MaxT = *S.getContext().classify(Call->getArg(2));
2206 MaxLength = popToAPSInt(S.Stk, MaxT);
2207 }
2208 APSInt Desired = popToAPSInt(S.Stk, DesiredT);
2209 const Pointer &Ptr = S.Stk.pop<Pointer>();
2210
2211 if (MaxLength && MaxLength->isZero()) {
2212 S.Stk.push<Pointer>();
2213 return true;
2214 }
2215
2216 if (Ptr.isDummy()) {
2217 if (Ptr.getType()->isIncompleteType())
2218 S.FFDiag(S.Current->getSource(OpPC),
2219 diag::note_constexpr_ltor_incomplete_type)
2220 << Ptr.getType();
2221 return false;
2222 }
2223
2224 // Null is only okay if the given size is 0.
2225 if (Ptr.isZero()) {
2226 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2227 << AK_Read;
2228 return false;
2229 }
2230
2231 QualType ElemTy = Ptr.getFieldDesc()->isArray()
2232 ? Ptr.getFieldDesc()->getElemQualType()
2233 : Ptr.getFieldDesc()->getType();
2234 bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2235
2236 // Give up on byte-oriented matching against multibyte elements.
2237 if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2238 S.FFDiag(S.Current->getSource(OpPC),
2239 diag::note_constexpr_memchr_unsupported)
2240 << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2241 return false;
2242 }
2243
2244 if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2245 // strchr compares directly to the passed integer, and therefore
2246 // always fails if given an int that is not a char.
2247 if (Desired !=
2248 Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
2249 S.Stk.push<Pointer>();
2250 return true;
2251 }
2252 }
2253
2254 uint64_t DesiredVal;
2255 if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2256 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2257 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2258 DesiredVal = Desired.getZExtValue();
2259 } else {
2260 DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2261 }
2262
2263 bool StopAtZero =
2264 (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2265 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2266
2267 PrimType ElemT =
2268 IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2269
2270 size_t Index = Ptr.getIndex();
2271 size_t Step = 0;
2272 for (;;) {
2273 const Pointer &ElemPtr =
2274 (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2275
2276 if (!CheckLoad(S, OpPC, ElemPtr))
2277 return false;
2278
2279 uint64_t V;
2280 INT_TYPE_SWITCH_NO_BOOL(
2281 ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2282
2283 if (V == DesiredVal) {
2284 S.Stk.push<Pointer>(ElemPtr);
2285 return true;
2286 }
2287
2288 if (StopAtZero && V == 0)
2289 break;
2290
2291 ++Step;
2292 if (MaxLength && Step == MaxLength->getZExtValue())
2293 break;
2294 }
2295
2296 S.Stk.push<Pointer>();
2297 return true;
2298}
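// Illustrative examples for the handler above (a sketch):
//   constexpr const char *Str = "hello";
//   static_assert(__builtin_strchr(Str, 'l') == Str + 2);    // first match
//   static_assert(__builtin_strchr(Str, 'x') == nullptr);
//   static_assert(__builtin_memchr(Str, 'o', 4) == nullptr); // limited to MaxLength bytes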
2299
2300static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2301 const Descriptor *Desc) {
2302 if (Desc->isPrimitive())
2303 return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2304 if (Desc->isArray())
2305 return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2306 Desc->getNumElems();
2307 if (Desc->isRecord()) {
2308 // Can't use Descriptor::getType() as that may return a pointer type. Look
2309 // at the decl directly.
2310 return ASTCtx
2311 .getTypeSizeInChars(
2312 ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2313 .getQuantity();
2314 }
2315
2316 return std::nullopt;
2317}
2318
2319/// Compute the byte offset of \p Ptr in the full declaration.
2320static unsigned computePointerOffset(const ASTContext &ASTCtx,
2321 const Pointer &Ptr) {
2322 unsigned Result = 0;
2323
2324 Pointer P = Ptr;
2325 while (P.isField() || P.isArrayElement()) {
2326 P = P.expand();
2327 const Descriptor *D = P.getFieldDesc();
2328
2329 if (P.isArrayElement()) {
2330 unsigned ElemSize =
2331 ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
2332 if (P.isOnePastEnd())
2333 Result += ElemSize * P.getNumElems();
2334 else
2335 Result += ElemSize * P.getIndex();
2336 P = P.expand().getArray();
2337 } else if (P.isBaseClass()) {
2338 const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2339 bool IsVirtual = Ptr.isVirtualBaseClass();
2340 P = P.getBase();
2341 const Record *BaseRecord = P.getRecord();
2342
2343 const ASTRecordLayout &Layout =
2344 ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2345 if (IsVirtual)
2346 Result += Layout.getVBaseClassOffset(RD).getQuantity();
2347 else
2348 Result += Layout.getBaseClassOffset(RD).getQuantity();
2349 } else if (P.isField()) {
2350 const FieldDecl *FD = P.getField();
2351 const ASTRecordLayout &Layout =
2352 ASTCtx.getASTRecordLayout(FD->getParent());
2353 unsigned FieldIndex = FD->getFieldIndex();
2354 uint64_t FieldOffset =
2355 ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2356 .getQuantity();
2357 Result += FieldOffset;
2358 P = P.getBase();
2359 } else
2360 llvm_unreachable("Unhandled descriptor type");
2361 }
2362
2363 return Result;
2364}
2365
2366/// Does Ptr point to the last subobject?
2367static bool pointsToLastObject(const Pointer &Ptr) {
2368 Pointer P = Ptr;
2369 while (!P.isRoot()) {
2370
2371 if (P.isArrayElement()) {
2372 P = P.expand().getArray();
2373 continue;
2374 }
2375 if (P.isBaseClass()) {
2376 if (P.getRecord()->getNumFields() > 0)
2377 return false;
2378 P = P.getBase();
2379 continue;
2380 }
2381
2382 Pointer Base = P.getBase();
2383 if (const Record *R = Base.getRecord()) {
2384 assert(P.getField());
2385 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2386 return false;
2387 }
2388 P = Base;
2389 }
2390
2391 return true;
2392}
2393
2394/// Does Ptr point to the last object AND to a flexible array member?
2395static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
2396 auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
2397 using FAMKind = LangOptions::StrictFlexArraysLevelKind;
2398 FAMKind StrictFlexArraysLevel =
2399 Ctx.getLangOpts().getStrictFlexArraysLevel();
2400
2401 if (StrictFlexArraysLevel == FAMKind::Default)
2402 return true;
2403
2404 unsigned NumElems = FieldDesc->getNumElems();
2405 if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2406 return true;
2407
2408 if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2409 return true;
2410 return false;
2411 };
2412
2413 const Descriptor *FieldDesc = Ptr.getFieldDesc();
2414 if (!FieldDesc->isArray())
2415 return false;
2416
2417 return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2418 isFlexibleArrayMember(FieldDesc);
2419}
2420
2421static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
2422 const InterpFrame *Frame,
2423 const CallExpr *Call) {
2424 const ASTContext &ASTCtx = S.getASTContext();
2425 PrimType KindT = *S.getContext().classify(Call->getArg(1));
2426 // From the GCC docs:
2427 // Kind is an integer constant from 0 to 3. If the least significant bit is
2428 // clear, objects are whole variables. If it is set, a closest surrounding
2429 // subobject is considered the object a pointer points to. The second bit
2430 // determines if maximum or minimum of remaining bytes is computed.
2431 unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();
2432 assert(Kind <= 3 && "unexpected kind");
2433 bool UseFieldDesc = (Kind & 1u);
2434 bool ReportMinimum = (Kind & 2u);
2435 const Pointer &Ptr = S.Stk.pop<Pointer>();
2436
2437 if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2438 // "If there are any side effects in them, it returns (size_t) -1
2439 // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2440 pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2441 return true;
2442 }
2443
2444 if (Ptr.isZero() || !Ptr.isBlockPointer())
2445 return false;
2446
2447 // We can't load through pointers.
2448 if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2449 return false;
2450
2451 bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2452 const Descriptor *DeclDesc = Ptr.getDeclDesc();
2453 assert(DeclDesc);
2454
2455 if (!UseFieldDesc || DetermineForCompleteObject) {
2456 // Lower bound, so we can't fall back to this.
2457 if (ReportMinimum && !DetermineForCompleteObject)
2458 return false;
2459
2460 // Can't read beyond the pointer decl desc.
2461 if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2462 return false;
2463 } else {
2464 if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2465 // If we cannot determine the size of the initial allocation, then we
2466 // can't give an accurate upper-bound. However, we are still able to give
2467 // conservative lower-bounds for Type=3.
2468 if (Kind == 1)
2469 return false;
2470 }
2471 }
2472
2473 const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2474 assert(Desc);
2475
2476 std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2477 if (!FullSize)
2478 return false;
2479
2480 unsigned ByteOffset;
2481 if (UseFieldDesc) {
2482 if (Ptr.isBaseClass())
2483 ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2484 computePointerOffset(ASTCtx, Ptr);
2485 else
2486 ByteOffset =
2487 computePointerOffset(ASTCtx, Ptr) -
2488 computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2489 } else
2490 ByteOffset = computePointerOffset(ASTCtx, Ptr);
2491
2492 assert(ByteOffset <= *FullSize);
2493 unsigned Result = *FullSize - ByteOffset;
2494
2495 pushInteger(S, Result, Call->getType());
2496 return true;
2497}
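// Illustrative example for the handler above (a sketch): for a global
// `char Buf[10];`, __builtin_object_size(&Buf[2], 0) folds to 8, i.e. the full
// size (10) minus the byte offset (2). Setting bit 0 of Kind measures the
// closest enclosing subobject instead of the whole variable, and bit 1 asks for
// the minimum rather than the maximum remaining size.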
2498
2499static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
2500 const CallExpr *Call) {
2501
2502 if (!S.inConstantContext())
2503 return false;
2504
2505 const Pointer &Ptr = S.Stk.pop<Pointer>();
2506
2507 auto Error = [&](int Diag) {
2508 bool CalledFromStd = false;
2509 const auto *Callee = S.Current->getCallee();
2510 if (Callee && Callee->isInStdNamespace()) {
2511 const IdentifierInfo *Identifier = Callee->getIdentifier();
2512 CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2513 }
2514 S.CCEDiag(CalledFromStd
2515 ? S.Current->Caller->getSource(S.Current->getRetPC())
2516 : S.Current->getSource(OpPC),
2517 diag::err_invalid_is_within_lifetime)
2518 << (CalledFromStd ? "std::is_within_lifetime"
2519 : "__builtin_is_within_lifetime")
2520 << Diag;
2521 return false;
2522 };
2523
2524 if (Ptr.isZero())
2525 return Error(0);
2526 if (Ptr.isOnePastEnd())
2527 return Error(1);
2528
2529 bool Result = Ptr.getLifetime() != Lifetime::Ended;
2530 if (!Ptr.isActive()) {
2531 Result = false;
2532 } else {
2533 if (!CheckLive(S, OpPC, Ptr, AK_Read))
2534 return false;
2535 if (!CheckMutable(S, OpPC, Ptr))
2536 return false;
2537 if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2538 return false;
2539 }
2540
2541 // Check if we're currently running an initializer.
2542 if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2543 return Error(2);
2544 if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2545 return Error(2);
2546
2547 pushInteger(S, Result, Call->getType());
2548 return true;
2549}
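// Illustrative example for the handler above (a sketch):
//   constexpr int Global = 0;
//   static_assert(__builtin_is_within_lifetime(&Global));
// Null pointers, one-past-the-end pointers, and objects whose initializer is
// still being evaluated are diagnosed via Error(0), Error(1), and Error(2).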
2550
2551static bool interp__builtin_elementwise_int_binop(
2552 InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID,
2553 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2554 assert(Call->getNumArgs() == 2);
2555
2556 // Single integer case.
2557 if (!Call->getArg(0)->getType()->isVectorType()) {
2558 assert(!Call->getArg(1)->getType()->isVectorType());
2559 APSInt RHS = popToAPSInt(
2560 S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
2561 APSInt LHS = popToAPSInt(
2562 S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
2563 APInt Result = Fn(LHS, RHS);
2564 pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2565 return true;
2566 }
2567
2568 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2569 assert(VT->getElementType()->isIntegralOrEnumerationType());
2570 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2571 unsigned NumElems = VT->getNumElements();
2572 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2573
2574 // Vector + Scalar case.
2575 if (!Call->getArg(1)->getType()->isVectorType()) {
2576 assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2577
2578 APSInt RHS = popToAPSInt(
2579 S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
2580 const Pointer &LHS = S.Stk.pop<Pointer>();
2581 const Pointer &Dst = S.Stk.peek<Pointer>();
2582
2583 for (unsigned I = 0; I != NumElems; ++I) {
2584 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2585 Dst.elem<T>(I) = static_cast<T>(
2586 APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2587 });
2588 }
2589 Dst.initializeAllElements();
2590 return true;
2591 }
2592
2593 // Vector case.
2594 assert(Call->getArg(0)->getType()->isVectorType() &&
2595 Call->getArg(1)->getType()->isVectorType());
2596 assert(VT->getElementType() ==
2597 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2598 assert(VT->getNumElements() ==
2599 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2600 assert(VT->getElementType()->isIntegralOrEnumerationType());
2601
2602 const Pointer &RHS = S.Stk.pop<Pointer>();
2603 const Pointer &LHS = S.Stk.pop<Pointer>();
2604 const Pointer &Dst = S.Stk.peek<Pointer>();
2605 for (unsigned I = 0; I != NumElems; ++I) {
2606 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2607 APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2608 APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2609 Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2610 });
2611 }
2612 Dst.initializeAllElements();
2613
2614 return true;
2615}
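// Illustrative examples for builtins routed through this helper (a sketch):
//   static_assert(__builtin_elementwise_add_sat((signed char)100, (signed char)100) == 127);
//   static_assert(__builtin_elementwise_sub_sat(0u, 1u) == 0u); // unsigned saturates at zero
// The x86 shift and rotate intrinsics in the dispatcher below reuse the same
// scalar-or-vector walk with their own lambdas.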
2616
2617static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC,
2618 const CallExpr *Call,
2619 unsigned BuiltinID) {
2620 assert(Call->getNumArgs() == 2);
2621
2622 QualType Arg0Type = Call->getArg(0)->getType();
2623
2624 // TODO: Support floating-point types.
2625 if (!(Arg0Type->isIntegerType() ||
2626 (Arg0Type->isVectorType() &&
2627 Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2628 return false;
2629
2630 if (!Arg0Type->isVectorType()) {
2631 assert(!Call->getArg(1)->getType()->isVectorType());
2632 APSInt RHS = popToAPSInt(
2633 S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
2634 APSInt LHS = popToAPSInt(
2635 S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
2636 APInt Result;
2637 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2638 Result = std::max(LHS, RHS);
2639 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2640 Result = std::min(LHS, RHS);
2641 } else {
2642 llvm_unreachable("Wrong builtin ID");
2643 }
2644
2645 pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2646 return true;
2647 }
2648
2649 // Vector case.
2650 assert(Call->getArg(0)->getType()->isVectorType() &&
2651 Call->getArg(1)->getType()->isVectorType());
2652 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2653 assert(VT->getElementType() ==
2654 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2655 assert(VT->getNumElements() ==
2656 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2657 assert(VT->getElementType()->isIntegralOrEnumerationType());
2658
2659 const Pointer &RHS = S.Stk.pop<Pointer>();
2660 const Pointer &LHS = S.Stk.pop<Pointer>();
2661 const Pointer &Dst = S.Stk.peek<Pointer>();
2662 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2663 unsigned NumElems = VT->getNumElements();
2664 for (unsigned I = 0; I != NumElems; ++I) {
2665 APSInt Elem1;
2666 APSInt Elem2;
2667 INT_TYPE_SWITCH(ElemT, {
2668 Elem1 = LHS.elem<T>(I).toAPSInt();
2669 Elem2 = RHS.elem<T>(I).toAPSInt();
2670 });
2671
2672 APSInt Result;
2673 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2674 Result = APSInt(std::max(Elem1, Elem2),
2675 Call->getType()->isUnsignedIntegerOrEnumerationType());
2676 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2677 Result = APSInt(std::min(Elem1, Elem2),
2678 Call->getType()->isUnsignedIntegerOrEnumerationType());
2679 } else {
2680 llvm_unreachable("Wrong builtin ID");
2681 }
2682
2683 INT_TYPE_SWITCH_NO_BOOL(ElemT,
2684 { Dst.elem<T>(I) = static_cast<T>(Result); });
2685 }
2686 Dst.initializeAllElements();
2687
2688 return true;
2689}
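// Illustrative examples for the handler above (a sketch):
//   static_assert(__builtin_elementwise_max(-1, 2) == 2);
//   static_assert(__builtin_elementwise_min(-1, 2) == -1);
// The comparison follows the signedness of the element type.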
2690
2691static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC,
2692 const CallExpr *Call,
2693 unsigned BuiltinID) {
2694 assert(Call->getArg(0)->getType()->isVectorType() &&
2695 Call->getArg(1)->getType()->isVectorType());
2696 const Pointer &RHS = S.Stk.pop<Pointer>();
2697 const Pointer &LHS = S.Stk.pop<Pointer>();
2698 const Pointer &Dst = S.Stk.peek<Pointer>();
2699
2700 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2701 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2702 unsigned SourceLen = VT->getNumElements();
2703
2704 PrimType DstElemT = *S.getContext().classify(
2705 Call->getType()->castAs<VectorType>()->getElementType());
2706 unsigned DstElem = 0;
2707 for (unsigned I = 0; I != SourceLen; I += 2) {
2708 APSInt Elem1;
2709 APSInt Elem2;
2710 INT_TYPE_SWITCH(ElemT, {
2711 Elem1 = LHS.elem<T>(I).toAPSInt();
2712 Elem2 = RHS.elem<T>(I).toAPSInt();
2713 });
2714
2715 APSInt Result;
2716 switch (BuiltinID) {
2717 case clang::X86::BI__builtin_ia32_pmuludq128:
2718 case clang::X86::BI__builtin_ia32_pmuludq256:
2719 case clang::X86::BI__builtin_ia32_pmuludq512:
2720 Result = APSInt(llvm::APIntOps::muluExtended(Elem1, Elem2),
2721 /*IsUnsigned=*/true);
2722 break;
2723 case clang::X86::BI__builtin_ia32_pmuldq128:
2724 case clang::X86::BI__builtin_ia32_pmuldq256:
2725 case clang::X86::BI__builtin_ia32_pmuldq512:
2726 Result = APSInt(llvm::APIntOps::mulsExtended(Elem1, Elem2),
2727 /*IsUnsigned=*/false);
2728 break;
2729 }
2730 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2731 { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2732 ++DstElem;
2733 }
2734
2735 Dst.initializeAllElements();
2736 return true;
2737}
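// Illustrative example for the handler above (a sketch): with pmuludq128
// semantics, 4 x u32 inputs {2, 9, 3, 9} and {5, 9, 7, 9} multiply only the
// even-indexed lanes, producing the 2 x u64 result {10, 21}; the signed pmuldq
// forms use sign-extended products instead.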
2738
2739static bool interp__builtin_elementwise_fma(InterpState &S, CodePtr OpPC,
2740 const CallExpr *Call) {
2741 assert(Call->getNumArgs() == 3);
2742
2743 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2744 llvm::RoundingMode RM = getRoundingMode(FPO);
2745 const QualType Arg1Type = Call->getArg(0)->getType();
2746 const QualType Arg2Type = Call->getArg(1)->getType();
2747 const QualType Arg3Type = Call->getArg(2)->getType();
2748
2749 // Non-vector floating point types.
2750 if (!Arg1Type->isVectorType()) {
2751 assert(!Arg2Type->isVectorType());
2752 assert(!Arg3Type->isVectorType());
2753 (void)Arg2Type;
2754 (void)Arg3Type;
2755
2756 const Floating &Z = S.Stk.pop<Floating>();
2757 const Floating &Y = S.Stk.pop<Floating>();
2758 const Floating &X = S.Stk.pop<Floating>();
2759 APFloat F = X.getAPFloat();
2760 F.fusedMultiplyAdd(Y.getAPFloat(), Z.getAPFloat(), RM);
2761 Floating Result = S.allocFloat(X.getSemantics());
2762 Result.copy(F);
2763 S.Stk.push<Floating>(Result);
2764 return true;
2765 }
2766
2767 // Vector type.
2768 assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2769 Arg3Type->isVectorType());
2770
2771 const VectorType *VecT = Arg1Type->castAs<VectorType>();
2772 const QualType ElemT = VecT->getElementType();
2773 unsigned NumElems = VecT->getNumElements();
2774
2775 assert(ElemT == Arg2Type->castAs<VectorType>()->getElementType() &&
2776 ElemT == Arg3Type->castAs<VectorType>()->getElementType());
2777 assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2778 NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2779 assert(ElemT->isRealFloatingType());
2780 (void)ElemT;
2781
2782 const Pointer &VZ = S.Stk.pop<Pointer>();
2783 const Pointer &VY = S.Stk.pop<Pointer>();
2784 const Pointer &VX = S.Stk.pop<Pointer>();
2785 const Pointer &Dst = S.Stk.peek<Pointer>();
2786 for (unsigned I = 0; I != NumElems; ++I) {
2787 using T = PrimConv<PT_Float>::T;
2788 APFloat X = VX.elem<T>(I).getAPFloat();
2789 APFloat Y = VY.elem<T>(I).getAPFloat();
2790 APFloat Z = VZ.elem<T>(I).getAPFloat();
2791 (void)X.fusedMultiplyAdd(Y, Z, RM);
2792 Dst.elem<Floating>(I) = Floating(X);
2793 }
2794 Dst.initializeAllElements();
2795 return true;
2796}
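// Illustrative example for the handler above (a sketch):
//   static_assert(__builtin_elementwise_fma(2.0, 3.0, 1.0) == 7.0); // fused, single rounding
// Vector operands are evaluated lane by lane with the same rounding mode.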
2797
2798/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
2799static bool interp__builtin_select(InterpState &S, CodePtr OpPC,
2800 const CallExpr *Call) {
2801 const Pointer &RHS = S.Stk.pop<Pointer>();
2802 const Pointer &LHS = S.Stk.pop<Pointer>();
2803 PrimType MaskT = *S.getContext().classify(Call->getArg(0));
2804 APSInt Mask = popToAPSInt(S.Stk, MaskT);
2805 const Pointer &Dst = S.Stk.peek<Pointer>();
2806
2807 assert(LHS.getNumElems() == RHS.getNumElems());
2808 assert(LHS.getNumElems() == Dst.getNumElems());
2809 unsigned NumElems = LHS.getNumElems();
2810 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2811 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2812
2813 for (unsigned I = 0; I != NumElems; ++I) {
2814 if (ElemT == PT_Float) {
2815 assert(DstElemT == PT_Float);
2816 Dst.elem<Floating>(I) =
2817 Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2818 } else {
2819 APSInt Elem;
2820 INT_TYPE_SWITCH(ElemT, {
2821 Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2822 });
2823 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2824 { Dst.elem<T>(I) = static_cast<T>(Elem); });
2825 }
2826 }
2827 Dst.initializeAllElements();
2828
2829 return true;
2830}
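// Illustrative example for the handler above (a sketch): with a mask of 0b0101
// and four lanes, lanes 0 and 2 are taken from LHS and lanes 1 and 3 from RHS,
// i.e. Result[i] = ((Mask >> i) & 1) ? LHS[i] : RHS[i].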
2831
2832bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
2833 uint32_t BuiltinID) {
2834 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
2835 return Invalid(S, OpPC);
2836
2837 const InterpFrame *Frame = S.Current;
2838 switch (BuiltinID) {
2839 case Builtin::BI__builtin_is_constant_evaluated:
2840 return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);
2841
2842 case Builtin::BI__builtin_assume:
2843 case Builtin::BI__assume:
2844 return interp__builtin_assume(S, OpPC, Frame, Call);
2845
2846 case Builtin::BI__builtin_strcmp:
2847 case Builtin::BIstrcmp:
2848 case Builtin::BI__builtin_strncmp:
2849 case Builtin::BIstrncmp:
2850 case Builtin::BI__builtin_wcsncmp:
2851 case Builtin::BIwcsncmp:
2852 case Builtin::BI__builtin_wcscmp:
2853 case Builtin::BIwcscmp:
2854 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
2855
2856 case Builtin::BI__builtin_strlen:
2857 case Builtin::BIstrlen:
2858 case Builtin::BI__builtin_wcslen:
2859 case Builtin::BIwcslen:
2860 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
2861
2862 case Builtin::BI__builtin_nan:
2863 case Builtin::BI__builtin_nanf:
2864 case Builtin::BI__builtin_nanl:
2865 case Builtin::BI__builtin_nanf16:
2866 case Builtin::BI__builtin_nanf128:
2867 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
2868
2869 case Builtin::BI__builtin_nans:
2870 case Builtin::BI__builtin_nansf:
2871 case Builtin::BI__builtin_nansl:
2872 case Builtin::BI__builtin_nansf16:
2873 case Builtin::BI__builtin_nansf128:
2874 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
2875
2876 case Builtin::BI__builtin_huge_val:
2877 case Builtin::BI__builtin_huge_valf:
2878 case Builtin::BI__builtin_huge_vall:
2879 case Builtin::BI__builtin_huge_valf16:
2880 case Builtin::BI__builtin_huge_valf128:
2881 case Builtin::BI__builtin_inf:
2882 case Builtin::BI__builtin_inff:
2883 case Builtin::BI__builtin_infl:
2884 case Builtin::BI__builtin_inff16:
2885 case Builtin::BI__builtin_inff128:
2886 return interp__builtin_inf(S, OpPC, Frame, Call);
2887
2888 case Builtin::BI__builtin_copysign:
2889 case Builtin::BI__builtin_copysignf:
2890 case Builtin::BI__builtin_copysignl:
2891 case Builtin::BI__builtin_copysignf128:
2892 return interp__builtin_copysign(S, OpPC, Frame);
2893
2894 case Builtin::BI__builtin_fmin:
2895 case Builtin::BI__builtin_fminf:
2896 case Builtin::BI__builtin_fminl:
2897 case Builtin::BI__builtin_fminf16:
2898 case Builtin::BI__builtin_fminf128:
2899 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
2900
2901 case Builtin::BI__builtin_fminimum_num:
2902 case Builtin::BI__builtin_fminimum_numf:
2903 case Builtin::BI__builtin_fminimum_numl:
2904 case Builtin::BI__builtin_fminimum_numf16:
2905 case Builtin::BI__builtin_fminimum_numf128:
2906 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
2907
2908 case Builtin::BI__builtin_fmax:
2909 case Builtin::BI__builtin_fmaxf:
2910 case Builtin::BI__builtin_fmaxl:
2911 case Builtin::BI__builtin_fmaxf16:
2912 case Builtin::BI__builtin_fmaxf128:
2913 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
2914
2915 case Builtin::BI__builtin_fmaximum_num:
2916 case Builtin::BI__builtin_fmaximum_numf:
2917 case Builtin::BI__builtin_fmaximum_numl:
2918 case Builtin::BI__builtin_fmaximum_numf16:
2919 case Builtin::BI__builtin_fmaximum_numf128:
2920 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
2921
2922 case Builtin::BI__builtin_isnan:
2923 return interp__builtin_isnan(S, OpPC, Frame, Call);
2924
2925 case Builtin::BI__builtin_issignaling:
2926 return interp__builtin_issignaling(S, OpPC, Frame, Call);
2927
2928 case Builtin::BI__builtin_isinf:
2929 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
2930
2931 case Builtin::BI__builtin_isinf_sign:
2932 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
2933
2934 case Builtin::BI__builtin_isfinite:
2935 return interp__builtin_isfinite(S, OpPC, Frame, Call);
2936
2937 case Builtin::BI__builtin_isnormal:
2938 return interp__builtin_isnormal(S, OpPC, Frame, Call);
2939
2940 case Builtin::BI__builtin_issubnormal:
2941 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
2942
2943 case Builtin::BI__builtin_iszero:
2944 return interp__builtin_iszero(S, OpPC, Frame, Call);
2945
2946 case Builtin::BI__builtin_signbit:
2947 case Builtin::BI__builtin_signbitf:
2948 case Builtin::BI__builtin_signbitl:
2949 return interp__builtin_signbit(S, OpPC, Frame, Call);
2950
2951 case Builtin::BI__builtin_isgreater:
2952 case Builtin::BI__builtin_isgreaterequal:
2953 case Builtin::BI__builtin_isless:
2954 case Builtin::BI__builtin_islessequal:
2955 case Builtin::BI__builtin_islessgreater:
2956 case Builtin::BI__builtin_isunordered:
2957 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
2958
2959 case Builtin::BI__builtin_isfpclass:
2960 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
2961
2962 case Builtin::BI__builtin_fpclassify:
2963 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
2964
2965 case Builtin::BI__builtin_fabs:
2966 case Builtin::BI__builtin_fabsf:
2967 case Builtin::BI__builtin_fabsl:
2968 case Builtin::BI__builtin_fabsf128:
2969 return interp__builtin_fabs(S, OpPC, Frame);
2970
2971 case Builtin::BI__builtin_abs:
2972 case Builtin::BI__builtin_labs:
2973 case Builtin::BI__builtin_llabs:
2974 return interp__builtin_abs(S, OpPC, Frame, Call);
2975
2976 case Builtin::BI__builtin_popcount:
2977 case Builtin::BI__builtin_popcountl:
2978 case Builtin::BI__builtin_popcountll:
2979 case Builtin::BI__builtin_popcountg:
2980 case Builtin::BI__popcnt16: // Microsoft variants of popcount
2981 case Builtin::BI__popcnt:
2982 case Builtin::BI__popcnt64:
2983 return interp__builtin_popcount(S, OpPC, Frame, Call);
2984
2985 case Builtin::BI__builtin_parity:
2986 case Builtin::BI__builtin_parityl:
2987 case Builtin::BI__builtin_parityll:
2988 return interp__builtin_parity(S, OpPC, Frame, Call);
2989
2990 case Builtin::BI__builtin_clrsb:
2991 case Builtin::BI__builtin_clrsbl:
2992 case Builtin::BI__builtin_clrsbll:
2993 return interp__builtin_clrsb(S, OpPC, Frame, Call);
2994
2995 case Builtin::BI__builtin_bitreverse8:
2996 case Builtin::BI__builtin_bitreverse16:
2997 case Builtin::BI__builtin_bitreverse32:
2998 case Builtin::BI__builtin_bitreverse64:
2999 return interp__builtin_bitreverse(S, OpPC, Frame, Call);
3000
3001 case Builtin::BI__builtin_classify_type:
3002 return interp__builtin_classify_type(S, OpPC, Frame, Call);
3003
3004 case Builtin::BI__builtin_expect:
3005 case Builtin::BI__builtin_expect_with_probability:
3006 return interp__builtin_expect(S, OpPC, Frame, Call);
3007
3008 case Builtin::BI__builtin_rotateleft8:
3009 case Builtin::BI__builtin_rotateleft16:
3010 case Builtin::BI__builtin_rotateleft32:
3011 case Builtin::BI__builtin_rotateleft64:
3012 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3013 case Builtin::BI_rotl16:
3014 case Builtin::BI_rotl:
3015 case Builtin::BI_lrotl:
3016 case Builtin::BI_rotl64:
3017 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);
3018
3019 case Builtin::BI__builtin_rotateright8:
3020 case Builtin::BI__builtin_rotateright16:
3021 case Builtin::BI__builtin_rotateright32:
3022 case Builtin::BI__builtin_rotateright64:
3023 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3024 case Builtin::BI_rotr16:
3025 case Builtin::BI_rotr:
3026 case Builtin::BI_lrotr:
3027 case Builtin::BI_rotr64:
3028 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);
3029
3030 case Builtin::BI__builtin_ffs:
3031 case Builtin::BI__builtin_ffsl:
3032 case Builtin::BI__builtin_ffsll:
3033 return interp__builtin_ffs(S, OpPC, Frame, Call);
3034
3035 case Builtin::BIaddressof:
3036 case Builtin::BI__addressof:
3037 case Builtin::BI__builtin_addressof:
3038 assert(isNoopBuiltin(BuiltinID));
3039 return interp__builtin_addressof(S, OpPC, Frame, Call);
3040
3041 case Builtin::BIas_const:
3042 case Builtin::BIforward:
3043 case Builtin::BIforward_like:
3044 case Builtin::BImove:
3045 case Builtin::BImove_if_noexcept:
3046 assert(isNoopBuiltin(BuiltinID));
3047 return interp__builtin_move(S, OpPC, Frame, Call);
3048
3049 case Builtin::BI__builtin_eh_return_data_regno:
3050 return interp__builtin_eh_return_data_regno(S, OpPC, Frame, Call);
3051
3052 case Builtin::BI__builtin_launder:
3053 assert(isNoopBuiltin(BuiltinID));
3054 return true;
3055
3056 case Builtin::BI__builtin_add_overflow:
3057 case Builtin::BI__builtin_sub_overflow:
3058 case Builtin::BI__builtin_mul_overflow:
3059 case Builtin::BI__builtin_sadd_overflow:
3060 case Builtin::BI__builtin_uadd_overflow:
3061 case Builtin::BI__builtin_uaddl_overflow:
3062 case Builtin::BI__builtin_uaddll_overflow:
3063 case Builtin::BI__builtin_usub_overflow:
3064 case Builtin::BI__builtin_usubl_overflow:
3065 case Builtin::BI__builtin_usubll_overflow:
3066 case Builtin::BI__builtin_umul_overflow:
3067 case Builtin::BI__builtin_umull_overflow:
3068 case Builtin::BI__builtin_umulll_overflow:
3069 case Builtin::BI__builtin_saddl_overflow:
3070 case Builtin::BI__builtin_saddll_overflow:
3071 case Builtin::BI__builtin_ssub_overflow:
3072 case Builtin::BI__builtin_ssubl_overflow:
3073 case Builtin::BI__builtin_ssubll_overflow:
3074 case Builtin::BI__builtin_smul_overflow:
3075 case Builtin::BI__builtin_smull_overflow:
3076 case Builtin::BI__builtin_smulll_overflow:
3077 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
3078
3079 case Builtin::BI__builtin_addcb:
3080 case Builtin::BI__builtin_addcs:
3081 case Builtin::BI__builtin_addc:
3082 case Builtin::BI__builtin_addcl:
3083 case Builtin::BI__builtin_addcll:
3084 case Builtin::BI__builtin_subcb:
3085 case Builtin::BI__builtin_subcs:
3086 case Builtin::BI__builtin_subc:
3087 case Builtin::BI__builtin_subcl:
3088 case Builtin::BI__builtin_subcll:
3089 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
3090
3091 case Builtin::BI__builtin_clz:
3092 case Builtin::BI__builtin_clzl:
3093 case Builtin::BI__builtin_clzll:
3094 case Builtin::BI__builtin_clzs:
3095 case Builtin::BI__builtin_clzg:
3096 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
3097 case Builtin::BI__lzcnt:
3098 case Builtin::BI__lzcnt64:
3099 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
3100
3101 case Builtin::BI__builtin_ctz:
3102 case Builtin::BI__builtin_ctzl:
3103 case Builtin::BI__builtin_ctzll:
3104 case Builtin::BI__builtin_ctzs:
3105 case Builtin::BI__builtin_ctzg:
3106 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
3107
3108 case Builtin::BI__builtin_elementwise_ctlz:
3109 case Builtin::BI__builtin_elementwise_cttz:
3110 return interp__builtin_elementwise_countzeroes(S, OpPC, Frame, Call,
3111 BuiltinID);
3112
3113 case Builtin::BI__builtin_bswap16:
3114 case Builtin::BI__builtin_bswap32:
3115 case Builtin::BI__builtin_bswap64:
3116 return interp__builtin_bswap(S, OpPC, Frame, Call);
3117
3118 case Builtin::BI__atomic_always_lock_free:
3119 case Builtin::BI__atomic_is_lock_free:
3120 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
3121
3122 case Builtin::BI__c11_atomic_is_lock_free:
3123 return interp__builtin_c11_atomic_is_lock_free(S, OpPC, Frame, Call);
3124
3125 case Builtin::BI__builtin_complex:
3126 return interp__builtin_complex(S, OpPC, Frame, Call);
3127
3128 case Builtin::BI__builtin_is_aligned:
3129 case Builtin::BI__builtin_align_up:
3130 case Builtin::BI__builtin_align_down:
3131 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
3132
3133 case Builtin::BI__builtin_assume_aligned:
3134 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
3135
3136 case clang::X86::BI__builtin_ia32_bextr_u32:
3137 case clang::X86::BI__builtin_ia32_bextr_u64:
3138 case clang::X86::BI__builtin_ia32_bextri_u32:
3139 case clang::X86::BI__builtin_ia32_bextri_u64:
3140 return interp__builtin_ia32_bextr(S, OpPC, Frame, Call);
3141
3142 case clang::X86::BI__builtin_ia32_bzhi_si:
3143 case clang::X86::BI__builtin_ia32_bzhi_di:
3144 return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call);
3145
3146 case clang::X86::BI__builtin_ia32_lzcnt_u16:
3147 case clang::X86::BI__builtin_ia32_lzcnt_u32:
3148 case clang::X86::BI__builtin_ia32_lzcnt_u64:
3149 return interp__builtin_ia32_lzcnt(S, OpPC, Frame, Call);
3150
3151 case clang::X86::BI__builtin_ia32_tzcnt_u16:
3152 case clang::X86::BI__builtin_ia32_tzcnt_u32:
3153 case clang::X86::BI__builtin_ia32_tzcnt_u64:
3154 return interp__builtin_ia32_tzcnt(S, OpPC, Frame, Call);
3155
3156 case clang::X86::BI__builtin_ia32_pdep_si:
3157 case clang::X86::BI__builtin_ia32_pdep_di:
3158 return interp__builtin_ia32_pdep(S, OpPC, Frame, Call);
3159
3160 case clang::X86::BI__builtin_ia32_pext_si:
3161 case clang::X86::BI__builtin_ia32_pext_di:
3162 return interp__builtin_ia32_pext(S, OpPC, Frame, Call);
3163
3164 case clang::X86::BI__builtin_ia32_addcarryx_u32:
3165 case clang::X86::BI__builtin_ia32_addcarryx_u64:
3166 case clang::X86::BI__builtin_ia32_subborrow_u32:
3167 case clang::X86::BI__builtin_ia32_subborrow_u64:
3168 return interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, Call,
3169 BuiltinID);
3170
3171 case Builtin::BI__builtin_os_log_format_buffer_size:
3172 return interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, Call);
3173
3174 case Builtin::BI__builtin_ptrauth_string_discriminator:
3175 return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
3176
3177 case Builtin::BI__noop:
3178 pushInteger(S, 0, Call->getType());
3179 return true;
3180
3181 case Builtin::BI__builtin_operator_new:
3182 return interp__builtin_operator_new(S, OpPC, Frame, Call);
3183
3184 case Builtin::BI__builtin_operator_delete:
3185 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
3186
3187 case Builtin::BI__arithmetic_fence:
3188 return interp__builtin_arithmetic_fence(S, OpPC, Frame, Call);
3189
3190 case Builtin::BI__builtin_reduce_add:
3191 case Builtin::BI__builtin_reduce_mul:
3192 case Builtin::BI__builtin_reduce_and:
3193 case Builtin::BI__builtin_reduce_or:
3194 case Builtin::BI__builtin_reduce_xor:
3195 case Builtin::BI__builtin_reduce_min:
3196 case Builtin::BI__builtin_reduce_max:
3197 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
3198
3199 case Builtin::BI__builtin_elementwise_popcount:
3200 case Builtin::BI__builtin_elementwise_bitreverse:
3201 return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call,
3202 BuiltinID);
3203
3204 case Builtin::BI__builtin_elementwise_abs:
3205 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
3206
3207 case Builtin::BI__builtin_memcpy:
3208 case Builtin::BImemcpy:
3209 case Builtin::BI__builtin_wmemcpy:
3210 case Builtin::BIwmemcpy:
3211 case Builtin::BI__builtin_memmove:
3212 case Builtin::BImemmove:
3213 case Builtin::BI__builtin_wmemmove:
3214 case Builtin::BIwmemmove:
3215 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
3216
3217 case Builtin::BI__builtin_memcmp:
3218 case Builtin::BImemcmp:
3219 case Builtin::BI__builtin_bcmp:
3220 case Builtin::BIbcmp:
3221 case Builtin::BI__builtin_wmemcmp:
3222 case Builtin::BIwmemcmp:
3223 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
3224
3225 case Builtin::BImemchr:
3226 case Builtin::BI__builtin_memchr:
3227 case Builtin::BIstrchr:
3228 case Builtin::BI__builtin_strchr:
3229 case Builtin::BIwmemchr:
3230 case Builtin::BI__builtin_wmemchr:
3231 case Builtin::BIwcschr:
3232 case Builtin::BI__builtin_wcschr:
3233 case Builtin::BI__builtin_char_memchr:
3234 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
3235
3236 case Builtin::BI__builtin_object_size:
3237 case Builtin::BI__builtin_dynamic_object_size:
3238 return interp__builtin_object_size(S, OpPC, Frame, Call);
3239
3240 case Builtin::BI__builtin_is_within_lifetime:
3241 return interp__builtin_is_within_lifetime(S, OpPC, Call);
3242
3243 case Builtin::BI__builtin_elementwise_add_sat:
3244 return interp__builtin_elementwise_int_binop(
3245 S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
3246 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
3247 });
3248
3249 case Builtin::BI__builtin_elementwise_sub_sat:
3250 return interp__builtin_elementwise_int_binop(
3251 S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
3252 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
3253 });
3254
3255 case clang::X86::BI__builtin_ia32_pmulhuw128:
3256 case clang::X86::BI__builtin_ia32_pmulhuw256:
3257 case clang::X86::BI__builtin_ia32_pmulhuw512:
3258 return interp__builtin_elementwise_int_binop(S, OpPC, Call, BuiltinID,
3259 llvm::APIntOps::mulhu);
3260
3261 case clang::X86::BI__builtin_ia32_pmulhw128:
3262 case clang::X86::BI__builtin_ia32_pmulhw256:
3263 case clang::X86::BI__builtin_ia32_pmulhw512:
3264 return interp__builtin_elementwise_int_binop(S, OpPC, Call, BuiltinID,
3265 llvm::APIntOps::mulhs);
3266
3267 case clang::X86::BI__builtin_ia32_psllv2di:
3268 case clang::X86::BI__builtin_ia32_psllv4di:
3269 case clang::X86::BI__builtin_ia32_psllv4si:
3270 case clang::X86::BI__builtin_ia32_psllv8si:
3271 case clang::X86::BI__builtin_ia32_psllwi128:
3272 case clang::X86::BI__builtin_ia32_psllwi256:
3273 case clang::X86::BI__builtin_ia32_psllwi512:
3274 case clang::X86::BI__builtin_ia32_pslldi128:
3275 case clang::X86::BI__builtin_ia32_pslldi256:
3276 case clang::X86::BI__builtin_ia32_pslldi512:
3277 case clang::X86::BI__builtin_ia32_psllqi128:
3278 case clang::X86::BI__builtin_ia32_psllqi256:
3279 case clang::X86::BI__builtin_ia32_psllqi512:
3280 return interp__builtin_elementwise_int_binop(
3281 S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
3282 if (RHS.uge(LHS.getBitWidth())) {
3283 return APInt::getZero(LHS.getBitWidth());
3284 }
3285 return LHS.shl(RHS.getZExtValue());
3286 });
3287
3288 case clang::X86::BI__builtin_ia32_psrav4si:
3289 case clang::X86::BI__builtin_ia32_psrav8si:
3290 case clang::X86::BI__builtin_ia32_psrawi128:
3291 case clang::X86::BI__builtin_ia32_psrawi256:
3292 case clang::X86::BI__builtin_ia32_psrawi512:
3293 case clang::X86::BI__builtin_ia32_psradi128:
3294 case clang::X86::BI__builtin_ia32_psradi256:
3295 case clang::X86::BI__builtin_ia32_psradi512:
3296 case clang::X86::BI__builtin_ia32_psraqi128:
3297 case clang::X86::BI__builtin_ia32_psraqi256:
3298 case clang::X86::BI__builtin_ia32_psraqi512:
3299 return interp__builtin_elementwise_int_binop(
3300 S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
3301 if (RHS.uge(LHS.getBitWidth())) {
3302 return LHS.ashr(LHS.getBitWidth() - 1);
3303 }
3304 return LHS.ashr(RHS.getZExtValue());
3305 });
3306
3307 case clang::X86::BI__builtin_ia32_psrlv2di:
3308 case clang::X86::BI__builtin_ia32_psrlv4di:
3309 case clang::X86::BI__builtin_ia32_psrlv4si:
3310 case clang::X86::BI__builtin_ia32_psrlv8si:
3311 case clang::X86::BI__builtin_ia32_psrlwi128:
3312 case clang::X86::BI__builtin_ia32_psrlwi256:
3313 case clang::X86::BI__builtin_ia32_psrlwi512:
3314 case clang::X86::BI__builtin_ia32_psrldi128:
3315 case clang::X86::BI__builtin_ia32_psrldi256:
3316 case clang::X86::BI__builtin_ia32_psrldi512:
3317 case clang::X86::BI__builtin_ia32_psrlqi128:
3318 case clang::X86::BI__builtin_ia32_psrlqi256:
3319 case clang::X86::BI__builtin_ia32_psrlqi512:
3320 return interp__builtin_elementwise_int_binop(
3321 S, OpPC, Call, BuiltinID, [](const APSInt &LHS, const APSInt &RHS) {
3322 if (RHS.uge(LHS.getBitWidth())) {
3323 return APInt::getZero(LHS.getBitWidth());
3324 }
3325 return LHS.lshr(RHS.getZExtValue());
3326 });
3327
3328 case clang::X86::BI__builtin_ia32_vprotbi:
3329 case clang::X86::BI__builtin_ia32_vprotdi:
3330 case clang::X86::BI__builtin_ia32_vprotqi:
3331 case clang::X86::BI__builtin_ia32_vprotwi:
3332 case clang::X86::BI__builtin_ia32_prold128:
3333 case clang::X86::BI__builtin_ia32_prold256:
3334 case clang::X86::BI__builtin_ia32_prold512:
3335 case clang::X86::BI__builtin_ia32_prolq128:
3336 case clang::X86::BI__builtin_ia32_prolq256:
3337 case clang::X86::BI__builtin_ia32_prolq512:
3338 return interp__builtin_elementwise_int_binop(
3339 S, OpPC, Call, BuiltinID,
3340 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
3341
3342 case clang::X86::BI__builtin_ia32_prord128:
3343 case clang::X86::BI__builtin_ia32_prord256:
3344 case clang::X86::BI__builtin_ia32_prord512:
3345 case clang::X86::BI__builtin_ia32_prorq128:
3346 case clang::X86::BI__builtin_ia32_prorq256:
3347 case clang::X86::BI__builtin_ia32_prorq512:
3348 return interp__builtin_elementwise_int_binop(
3349 S, OpPC, Call, BuiltinID,
3350 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
3351
3352 case Builtin::BI__builtin_elementwise_max:
3353 case Builtin::BI__builtin_elementwise_min:
3354 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
3355
3356 case clang::X86::BI__builtin_ia32_pmuldq128:
3357 case clang::X86::BI__builtin_ia32_pmuldq256:
3358 case clang::X86::BI__builtin_ia32_pmuldq512:
3359 case clang::X86::BI__builtin_ia32_pmuludq128:
3360 case clang::X86::BI__builtin_ia32_pmuludq256:
3361 case clang::X86::BI__builtin_ia32_pmuludq512:
3362 return interp__builtin_ia32_pmul(S, OpPC, Call, BuiltinID);
3363
3364 case Builtin::BI__builtin_elementwise_fma:
3365 return interp__builtin_elementwise_fma(S, OpPC, Call);
3366
3367 case X86::BI__builtin_ia32_selectb_128:
3368 case X86::BI__builtin_ia32_selectb_256:
3369 case X86::BI__builtin_ia32_selectb_512:
3370 case X86::BI__builtin_ia32_selectw_128:
3371 case X86::BI__builtin_ia32_selectw_256:
3372 case X86::BI__builtin_ia32_selectw_512:
3373 case X86::BI__builtin_ia32_selectd_128:
3374 case X86::BI__builtin_ia32_selectd_256:
3375 case X86::BI__builtin_ia32_selectd_512:
3376 case X86::BI__builtin_ia32_selectq_128:
3377 case X86::BI__builtin_ia32_selectq_256:
3378 case X86::BI__builtin_ia32_selectq_512:
3379 case X86::BI__builtin_ia32_selectph_128:
3380 case X86::BI__builtin_ia32_selectph_256:
3381 case X86::BI__builtin_ia32_selectph_512:
3382 case X86::BI__builtin_ia32_selectpbf_128:
3383 case X86::BI__builtin_ia32_selectpbf_256:
3384 case X86::BI__builtin_ia32_selectpbf_512:
3385 case X86::BI__builtin_ia32_selectps_128:
3386 case X86::BI__builtin_ia32_selectps_256:
3387 case X86::BI__builtin_ia32_selectps_512:
3388 case X86::BI__builtin_ia32_selectpd_128:
3389 case X86::BI__builtin_ia32_selectpd_256:
3390 case X86::BI__builtin_ia32_selectpd_512:
3391 return interp__builtin_select(S, OpPC, Call);
3392
3393 default:
3394 S.FFDiag(S.Current->getLocation(OpPC),
3395 diag::note_invalid_subexpr_in_const_expr)
3396 << S.Current->getRange(OpPC);
3397
3398 return false;
3399 }
3400
3401 llvm_unreachable("Unhandled builtin ID");
3402}
3403
3404bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
3405 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
3406 CharUnits Result;
3407 unsigned N = E->getNumComponents();
3408 assert(N > 0);
3409
3410 unsigned ArrayIndex = 0;
3411 QualType CurrentType = E->getTypeSourceInfo()->getType();
3412 for (unsigned I = 0; I != N; ++I) {
3413 const OffsetOfNode &Node = E->getComponent(I);
3414 switch (Node.getKind()) {
3415 case OffsetOfNode::Field: {
3416 const FieldDecl *MemberDecl = Node.getField();
3417 const auto *RD = CurrentType->getAsRecordDecl();
3418 if (!RD || RD->isInvalidDecl())
3419 return false;
3420 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
3421 unsigned FieldIndex = MemberDecl->getFieldIndex();
3422 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
3423 Result +=
3424 S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
3425 CurrentType = MemberDecl->getType().getNonReferenceType();
3426 break;
3427 }
3428 case OffsetOfNode::Array: {
3429 // When generating bytecode, we put all the index expressions as Sint64 on
3430 // the stack.
3431 int64_t Index = ArrayIndices[ArrayIndex];
3432 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
3433 if (!AT)
3434 return false;
3435 CurrentType = AT->getElementType();
3436 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
3437 Result += Index * ElementSize;
3438 ++ArrayIndex;
3439 break;
3440 }
3441 case OffsetOfNode::Base: {
3442 const CXXBaseSpecifier *BaseSpec = Node.getBase();
3443 if (BaseSpec->isVirtual())
3444 return false;
3445
3446 // Find the layout of the class whose base we are looking into.
3447 const auto *RD = CurrentType->getAsCXXRecordDecl();
3448 if (!RD || RD->isInvalidDecl())
3449 return false;
3450 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
3451
3452 // Find the base class itself.
3453 CurrentType = BaseSpec->getType();
3454 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
3455 if (!BaseRD)
3456 return false;
3457
3458 // Add the offset to the base.
3459 Result += RL.getBaseClassOffset(BaseRD);
3460 break;
3461 }
3462 case OffsetOfNode::Identifier:
3463 llvm_unreachable("Dependent OffsetOfExpr?");
3464 }
3465 }
3466
3467 IntResult = Result.getQuantity();
3468
3469 return true;
3470}
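InterpretOffsetOf accumulates the byte offset of each offsetof component: record fields, array indices (pushed on the stack as Sint64 during bytecode generation), and non-virtual bases; virtual bases bail out, and dependent components are unreachable here. A small sketch (hypothetical code, not from this file) of an offsetof expression exercising the Field and Array components:

  struct S { char Tag; int Arr[4]; };
  // The identity below is sizeof arithmetic on a standard-layout type.
  constexpr auto Off = __builtin_offsetof(S, Arr[2]);
  static_assert(Off == __builtin_offsetof(S, Arr) + 2 * sizeof(int), "");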
3471
3472 bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
3473 const Pointer &Ptr, const APSInt &IntValue) {
3474
3475 const Record *R = Ptr.getRecord();
3476 assert(R);
3477 assert(R->getNumFields() == 1);
3478
3479 unsigned FieldOffset = R->getField(0u)->Offset;
3480 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
3481 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
3482
3483 INT_TYPE_SWITCH(FieldT,
3484 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
3485 FieldPtr.initialize();
3486 return true;
3487}
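SetThreeWayComparisonField writes an integral result into the single data member of a std::{weak,partial,strong}_ordering object. Roughly the situation it serves during constant evaluation (illustrative C++20 snippet, not from this file):

  #include <compare>
  // Builtin <=> on integers yields a std::strong_ordering whose lone value
  // field carries the result; comparing it against the literal 0 is constexpr.
  constexpr auto Cmp = (1 <=> 2);
  static_assert(Cmp < 0, "");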
3488
3489static void zeroAll(Pointer &Dest) {
3490 const Descriptor *Desc = Dest.getFieldDesc();
3491
3492 if (Desc->isPrimitive()) {
3493 TYPE_SWITCH(Desc->getPrimType(), {
3494 Dest.deref<T>().~T();
3495 new (&Dest.deref<T>()) T();
3496 });
3497 return;
3498 }
3499
3500 if (Desc->isRecord()) {
3501 const Record *R = Desc->ElemRecord;
3502 for (const Record::Field &F : R->fields()) {
3503 Pointer FieldPtr = Dest.atField(F.Offset);
3504 zeroAll(FieldPtr);
3505 }
3506 return;
3507 }
3508
3509 if (Desc->isPrimitiveArray()) {
3510 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
3511 TYPE_SWITCH(Desc->getPrimType(), {
3512 Dest.deref<T>().~T();
3513 new (&Dest.deref<T>()) T();
3514 });
3515 }
3516 return;
3517 }
3518
3519 if (Desc->isCompositeArray()) {
3520 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
3521 Pointer ElemPtr = Dest.atIndex(I).narrow();
3522 zeroAll(ElemPtr);
3523 }
3524 return;
3525 }
3526}
3527
3528static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
3529 Pointer &Dest, bool Activate);
3530static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
3531 Pointer &Dest, bool Activate = false) {
3532 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
3533 const Descriptor *DestDesc = Dest.getFieldDesc();
3534
3535 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
3536 Pointer DestField = Dest.atField(F.Offset);
3537 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
3538 TYPE_SWITCH(*FT, {
3539 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
3540 if (Src.atField(F.Offset).isInitialized())
3541 DestField.initialize();
3542 if (Activate)
3543 DestField.activate();
3544 });
3545 return true;
3546 }
3547 // Composite field.
3548 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
3549 };
3550
3551 assert(SrcDesc->isRecord());
3552 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
3553 const Record *R = DestDesc->ElemRecord;
3554 for (const Record::Field &F : R->fields()) {
3555 if (R->isUnion()) {
3556 // For unions, only copy the active field. Zero all others.
3557 const Pointer &SrcField = Src.atField(F.Offset);
3558 if (SrcField.isActive()) {
3559 if (!copyField(F, /*Activate=*/true))
3560 return false;
3561 } else {
3562 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
3563 return false;
3564 Pointer DestField = Dest.atField(F.Offset);
3565 zeroAll(DestField);
3566 }
3567 } else {
3568 if (!copyField(F, Activate))
3569 return false;
3570 }
3571 }
3572
3573 for (const Record::Base &B : R->bases()) {
3574 Pointer DestBase = Dest.atField(B.Offset);
3575 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
3576 return false;
3577 }
3578
3579 Dest.initialize();
3580 return true;
3581}
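copyRecord copies a record field by field and base by base; for unions, only the active member is copied and every other member is zeroed via zeroAll. A constexpr sketch of the union behaviour this models (hypothetical code, not from this file):

  union U { int I; float F; };
  constexpr int activeCopy() {
    U A{};      // value-initialization makes 'I' the active member
    A.I = 42;
    U B = A;    // trivial copy: only the active member is meaningful in B
    return B.I;
  }
  static_assert(activeCopy() == 42, "");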
3582
3583static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
3584 Pointer &Dest, bool Activate = false) {
3585 assert(Src.isLive() && Dest.isLive());
3586
3587 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
3588 const Descriptor *DestDesc = Dest.getFieldDesc();
3589
3590 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
3591
3592 if (DestDesc->isPrimitiveArray()) {
3593 assert(SrcDesc->isPrimitiveArray());
3594 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
3595 PrimType ET = DestDesc->getPrimType();
3596 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
3597 Pointer DestElem = Dest.atIndex(I);
3598 TYPE_SWITCH(ET, {
3599 DestElem.deref<T>() = Src.elem<T>(I);
3600 DestElem.initialize();
3601 });
3602 }
3603 return true;
3604 }
3605
3606 if (DestDesc->isCompositeArray()) {
3607 assert(SrcDesc->isCompositeArray());
3608 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
3609 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
3610 const Pointer &SrcElem = Src.atIndex(I).narrow();
3611 Pointer DestElem = Dest.atIndex(I).narrow();
3612 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
3613 return false;
3614 }
3615 return true;
3616 }
3617
3618 if (DestDesc->isRecord())
3619 return copyRecord(S, OpPC, Src, Dest, Activate);
3620 return Invalid(S, OpPC);
3621}
3622
3623bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
3624 return copyComposite(S, OpPC, Src, Dest);
3625}
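DoMemcpy is the exported entry point and simply defers to copyComposite for arrays and records. A sketch of the kind of constexpr whole-object copy this composite-copy machinery serves (illustrative only; assumes a trivially copyable type):

  struct Point { int X; int Y; };
  constexpr Point clonePoint(const Point &In) {
    Point Out{};
    // __builtin_memcpy of a trivially copyable object is permitted in
    // constant expressions.
    __builtin_memcpy(&Out, &In, sizeof(Point));
    return Out;
  }
  static_assert(clonePoint({1, 2}).Y == 2, "");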
3626
3627} // namespace interp
3628} // namespace clang