1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/InlineAsm.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/MDBuilder.h"
37#include "llvm/Support/SaveAndRestore.h"
38#include <optional>
39
40using namespace clang;
41using namespace CodeGen;
42
43//===----------------------------------------------------------------------===//
44// Statement Emission
45//===----------------------------------------------------------------------===//
46
47namespace llvm {
48extern cl::opt<bool> EnableSingleByteCoverage;
49} // namespace llvm
50
51void CodeGenFunction::EmitStopPoint(const Stmt *S) {
52 if (CGDebugInfo *DI = getDebugInfo()) {
53 SourceLocation Loc;
54 Loc = S->getBeginLoc();
55 DI->EmitLocation(Builder, Loc);
56
57 LastStopPoint = Loc;
58 }
59}
60
61void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
62 assert(S && "Null statement?");
63 PGO->setCurrentStmt(S);
64
65 // These statements have their own debug info handling.
66 if (EmitSimpleStmt(S, Attrs))
67 return;
68
69 // Check if we are generating unreachable code.
70 if (!HaveInsertPoint()) {
71 // If so, and the statement doesn't contain a label, then we do not need to
72 // generate actual code. This is safe because (1) the current point is
73 // unreachable, so we don't need to execute the code, and (2) we've already
74 // handled the statements which update internal data structures (like the
75 // local variable map) which could be used by subsequent statements.
76 if (!ContainsLabel(S)) {
77 // Verify that any decl statements were handled as simple, they may be in
78 // scope of subsequent reachable statements.
79 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
80 PGO->markStmtMaybeUsed(S);
81 return;
82 }
83
84 // Otherwise, make a new block to hold the code.
85 EnsureInsertPoint();
86 }
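// For illustration, a hypothetical input that exercises the path above
// (assumed example, not from this file):
//   if (c) { return 0; dead: use(); }
// After 'return 0;' there is no insert point; the trailing 'dead:' statement
// contains a label, so EnsureInsertPoint() opens a fresh block instead of
// the statement being skipped.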
87
88 // Generate a stoppoint if we are emitting debug info.
89 EmitStopPoint(S);
90
91 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
92 // enabled.
93 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
94 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
95 EmitSimpleOMPExecutableDirective(*D);
96 return;
97 }
98 }
99
100 switch (S->getStmtClass()) {
101 case Stmt::NoStmtClass:
102 case Stmt::CXXCatchStmtClass:
103 case Stmt::SEHExceptStmtClass:
104 case Stmt::SEHFinallyStmtClass:
105 case Stmt::MSDependentExistsStmtClass:
106 llvm_unreachable("invalid statement class to emit generically");
107 case Stmt::NullStmtClass:
108 case Stmt::CompoundStmtClass:
109 case Stmt::DeclStmtClass:
110 case Stmt::LabelStmtClass:
111 case Stmt::AttributedStmtClass:
112 case Stmt::GotoStmtClass:
113 case Stmt::BreakStmtClass:
114 case Stmt::ContinueStmtClass:
115 case Stmt::DefaultStmtClass:
116 case Stmt::CaseStmtClass:
117 case Stmt::SEHLeaveStmtClass:
118 case Stmt::SYCLKernelCallStmtClass:
119 llvm_unreachable("should have emitted these statements as simple");
120
121#define STMT(Type, Base)
122#define ABSTRACT_STMT(Op)
123#define EXPR(Type, Base) \
124 case Stmt::Type##Class:
125#include "clang/AST/StmtNodes.inc"
126 {
127 // Remember the block we came in on.
128 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
129 assert(incoming && "expression emission must have an insertion point");
130
131 EmitIgnoredExpr(cast<Expr>(S));
132
133 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
134 assert(outgoing && "expression emission cleared block!");
135
136 // The expression emitters assume (reasonably!) that the insertion
137 // point is always set. To maintain that, the call-emission code
138 // for noreturn functions has to enter a new block with no
139 // predecessors. We want to kill that block and mark the current
140 // insertion point unreachable in the common case of a call like
141 // "exit();". Since expression emission doesn't otherwise create
142 // blocks with no predecessors, we can just test for that.
143 // However, we must be careful not to do this to our incoming
144 // block, because *statement* emission does sometimes create
145 // reachable blocks which will have no predecessors until later in
146 // the function. This occurs with, e.g., labels that are not
147 // reachable by fallthrough.
148 if (incoming != outgoing && outgoing->use_empty()) {
149 outgoing->eraseFromParent();
150 Builder.ClearInsertionPoint();
151 }
152 break;
153 }
154
155 case Stmt::IndirectGotoStmtClass:
156 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
157
158 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
159 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
160 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
161 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
162
163 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
164
165 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
166 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
167 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
168 case Stmt::CoroutineBodyStmtClass:
169 EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
170 break;
171 case Stmt::CoreturnStmtClass:
172 EmitCoreturnStmt(cast<CoreturnStmt>(*S));
173 break;
174 case Stmt::CapturedStmtClass: {
175 const CapturedStmt *CS = cast<CapturedStmt>(S);
176 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
177 }
178 break;
179 case Stmt::ObjCAtTryStmtClass:
180 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
181 break;
182 case Stmt::ObjCAtCatchStmtClass:
183 llvm_unreachable(
184 "@catch statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtFinallyStmtClass:
186 llvm_unreachable(
187 "@finally statements should be handled by EmitObjCAtTryStmt");
188 case Stmt::ObjCAtThrowStmtClass:
189 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
190 break;
191 case Stmt::ObjCAtSynchronizedStmtClass:
192 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
193 break;
194 case Stmt::ObjCForCollectionStmtClass:
195 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
196 break;
197 case Stmt::ObjCAutoreleasePoolStmtClass:
198 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
199 break;
200
201 case Stmt::CXXTryStmtClass:
202 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
203 break;
204 case Stmt::CXXForRangeStmtClass:
205 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
206 break;
207 case Stmt::SEHTryStmtClass:
208 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
209 break;
210 case Stmt::OMPMetaDirectiveClass:
211 EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
212 break;
213 case Stmt::OMPCanonicalLoopClass:
214 EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
215 break;
216 case Stmt::OMPParallelDirectiveClass:
217 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
218 break;
219 case Stmt::OMPSimdDirectiveClass:
220 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
221 break;
222 case Stmt::OMPTileDirectiveClass:
223 EmitOMPTileDirective(cast<OMPTileDirective>(*S));
224 break;
225 case Stmt::OMPStripeDirectiveClass:
226 EmitOMPStripeDirective(cast<OMPStripeDirective>(*S));
227 break;
228 case Stmt::OMPUnrollDirectiveClass:
229 EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
230 break;
231 case Stmt::OMPReverseDirectiveClass:
232 EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
233 break;
234 case Stmt::OMPInterchangeDirectiveClass:
235 EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
236 break;
237 case Stmt::OMPForDirectiveClass:
238 EmitOMPForDirective(cast<OMPForDirective>(*S));
239 break;
240 case Stmt::OMPForSimdDirectiveClass:
241 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
242 break;
243 case Stmt::OMPSectionsDirectiveClass:
244 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
245 break;
246 case Stmt::OMPSectionDirectiveClass:
247 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
248 break;
249 case Stmt::OMPSingleDirectiveClass:
250 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
251 break;
252 case Stmt::OMPMasterDirectiveClass:
253 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
254 break;
255 case Stmt::OMPCriticalDirectiveClass:
256 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
257 break;
258 case Stmt::OMPParallelForDirectiveClass:
259 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
260 break;
261 case Stmt::OMPParallelForSimdDirectiveClass:
262 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
263 break;
264 case Stmt::OMPParallelMasterDirectiveClass:
265 EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
266 break;
267 case Stmt::OMPParallelSectionsDirectiveClass:
268 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
269 break;
270 case Stmt::OMPTaskDirectiveClass:
271 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
272 break;
273 case Stmt::OMPTaskyieldDirectiveClass:
274 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
275 break;
276 case Stmt::OMPErrorDirectiveClass:
277 EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
278 break;
279 case Stmt::OMPBarrierDirectiveClass:
280 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
281 break;
282 case Stmt::OMPTaskwaitDirectiveClass:
283 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
284 break;
285 case Stmt::OMPTaskgroupDirectiveClass:
286 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
287 break;
288 case Stmt::OMPFlushDirectiveClass:
289 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
290 break;
291 case Stmt::OMPDepobjDirectiveClass:
292 EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
293 break;
294 case Stmt::OMPScanDirectiveClass:
295 EmitOMPScanDirective(cast<OMPScanDirective>(*S));
296 break;
297 case Stmt::OMPOrderedDirectiveClass:
298 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
299 break;
300 case Stmt::OMPAtomicDirectiveClass:
301 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
302 break;
303 case Stmt::OMPTargetDirectiveClass:
304 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
305 break;
306 case Stmt::OMPTeamsDirectiveClass:
307 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
308 break;
309 case Stmt::OMPCancellationPointDirectiveClass:
310 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
311 break;
312 case Stmt::OMPCancelDirectiveClass:
313 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
314 break;
315 case Stmt::OMPTargetDataDirectiveClass:
316 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
317 break;
318 case Stmt::OMPTargetEnterDataDirectiveClass:
319 EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
320 break;
321 case Stmt::OMPTargetExitDataDirectiveClass:
322 EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
323 break;
324 case Stmt::OMPTargetParallelDirectiveClass:
325 EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
326 break;
327 case Stmt::OMPTargetParallelForDirectiveClass:
328 EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
329 break;
330 case Stmt::OMPTaskLoopDirectiveClass:
331 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
332 break;
333 case Stmt::OMPTaskLoopSimdDirectiveClass:
334 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
335 break;
336 case Stmt::OMPMasterTaskLoopDirectiveClass:
337 EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
338 break;
339 case Stmt::OMPMaskedTaskLoopDirectiveClass:
340 EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
341 break;
342 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
343 EmitOMPMasterTaskLoopSimdDirective(
344 cast<OMPMasterTaskLoopSimdDirective>(*S));
345 break;
346 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
347 EmitOMPMaskedTaskLoopSimdDirective(
348 cast<OMPMaskedTaskLoopSimdDirective>(*S));
349 break;
350 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
351 EmitOMPParallelMasterTaskLoopDirective(
352 cast<OMPParallelMasterTaskLoopDirective>(*S));
353 break;
354 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
355 EmitOMPParallelMaskedTaskLoopDirective(
356 cast<OMPParallelMaskedTaskLoopDirective>(*S));
357 break;
358 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
359 EmitOMPParallelMasterTaskLoopSimdDirective(
360 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
361 break;
362 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
363 EmitOMPParallelMaskedTaskLoopSimdDirective(
364 cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
365 break;
366 case Stmt::OMPDistributeDirectiveClass:
367 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
368 break;
369 case Stmt::OMPTargetUpdateDirectiveClass:
370 EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
371 break;
372 case Stmt::OMPDistributeParallelForDirectiveClass:
373 EmitOMPDistributeParallelForDirective(
374 cast<OMPDistributeParallelForDirective>(*S));
375 break;
376 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
377 EmitOMPDistributeParallelForSimdDirective(
378 cast<OMPDistributeParallelForSimdDirective>(*S));
379 break;
380 case Stmt::OMPDistributeSimdDirectiveClass:
381 EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
382 break;
383 case Stmt::OMPTargetParallelForSimdDirectiveClass:
384 EmitOMPTargetParallelForSimdDirective(
385 cast<OMPTargetParallelForSimdDirective>(*S));
386 break;
387 case Stmt::OMPTargetSimdDirectiveClass:
388 EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
389 break;
390 case Stmt::OMPTeamsDistributeDirectiveClass:
391 EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
392 break;
393 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
394 EmitOMPTeamsDistributeSimdDirective(
395 cast<OMPTeamsDistributeSimdDirective>(*S));
396 break;
397 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
398 EmitOMPTeamsDistributeParallelForSimdDirective(
399 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
400 break;
401 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
402 EmitOMPTeamsDistributeParallelForDirective(
403 cast<OMPTeamsDistributeParallelForDirective>(*S));
404 break;
405 case Stmt::OMPTargetTeamsDirectiveClass:
406 EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
407 break;
408 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
409 EmitOMPTargetTeamsDistributeDirective(
410 cast<OMPTargetTeamsDistributeDirective>(*S));
411 break;
412 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
413 EmitOMPTargetTeamsDistributeParallelForDirective(
414 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
415 break;
416 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
417 EmitOMPTargetTeamsDistributeParallelForSimdDirective(
418 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
419 break;
420 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
421 EmitOMPTargetTeamsDistributeSimdDirective(
422 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
423 break;
424 case Stmt::OMPInteropDirectiveClass:
425 EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
426 break;
427 case Stmt::OMPDispatchDirectiveClass:
428 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
429 break;
430 case Stmt::OMPScopeDirectiveClass:
431 EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
432 break;
433 case Stmt::OMPMaskedDirectiveClass:
434 EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
435 break;
436 case Stmt::OMPGenericLoopDirectiveClass:
437 EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
438 break;
439 case Stmt::OMPTeamsGenericLoopDirectiveClass:
440 EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
441 break;
442 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
443 EmitOMPTargetTeamsGenericLoopDirective(
444 cast<OMPTargetTeamsGenericLoopDirective>(*S));
445 break;
446 case Stmt::OMPParallelGenericLoopDirectiveClass:
447 EmitOMPParallelGenericLoopDirective(
448 cast<OMPParallelGenericLoopDirective>(*S));
449 break;
450 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
451 EmitOMPTargetParallelGenericLoopDirective(
452 cast<OMPTargetParallelGenericLoopDirective>(*S));
453 break;
454 case Stmt::OMPParallelMaskedDirectiveClass:
455 EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
456 break;
457 case Stmt::OMPAssumeDirectiveClass:
458 EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
459 break;
460 case Stmt::OpenACCComputeConstructClass:
461 EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
462 break;
463 case Stmt::OpenACCLoopConstructClass:
464 EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
465 break;
466 case Stmt::OpenACCCombinedConstructClass:
467 EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
468 break;
469 case Stmt::OpenACCDataConstructClass:
470 EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
471 break;
472 case Stmt::OpenACCEnterDataConstructClass:
473 EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
474 break;
475 case Stmt::OpenACCExitDataConstructClass:
476 EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
477 break;
478 case Stmt::OpenACCHostDataConstructClass:
479 EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
480 break;
481 case Stmt::OpenACCWaitConstructClass:
482 EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
483 break;
484 case Stmt::OpenACCInitConstructClass:
485 EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
486 break;
487 case Stmt::OpenACCShutdownConstructClass:
488 EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
489 break;
490 case Stmt::OpenACCSetConstructClass:
491 EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
492 break;
493 case Stmt::OpenACCUpdateConstructClass:
494 EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
495 break;
496 case Stmt::OpenACCAtomicConstructClass:
497 EmitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*S));
498 break;
499 case Stmt::OpenACCCacheConstructClass:
500 EmitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*S));
501 break;
502 }
503}
504
505bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
506 ArrayRef<const Attr *> Attrs) {
507 switch (S->getStmtClass()) {
508 default:
509 return false;
510 case Stmt::NullStmtClass:
511 break;
512 case Stmt::CompoundStmtClass:
513 EmitCompoundStmt(cast<CompoundStmt>(*S));
514 break;
515 case Stmt::DeclStmtClass:
516 EmitDeclStmt(cast<DeclStmt>(*S));
517 break;
518 case Stmt::LabelStmtClass:
519 EmitLabelStmt(cast<LabelStmt>(*S));
520 break;
521 case Stmt::AttributedStmtClass:
522 EmitAttributedStmt(cast<AttributedStmt>(*S));
523 break;
524 case Stmt::GotoStmtClass:
525 EmitGotoStmt(cast<GotoStmt>(*S));
526 break;
527 case Stmt::BreakStmtClass:
528 EmitBreakStmt(cast<BreakStmt>(*S));
529 break;
530 case Stmt::ContinueStmtClass:
531 EmitContinueStmt(cast<ContinueStmt>(*S));
532 break;
533 case Stmt::DefaultStmtClass:
534 EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
535 break;
536 case Stmt::CaseStmtClass:
537 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
538 break;
539 case Stmt::SEHLeaveStmtClass:
540 EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
541 break;
542 case Stmt::SYCLKernelCallStmtClass:
543 // SYCL kernel call statements are generated as wrappers around the body
544 // of functions declared with the sycl_kernel_entry_point attribute. Such
545 // functions are used to specify how a SYCL kernel (a function object) is
546 // to be invoked; the SYCL kernel call statement contains a transformed
547 // variation of the function body and is used to generate a SYCL kernel
548 // caller function; a function that serves as the device side entry point
549 // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
550 // function is invoked by host code in order to trigger emission of the
551 // device side SYCL kernel caller function and to generate metadata needed
552 // by SYCL run-time library implementations; the function is otherwise
553 // intended to have no effect. As such, the function body is not evaluated
554 // as part of the invocation during host compilation (and the function
555 // should not be called or emitted during device compilation); the SYCL
556 // kernel call statement is thus handled as a null statement for the
557 // purpose of code generation.
558 break;
559 }
560 return true;
561}
562
563/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
564/// this captures the expression result of the last sub-statement and returns it
565/// (for use by the statement expression extension).
566Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
567 AggValueSlot AggSlot) {
568 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
569 "LLVM IR generation of compound statement ('{}')");
570
571 // Keep track of the current cleanup stack depth, including debug scopes.
572 LexicalScope Scope(*this, S.getSourceRange());
573
574 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
575}
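// For illustration, a hypothetical use of the GetLast path (assumed example,
// not from this file): in the GNU statement expression
//   int x = ({ f(); g(); });
// the compound statement is emitted with GetLast = true and the value of the
// final expression 'g()' is captured, via a temporary alloca when cleanups
// may still run at the end of the StmtExpr.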
576
577Address
578CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
579 bool GetLast,
580 AggValueSlot AggSlot) {
581
582 const Stmt *ExprResult = S.getStmtExprResult();
583 assert((!GetLast || (GetLast && ExprResult)) &&
584 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
585
586 Address RetAlloca = Address::invalid();
587
588 for (auto *CurStmt : S.body()) {
589 if (GetLast && ExprResult == CurStmt) {
590 // We have to special case labels here. They are statements, but when put
591 // at the end of a statement expression, they yield the value of their
592 // subexpression. Handle this by walking through all labels we encounter,
593 // emitting them before we evaluate the subexpr.
594 // Similar issues arise for attributed statements.
595 while (!isa<Expr>(ExprResult)) {
596 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
597 EmitLabel(LS->getDecl());
598 ExprResult = LS->getSubStmt();
599 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
600 // FIXME: Update this if we ever have attributes that affect the
601 // semantics of an expression.
602 ExprResult = AS->getSubStmt();
603 } else {
604 llvm_unreachable("unknown value statement");
605 }
606 }
607
608 EnsureInsertPoint();
609
610 const Expr *E = cast<Expr>(ExprResult);
611 QualType ExprTy = E->getType();
612 if (hasAggregateEvaluationKind(ExprTy)) {
613 EmitAggExpr(E, AggSlot);
614 } else {
615 // We can't return an RValue here because there might be cleanups at
616 // the end of the StmtExpr. Because of that, we have to emit the result
617 // here into a temporary alloca.
618 RetAlloca = CreateMemTemp(ExprTy);
619 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
620 /*IsInit*/ false);
621 }
622 } else {
623 EmitStmt(CurStmt);
624 }
625 }
626
627 return RetAlloca;
628}
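// For illustration, the label-unwrapping loop above handles hypothetical
// inputs such as
//   int x = ({ int y = f(); out: y; });
// where 'out:' is emitted as a label first and 'y' is then evaluated as the
// result of the statement expression.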
629
630void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
631 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
632
633 // If there is a cleanup stack, then it isn't worth trying to
634 // simplify this block (we would need to remove it from the scope map
635 // and cleanup entry).
636 if (!EHStack.empty())
637 return;
638
639 // Can only simplify direct branches.
640 if (!BI || !BI->isUnconditional())
641 return;
642
643 // Can only simplify empty blocks.
644 if (BI->getIterator() != BB->begin())
645 return;
646
647 BB->replaceAllUsesWith(BI->getSuccessor(0));
648 BI->eraseFromParent();
649 BB->eraseFromParent();
650}
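// For illustration, assuming IR of the shape (hypothetical block names):
//   while.cond:
//     br label %while.end
// with no cleanups pending, the helper above redirects all uses of
// %while.cond to %while.end and deletes the empty forwarding block.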
651
652void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
653 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
654
655 // Fall out of the current block (if necessary).
656 EmitBranch(BB);
657
658 if (IsFinished && BB->use_empty()) {
659 delete BB;
660 return;
661 }
662
663 // Place the block after the current block, if possible, or else at
664 // the end of the function.
665 if (CurBB && CurBB->getParent())
666 CurFn->insert(std::next(CurBB->getIterator()), BB);
667 else
668 CurFn->insert(CurFn->end(), BB);
669 Builder.SetInsertPoint(BB);
670}
671
672void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
673 // Emit a branch from the current block to the target one if this
674 // was a real block. If this was just a fall-through block after a
675 // terminator, don't emit it.
676 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
677
678 if (!CurBB || CurBB->getTerminator()) {
679 // If there is no insert point or the previous block is already
680 // terminated, don't touch it.
681 } else {
682 // Otherwise, create a fall-through branch.
683 Builder.CreateBr(Target);
684 }
685
686 Builder.ClearInsertionPoint();
687}
688
689void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
690 bool inserted = false;
691 for (llvm::User *u : block->users()) {
692 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
693 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
694 inserted = true;
695 break;
696 }
697 }
698
699 if (!inserted)
700 CurFn->insert(CurFn->end(), block);
701
702 Builder.SetInsertPoint(block);
703}
704
705CodeGenFunction::JumpDest
706CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
707 JumpDest &Dest = LabelMap[D];
708 if (Dest.isValid()) return Dest;
709
710 // Create, but don't insert, the new block.
711 Dest = JumpDest(createBasicBlock(D->getName()),
712 EHScopeStack::stable_iterator::invalid(),
713 NextCleanupDestIndex++);
714 return Dest;
715}
716
717void CodeGenFunction::EmitLabel(const LabelDecl *D) {
718 // Add this label to the current lexical scope if we're within any
719 // normal cleanups. Jumps "in" to this label --- when permitted by
720 // the language --- may need to be routed around such cleanups.
721 if (EHStack.hasNormalCleanups() && CurLexicalScope)
722 CurLexicalScope->addLabel(D);
723
724 JumpDest &Dest = LabelMap[D];
725
726 // If we didn't need a forward reference to this label, just go
727 // ahead and create a destination at the current scope.
728 if (!Dest.isValid()) {
729 Dest = getJumpDestInCurrentScope(D->getName());
730
731 // Otherwise, we need to give this label a target depth and remove
732 // it from the branch-fixups list.
733 } else {
734 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
735 Dest.setScopeDepth(EHStack.stable_begin());
736 ResolveBranchFixups(Dest.getBlock());
737 }
738
739 EmitBlock(Dest.getBlock());
740
741 // Emit debug info for labels.
742 if (CGDebugInfo *DI = getDebugInfo()) {
743 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
744 DI->setLocation(D->getLocation());
745 DI->EmitLabel(D, Builder);
746 }
747 }
748
749 incrementProfileCounter(D->getStmt());
750}
751
752/// Change the cleanup scope of the labels in this lexical scope to
753/// match the scope of the enclosing context.
754void CodeGenFunction::LexicalScope::rescopeLabels() {
755 assert(!Labels.empty());
756 EHScopeStack::stable_iterator innermostScope
757 = CGF.EHStack.getInnermostNormalCleanup();
758
759 // Change the scope depth of all the labels.
760 for (const LabelDecl *Label : Labels) {
761 assert(CGF.LabelMap.count(Label));
762 JumpDest &dest = CGF.LabelMap.find(Label)->second;
763 assert(dest.getScopeDepth().isValid());
764 assert(innermostScope.encloses(dest.getScopeDepth()));
765 dest.setScopeDepth(innermostScope);
766 }
767
768 // Reparent the labels if the new scope also has cleanups.
769 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
770 ParentScope->Labels.append(Labels.begin(), Labels.end());
771 }
772}
773
774
775void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
776 EmitLabel(S.getDecl());
777
778 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
779 if (getLangOpts().EHAsynch && S.isSideEntry())
780 EmitSehCppScopeBegin();
781
782 EmitStmt(S.getSubStmt());
783}
784
785void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
786 bool nomerge = false;
787 bool noinline = false;
788 bool alwaysinline = false;
789 bool noconvergent = false;
790 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
791 HLSLControlFlowHintAttr::SpellingNotCalculated;
792 const CallExpr *musttail = nullptr;
793 const AtomicAttr *AA = nullptr;
794
795 for (const auto *A : S.getAttrs()) {
796 switch (A->getKind()) {
797 default:
798 break;
799 case attr::NoMerge:
800 nomerge = true;
801 break;
802 case attr::NoInline:
803 noinline = true;
804 break;
805 case attr::AlwaysInline:
806 alwaysinline = true;
807 break;
808 case attr::NoConvergent:
809 noconvergent = true;
810 break;
811 case attr::MustTail: {
812 const Stmt *Sub = S.getSubStmt();
813 const ReturnStmt *R = cast<ReturnStmt>(Sub);
814 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
815 } break;
816 case attr::CXXAssume: {
817 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
818 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
819 !Assumption->HasSideEffects(getContext())) {
820 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
821 Builder.CreateAssumption(AssumptionVal);
822 }
823 } break;
824 case attr::Atomic:
825 AA = cast<AtomicAttr>(A);
826 break;
827 case attr::HLSLControlFlowHint: {
828 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
829 } break;
830 }
831 }
832 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
833 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
834 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
835 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
836 SaveAndRestore save_musttail(MustTailCall, musttail);
837 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
838 CGAtomicOptionsRAII AORAII(CGM, AA);
839 EmitStmt(S.getSubStmt(), S.getAttrs());
840}
841
842void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
843 // If this code is reachable then emit a stop point (if generating
844 // debug info). We have to do this ourselves because we are on the
845 // "simple" statement path.
846 if (HaveInsertPoint())
847 EmitStopPoint(&S);
848
849 ApplyAtomGroup Grp(getDebugInfo());
850 EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
851 }
852
853
854void
855CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
856 if (const LabelDecl *Target = S.getConstantTarget()) {
857 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
858 return;
859 }
860
861 // Ensure that we have an i8* for our PHI node.
862 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
863 Int8PtrTy, "addr");
864 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
865
866 // Get the basic block for the indirect goto.
867 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
868
869 // The first instruction in the block has to be the PHI for the switch dest,
870 // add an entry for this branch.
871 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
872
873 EmitBranch(IndGotoBB);
874 if (CurBB && CurBB->getTerminator())
875 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
876}
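// For illustration, a hypothetical computed-goto input that feeds the PHI
// above (GNU address-of-label extension):
//   void *tgt = cond ? &&a : &&b;
//   goto *tgt;
// Each indirect goto adds its address operand as an incoming value of the
// dispatch block's PHI node.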
877
878void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
879 const Stmt *Else = S.getElse();
880
881 // The else branch of a consteval if statement is always the only branch that
882 // can be runtime evaluated.
883 if (S.isConsteval()) {
884 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
885 if (Executed) {
886 RunCleanupsScope ExecutedScope(*this);
887 EmitStmt(Executed);
888 }
889 return;
890 }
891
892 // C99 6.8.4.1: The first substatement is executed if the expression compares
893 // unequal to 0. The condition must be a scalar type.
894 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
895 ApplyDebugLocation DL(*this, S.getCond());
896
897 if (S.getInit())
898 EmitStmt(S.getInit());
899
900 if (S.getConditionVariable())
901 EmitDecl(*S.getConditionVariable());
902
903 // If the condition constant folds and can be elided, try to avoid emitting
904 // the condition and the dead arm of the if/else.
905 bool CondConstant;
906 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
907 S.isConstexpr())) {
908 // Figure out which block (then or else) is executed.
909 const Stmt *Executed = S.getThen();
910 const Stmt *Skipped = Else;
911 if (!CondConstant) // Condition false?
912 std::swap(Executed, Skipped);
913
914 // If the skipped block has no labels in it, just emit the executed block.
915 // This avoids emitting dead code and simplifies the CFG substantially.
916 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
917 if (CondConstant)
918 incrementProfileCounter(&S);
919 if (Executed) {
920 MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
921 RunCleanupsScope ExecutedScope(*this);
922 EmitStmt(Executed);
923 }
924 PGO->markStmtMaybeUsed(Skipped);
925 return;
926 }
927 }
928
929 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
930 // the conditional branch.
931 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
932 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
933 llvm::BasicBlock *ElseBlock = ContBlock;
934 if (Else)
935 ElseBlock = createBasicBlock("if.else");
936
937 // Prefer the PGO based weights over the likelihood attribute.
938 // When the build isn't optimized the metadata isn't used, so don't generate
939 // it.
940 // Also, differentiate between disabled PGO and a never executed branch with
941 // PGO. Assuming PGO is in use:
942 // - we want to ignore the [[likely]] attribute if the branch is never
943 // executed,
944 // - assuming the profile is poor, preserving the attribute may still be
945 // beneficial.
946 // As an approximation, preserve the attribute only if both the branch and the
947 // parent context were not executed.
948 Stmt::Likelihood LH = Stmt::LH_None;
949 uint64_t ThenCount = getProfileCount(S.getThen());
950 if (!ThenCount && !getCurrentProfileCount() &&
951 CGM.getCodeGenOpts().OptimizationLevel)
952 LH = Stmt::getLikelihood(S.getThen(), Else);
953
954 // When measuring MC/DC, always fully evaluate the condition up front using
955 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
956 // executing the body of the if.then or if.else. This is useful for when
957 // there is a 'return' within the body, but this is particularly beneficial
958 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
959 // updates are kept linear and consistent.
960 if (!CGM.getCodeGenOpts().MCDCCoverage) {
961 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
962 /*ConditionalOp=*/nullptr,
963 /*ConditionalDecl=*/S.getConditionVariable());
964 } else {
965 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
966 MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
967 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
968 }
969
970 // Emit the 'then' code.
971 EmitBlock(ThenBlock);
972 if (llvm::EnableSingleByteCoverage)
973 incrementProfileCounter(S.getThen());
974 else
975 incrementProfileCounter(&S);
976 {
977 RunCleanupsScope ThenScope(*this);
978 EmitStmt(S.getThen());
979 }
980 EmitBranch(ContBlock);
981
982 // Emit the 'else' code if present.
983 if (Else) {
984 {
985 // There is no need to emit line number for an unconditional branch.
986 auto NL = ApplyDebugLocation::CreateEmpty(*this);
987 EmitBlock(ElseBlock);
988 }
989 // When single byte coverage mode is enabled, add a counter to else block.
990 if (llvm::EnableSingleByteCoverage)
991 incrementProfileCounter(Else);
992 {
993 RunCleanupsScope ElseScope(*this);
994 EmitStmt(Else);
995 }
996 {
997 // There is no need to emit line number for an unconditional branch.
998 auto NL = ApplyDebugLocation::CreateEmpty(*this);
999 EmitBranch(ContBlock);
1000 }
1001 }
1002
1003 // Emit the continuation block for code after the if.
1004 EmitBlock(ContBlock, true);
1005
1006 // When single byte coverage mode is enabled, add a counter to continuation
1007 // block.
1008 if (llvm::EnableSingleByteCoverage)
1009 incrementProfileCounter(&S);
1010}
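// For illustration, hypothetical inputs for the constant-folding path above:
//   if (0) f(); else g();  // only 'g()' is emitted
//   if (0) { L: f(); }     // 'L:' may be reached by goto, so the branch
//                          // cannot be elided and both arms are emitted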
1011
1012bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
1013 bool HasEmptyBody) {
1014 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1015 CodeGenOptions::FiniteLoopsKind::Never)
1016 return false;
1017
1018 // Now apply rules for plain C (see 6.8.5.6 in C11).
1019 // Loops with constant conditions do not have to make progress in any C
1020 // version.
1021 // As an extension, we consider loops whose constant expression
1022 // can be constant-folded.
1023 Expr::EvalResult Result;
1024 bool CondIsConstInt =
1025 !ControllingExpression ||
1026 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1027 Result.Val.isInt());
1028
1029 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1030 Result.Val.getInt().getBoolValue());
1031
1032 // Loops with non-constant conditions must make progress in C11 and later.
1033 if (getLangOpts().C11 && !CondIsConstInt)
1034 return true;
1035
1036 // [C++26][intro.progress] (DR)
1037 // The implementation may assume that any thread will eventually do one of the
1038 // following:
1039 // [...]
1040 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1041 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1042 CodeGenOptions::FiniteLoopsKind::Always ||
1043 getLangOpts().CPlusPlus11) {
1044 if (HasEmptyBody && CondIsTrue) {
1045 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1046 return false;
1047 }
1048 return true;
1049 }
1050 return false;
1051}
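// For illustration, hypothetical loops under the rules above:
//   while (x) ;      // non-constant condition: must make progress in C11
//   while (1) ;      // constant-true condition: exempt in C
//   while (true) {}  // C++26 trivial infinite loop: MustProgress is removed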
1052
1053// [C++26][stmt.iter.general] (DR)
1054// A trivially empty iteration statement is an iteration statement matching one
1055// of the following forms:
1056// - while ( expression ) ;
1057// - while ( expression ) { }
1058// - do ; while ( expression ) ;
1059// - do { } while ( expression ) ;
1060// - for ( init-statement expression(opt); ) ;
1061// - for ( init-statement expression(opt); ) { }
1062template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1063 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1064 if (S.getInc())
1065 return false;
1066 }
1067 const Stmt *Body = S.getBody();
1068 if (!Body || isa<NullStmt>(Body))
1069 return true;
1070 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1071 return Compound->body_empty();
1072 return false;
1073}
1074
1075void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
1076 ArrayRef<const Attr *> WhileAttrs) {
1077 // Emit the header for the loop, which will also become
1078 // the continue target.
1079 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1080 EmitBlock(LoopHeader.getBlock());
1081
1083 ConvergenceTokenStack.push_back(
1084 emitConvergenceLoopToken(LoopHeader.getBlock()));
1085
1086 // Create an exit block for when the condition fails, which will
1087 // also become the break target.
1088 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
1089
1090 // Store the blocks to use for break and continue.
1091 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1092
1093 // C++ [stmt.while]p2:
1094 // When the condition of a while statement is a declaration, the
1095 // scope of the variable that is declared extends from its point
1096 // of declaration (3.3.2) to the end of the while statement.
1097 // [...]
1098 // The object created in a condition is destroyed and created
1099 // with each iteration of the loop.
1100 RunCleanupsScope ConditionScope(*this);
1101
1102 if (S.getConditionVariable())
1103 EmitDecl(*S.getConditionVariable());
1104
1105 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1106 // evaluation of the controlling expression takes place before each
1107 // execution of the loop body.
1108 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1109
1110 MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
1111
1112 // while(1) is common, avoid extra exit blocks. Be sure
1113 // to correctly handle break/continue though.
1114 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1115 bool EmitBoolCondBranch = !C || !C->isOne();
1116 const SourceRange &R = S.getSourceRange();
1117 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1118 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1119 SourceLocToDebugLoc(R.getEnd()),
1120 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1121
1122 // When single byte coverage mode is enabled, add a counter to loop condition.
1123 if (llvm::EnableSingleByteCoverage)
1124 incrementProfileCounter(S.getCond());
1125
1126 // As long as the condition is true, go to the loop body.
1127 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1128 if (EmitBoolCondBranch) {
1129 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1130 if (ConditionScope.requiresCleanups())
1131 ExitBlock = createBasicBlock("while.exit");
1132 llvm::MDNode *Weights =
1133 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1134 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1135 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1136 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1137 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1138 // Key Instructions: Emit the condition and branch as separate source
1139 // location atoms otherwise we may omit a step onto the loop condition in
1140 // favour of the `while` keyword.
1141 // FIXME: We could have the branch as the backup location for the condition,
1142 // which would probably be a better experience. Explore this later.
1143 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1144 addInstToNewSourceAtom(CondI, nullptr);
1145 addInstToNewSourceAtom(I, nullptr);
1146
1147 if (ExitBlock != LoopExit.getBlock()) {
1148 EmitBlock(ExitBlock);
1149 EmitBranchThroughCleanup(LoopExit);
1150 }
1151 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1152 CGM.getDiags().Report(A->getLocation(),
1153 diag::warn_attribute_has_no_effect_on_infinite_loop)
1154 << A << A->getRange();
1155 CGM.getDiags().Report(
1156 S.getWhileLoc(),
1157 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1158 << SourceRange(S.getWhileLoc(), S.getRParenLoc());
1159 }
1160
1161 // Emit the loop body. We have to emit this in a cleanup scope
1162 // because it might be a singleton DeclStmt.
1163 {
1164 RunCleanupsScope BodyScope(*this);
1165 EmitBlock(LoopBody);
1166 // When single byte coverage mode is enabled, add a counter to the body.
1167 if (llvm::EnableSingleByteCoverage)
1168 incrementProfileCounter(S.getBody());
1169 else
1170 incrementProfileCounter(&S);
1171 EmitStmt(S.getBody());
1172 }
1173
1174 BreakContinueStack.pop_back();
1175
1176 // Immediately force cleanup.
1177 ConditionScope.ForceCleanup();
1178
1179 EmitStopPoint(&S);
1180 // Branch to the loop header again.
1181 EmitBranch(LoopHeader.getBlock());
1182
1183 LoopStack.pop();
1184
1185 // Emit the exit block.
1186 EmitBlock(LoopExit.getBlock(), true);
1187
1188 // The LoopHeader typically is just a branch if we skipped emitting
1189 // a branch, try to erase it.
1190 if (!EmitBoolCondBranch)
1191 SimplifyForwardingBlocks(LoopHeader.getBlock());
1192
1193 // When single byte coverage mode is enabled, add a counter to continuation
1194 // block.
1197
1199 ConvergenceTokenStack.pop_back();
1200}
1201
1201
1202void CodeGenFunction::EmitDoStmt(const DoStmt &S,
1203 ArrayRef<const Attr *> DoAttrs) {
1204 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
1205 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1206
1207 uint64_t ParentCount = getCurrentProfileCount();
1208
1209 // Store the blocks to use for break and continue.
1210 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));
1211
1212 // Emit the body of the loop.
1213 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1214
1215 if (llvm::EnableSingleByteCoverage)
1216 EmitBlockWithFallThrough(LoopBody, S.getBody());
1217 else
1218 EmitBlockWithFallThrough(LoopBody, &S);
1219
1220 if (CGM.shouldEmitConvergenceTokens())
1221 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));
1222
1223 {
1224 RunCleanupsScope BodyScope(*this);
1225 EmitStmt(S.getBody());
1226 }
1227
1228 EmitBlock(LoopCond.getBlock());
1229 // When single byte coverage mode is enabled, add a counter to loop condition.
1230 if (llvm::EnableSingleByteCoverage)
1231 incrementProfileCounter(S.getCond());
1232
1233 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1234 // after each execution of the loop body."
1235
1236 // Evaluate the conditional in the while header.
1237 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1238 // compares unequal to 0. The condition must be a scalar type.
1239 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1240
1241 BreakContinueStack.pop_back();
1242
1243 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1244 // to correctly handle break/continue though.
1245 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1246 bool EmitBoolCondBranch = !C || !C->isZero();
1247
1248 const SourceRange &R = S.getSourceRange();
1249 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1250 SourceLocToDebugLoc(R.getBegin()),
1251 SourceLocToDebugLoc(R.getEnd()),
1252 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1253
1254 // As long as the condition is true, iterate the loop.
1255 if (EmitBoolCondBranch) {
1256 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1257 auto *I = Builder.CreateCondBr(
1258 BoolCondVal, LoopBody, LoopExit.getBlock(),
1259 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1260
1261 // Key Instructions: Emit the condition and branch as separate source
1262 // location atoms otherwise we may omit a step onto the loop condition in
1263 // favour of the closing brace.
1264 // FIXME: We could have the branch as the backup location for the condition,
1265 // which would probably be a better experience (no jumping to the brace).
1266 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1267 addInstToNewSourceAtom(CondI, nullptr);
1268 addInstToNewSourceAtom(I, nullptr);
1269 }
1270
1271 LoopStack.pop();
1272
1273 // Emit the exit block.
1274 EmitBlock(LoopExit.getBlock());
1275
1276 // The DoCond block typically is just a branch if we skipped
1277 // emitting a branch, try to erase it.
1278 if (!EmitBoolCondBranch)
1279 SimplifyForwardingBlocks(LoopCond.getBlock());
1280
1281 // When single byte coverage mode is enabled, add a counter to continuation
1282 // block.
1283 if (llvm::EnableSingleByteCoverage)
1284 incrementProfileCounter(&S);
1285
1286 if (CGM.shouldEmitConvergenceTokens())
1287 ConvergenceTokenStack.pop_back();
1288}
1289
1290void CodeGenFunction::EmitForStmt(const ForStmt &S,
1291 ArrayRef<const Attr *> ForAttrs) {
1292 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1293
1294 LexicalScope ForScope(*this, S.getSourceRange());
1295
1296 // Evaluate the first part before the loop.
1297 if (S.getInit())
1298 EmitStmt(S.getInit());
1299
1300 // Start the loop with a block that tests the condition.
1301 // If there's an increment, the continue scope will be overwritten
1302 // later.
1303 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1304 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1305 EmitBlock(CondBlock);
1306
1307 if (CGM.shouldEmitConvergenceTokens())
1308 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1309
1310 const SourceRange &R = S.getSourceRange();
1311 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1312 SourceLocToDebugLoc(R.getBegin()),
1313 SourceLocToDebugLoc(R.getEnd()),
1314 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1315
1316 // Create a cleanup scope for the condition variable cleanups.
1317 LexicalScope ConditionScope(*this, S.getSourceRange());
1318
1319 // If the for loop doesn't have an increment we can just use the condition as
1320 // the continue block. Otherwise, if there is no condition variable, we can
1321 // form the continue block now. If there is a condition variable, we can't
1322 // form the continue block until after we've emitted the condition, because
1323 // the condition is in scope in the increment, but Sema's jump diagnostics
1324 // ensure that there are no continues from the condition variable that jump
1325 // to the loop increment.
1326 JumpDest Continue;
1327 if (!S.getInc())
1328 Continue = CondDest;
1329 else if (!S.getConditionVariable())
1330 Continue = getJumpDestInCurrentScope("for.inc");
1331 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1332
1333 if (S.getCond()) {
1334 // If the for statement has a condition scope, emit the local variable
1335 // declaration.
1336 if (S.getConditionVariable()) {
1337 EmitDecl(*S.getConditionVariable());
1338
1339 // We have entered the condition variable's scope, so we're now able to
1340 // jump to the continue block.
1341 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1342 BreakContinueStack.back().ContinueBlock = Continue;
1343 }
1344
1345 // When single byte coverage mode is enabled, add a counter to loop
1346 // condition.
1347 if (llvm::EnableSingleByteCoverage)
1348 incrementProfileCounter(S.getCond());
1349
1350 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1351 // If there are any cleanups between here and the loop-exit scope,
1352 // create a block to stage a loop exit along.
1353 if (ForScope.requiresCleanups())
1354 ExitBlock = createBasicBlock("for.cond.cleanup");
1355
1356 // As long as the condition is true, iterate the loop.
1357 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1358
1359 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1360 // compares unequal to 0. The condition must be a scalar type.
1361 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1362
1363 MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
1364
1365 llvm::MDNode *Weights =
1366 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1367 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1368 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1369 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1370
1371 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1372 // Key Instructions: Emit the condition and branch as separate atoms to
1373 // match existing loop stepping behaviour. FIXME: We could have the branch
1374 // as the backup location for the condition, which would probably be a
1375 // better experience (no jumping to the brace).
1376 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1377 addInstToNewSourceAtom(CondI, nullptr);
1378 addInstToNewSourceAtom(I, nullptr);
1379
1380 if (ExitBlock != LoopExit.getBlock()) {
1381 EmitBlock(ExitBlock);
1382 EmitBranchThroughCleanup(LoopExit);
1383 }
1384
1385 EmitBlock(ForBody);
1386 } else {
1387 // Treat it as a non-zero constant. Don't even create a new block for the
1388 // body, just fall into it.
1389 }
1390
1391 // When single byte coverage mode is enabled, add a counter to the body.
1392 if (llvm::EnableSingleByteCoverage)
1393 incrementProfileCounter(S.getBody());
1394 else
1395 incrementProfileCounter(&S);
1396 {
1397 // Create a separate cleanup scope for the body, in case it is not
1398 // a compound statement.
1399 RunCleanupsScope BodyScope(*this);
1400 EmitStmt(S.getBody());
1401 }
1402
1403 // The last block in the loop's body (which unconditionally branches to the
1404 // `inc` block if there is one).
1405 auto *FinalBodyBB = Builder.GetInsertBlock();
1406
1407 // If there is an increment, emit it next.
1408 if (S.getInc()) {
1409 EmitBlock(Continue.getBlock());
1410 EmitStmt(S.getInc());
1411 if (llvm::EnableSingleByteCoverage)
1412 incrementProfileCounter(S.getInc());
1413 }
1414
1415 BreakContinueStack.pop_back();
1416
1417 ConditionScope.ForceCleanup();
1418
1419 EmitStopPoint(&S);
1420 EmitBranch(CondBlock);
1421
1422 ForScope.ForceCleanup();
1423
1424 LoopStack.pop();
1425
1426 // Emit the fall-through block.
1427 EmitBlock(LoopExit.getBlock(), true);
1428
1429 // When single byte coverage mode is enabled, add a counter to continuation
1430 // block.
1431 if (llvm::EnableSingleByteCoverage)
1432 incrementProfileCounter(&S);
1433
1434 if (CGM.shouldEmitConvergenceTokens())
1435 ConvergenceTokenStack.pop_back();
1436
1437 if (FinalBodyBB) {
1438 // Key Instructions: We want the for closing brace to be step-able on to
1439 // match existing behaviour.
1440 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1441 }
1442}
1443
1444void
1445CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1446 ArrayRef<const Attr *> ForAttrs) {
1447 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1448
1449 LexicalScope ForScope(*this, S.getSourceRange());
1450
1451 // Evaluate the first pieces before the loop.
1452 if (S.getInit())
1453 EmitStmt(S.getInit());
1454 EmitStmt(S.getRangeStmt());
1455 EmitStmt(S.getBeginStmt());
1456 EmitStmt(S.getEndStmt());
1457
1458 // Start the loop with a block that tests the condition.
1459 // If there's an increment, the continue scope will be overwritten
1460 // later.
1461 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1462 EmitBlock(CondBlock);
1463
1464 if (CGM.shouldEmitConvergenceTokens())
1465 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1466
1467 const SourceRange &R = S.getSourceRange();
1468 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1469 SourceLocToDebugLoc(R.getBegin()),
1470 SourceLocToDebugLoc(R.getEnd()));
1471
1472 // If there are any cleanups between here and the loop-exit scope,
1473 // create a block to stage a loop exit along.
1474 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1475 if (ForScope.requiresCleanups())
1476 ExitBlock = createBasicBlock("for.cond.cleanup");
1477
1478 // The loop body, consisting of the specified body and the loop variable.
1479 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1480
1481 // The body is executed if the expression, contextually converted
1482 // to bool, is true.
1483 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1484 llvm::MDNode *Weights =
1485 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1486 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1487 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1488 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1489 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1490 // Key Instructions: Emit the condition and branch as separate atoms to
1491 // match existing loop stepping behaviour. FIXME: We could have the branch as
1492 // the backup location for the condition, which would probably be a better
1493 // experience.
1494 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1495 addInstToNewSourceAtom(CondI, nullptr);
1496 addInstToNewSourceAtom(I, nullptr);
1497
1498 if (ExitBlock != LoopExit.getBlock()) {
1499 EmitBlock(ExitBlock);
1500 EmitBranchThroughCleanup(LoopExit);
1501 }
1502
1503 EmitBlock(ForBody);
1504 if (llvm::EnableSingleByteCoverage)
1505 incrementProfileCounter(S.getBody());
1506 else
1507 incrementProfileCounter(&S);
1508
1509 // Create a block for the increment. In case of a 'continue', we jump there.
1510 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1511
1512 // Store the blocks to use for break and continue.
1513 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1514
1515 {
1516 // Create a separate cleanup scope for the loop variable and body.
1517 LexicalScope BodyScope(*this, S.getSourceRange());
1518 EmitStmt(S.getLoopVarStmt());
1519 EmitStmt(S.getBody());
1520 }
1521 // The last block in the loop's body (which unconditionally branches to the
1522 // `inc` block if there is one).
1523 auto *FinalBodyBB = Builder.GetInsertBlock();
1524
1525 EmitStopPoint(&S);
1526 // If there is an increment, emit it next.
1527 EmitBlock(Continue.getBlock());
1528 EmitStmt(S.getInc());
1529
1530 BreakContinueStack.pop_back();
1531
1532 EmitBranch(CondBlock);
1533
1534 ForScope.ForceCleanup();
1535
1536 LoopStack.pop();
1537
1538 // Emit the fall-through block.
1539 EmitBlock(LoopExit.getBlock(), true);
1540
1541 // When single byte coverage mode is enabled, add a counter to continuation
1542 // block.
1543 if (llvm::EnableSingleByteCoverage)
1544 incrementProfileCounter(&S);
1545
1546 if (CGM.shouldEmitConvergenceTokens())
1547 ConvergenceTokenStack.pop_back();
1548
1549 if (FinalBodyBB) {
1550 // We want the for closing brace to be step-able on to match existing
1551 // behaviour.
1552 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1553 }
1554}
1555
1556void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1557 if (RV.isScalar()) {
1558 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1559 } else if (RV.isAggregate()) {
1560 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1561 LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1562 EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1563 } else {
1564 EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1565 /*init*/ true);
1566 }
1567 EmitBranchThroughCleanup(ReturnBlock);
1568}
1569
1570namespace {
1571// RAII struct used to save and restore a return statement's result expression.
1572struct SaveRetExprRAII {
1573 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1574 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1575 CGF.RetExpr = RetExpr;
1576 }
1577 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1578 const Expr *OldRetExpr;
1579 CodeGenFunction &CGF;
1580};
1581} // namespace
1582
1583/// Determine if the given call uses the swiftasync calling convention.
1584static bool isSwiftAsyncCallee(const CallExpr *CE) {
1585 auto calleeQualType = CE->getCallee()->getType();
1586 const FunctionType *calleeType = nullptr;
1587 if (calleeQualType->isFunctionPointerType() ||
1588 calleeQualType->isFunctionReferenceType() ||
1589 calleeQualType->isBlockPointerType() ||
1590 calleeQualType->isMemberFunctionPointerType()) {
1591 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1592 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1593 calleeType = ty;
1594 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1595 if (auto methodDecl = CMCE->getMethodDecl()) {
1596 // getMethodDecl() doesn't handle member pointers at the moment.
1597 calleeType = methodDecl->getType()->castAs<FunctionType>();
1598 } else {
1599 return false;
1600 }
1601 } else {
1602 return false;
1603 }
1604 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1605}
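// For illustration, a hypothetical caller satisfying this predicate, which
// makes EmitReturnStmt below mark the call as musttail:
//   __attribute__((swiftasynccall)) void callee(void);
//   __attribute__((swiftasynccall)) void caller(void) { return callee(); }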
1606
1607/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1608/// if the function returns void, or may be missing one if the function returns
1609/// non-void. Fun stuff :).
1610void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1611 ApplyAtomGroup Grp(getDebugInfo());
1612 if (requiresReturnValueCheck()) {
1613 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1614 auto *SLocPtr =
1615 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1616 llvm::GlobalVariable::PrivateLinkage, SLoc);
1617 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1618 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1619 assert(ReturnLocation.isValid() && "No valid return location");
1620 Builder.CreateStore(SLocPtr, ReturnLocation);
1621 }
1622
1623 // Returning from an outlined SEH helper is UB, and we already warn on it.
1624 if (IsOutlinedSEHHelper) {
1625 Builder.CreateUnreachable();
1626 Builder.ClearInsertionPoint();
1627 }
1628
1629 // Emit the result value, even if unused, to evaluate the side effects.
1630 const Expr *RV = S.getRetValue();
1631
1632 // Record the result expression of the return statement. The recorded
1633 // expression is used to determine whether a block capture's lifetime should
1634 // end at the end of the full expression as opposed to the end of the scope
1635 // enclosing the block expression.
1636 //
1637 // This permits a small, easily-implemented exception to our over-conservative
1638 // rules about not jumping to statements following block literals with
1639 // non-trivial cleanups.
1640 SaveRetExprRAII SaveRetExpr(RV, *this);
1641
1642 RunCleanupsScope cleanupScope(*this);
1643 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1644 RV = EWC->getSubExpr();
1645
1646 // If we're in a swiftasynccall function, and the return expression is a
1647 // call to a swiftasynccall function, mark the call as the musttail call.
1648 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1649 if (RV && CurFnInfo &&
1650 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1651 if (auto CE = dyn_cast<CallExpr>(RV)) {
1652 if (isSwiftAsyncCallee(CE)) {
1653 SaveMustTail.emplace(MustTailCall, CE);
1654 }
1655 }
1656 }
1657
1658 // FIXME: Clean this up by using an LValue for ReturnTemp,
1659 // EmitStoreThroughLValue, and EmitAnyExpr.
1660 // Check if the NRVO candidate was not globalized in OpenMP mode.
1661 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1662 S.getNRVOCandidate()->isNRVOVariable() &&
1663 (!getLangOpts().OpenMP ||
1664 !CGM.getOpenMPRuntime()
1665 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1666 .isValid())) {
1667 // Apply the named return value optimization for this return statement,
1668 // which means doing nothing: the appropriate result has already been
1669 // constructed into the NRVO variable.
1670
1671 // If there is an NRVO flag for this variable, set it to 1 to indicate
1672 // that the cleanup code should not destroy the variable.
1673 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1674 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1675 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1676 // Make sure not to return anything, but evaluate the expression
1677 // for side effects.
1678 if (RV) {
1679 EmitAnyExpr(RV);
1680 }
1681 } else if (!RV) {
1682 // Do nothing (return value is left uninitialized)
1683 } else if (FnRetTy->isReferenceType()) {
1684 // If this function returns a reference, take the address of the expression
1685 // rather than the value.
1686 RValue Result = EmitReferenceBindingToExpr(RV);
1687 auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1688 addInstToCurrentSourceAtom(I, I->getValueOperand());
1689 } else {
1690 switch (getEvaluationKind(RV->getType())) {
1691 case TEK_Scalar: {
1692 llvm::Value *Ret = EmitScalarExpr(RV);
1693 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1694 EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
1695 /*isInit*/ true);
1696 } else {
1697 auto *I = Builder.CreateStore(Ret, ReturnValue);
1698 addInstToCurrentSourceAtom(I, I->getValueOperand());
1699 }
1700 break;
1701 }
1702 case TEK_Complex:
1703 EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1704 /*isInit*/ true);
1705 break;
1706 case TEK_Aggregate:
1707 EmitAggExpr(RV, AggValueSlot::forAddr(
1708 ReturnValue, Qualifiers(),
1709 AggValueSlot::IsDestructed,
1710 AggValueSlot::DoesNotNeedGCBarriers,
1711 AggValueSlot::IsNotAliased,
1712 getOverlapForReturnValue()));
1713 break;
1714 }
1715 }
1716
1717 ++NumReturnExprs;
1718 if (!RV || RV->isEvaluatable(getContext()))
1719 ++NumSimpleReturnExprs;
1720
1721 cleanupScope.ForceCleanup();
1722 EmitBranchThroughCleanup(ReturnBlock);
1723}
1724
1725void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1726 // As long as debug info is modeled with instructions, we have to ensure we
1727 // have a place to insert here and write the stop point here.
1728 if (HaveInsertPoint())
1729 EmitStopPoint(&S);
1730
1731 for (const auto *I : S.decls())
1732 EmitDecl(*I, /*EvaluateConditionDecl=*/true);
1733}
1734
1736 -> const BreakContinue * {
1737 if (!S.hasLabelTarget())
1738 return &BreakContinueStack.back();
1739
1740 const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
1741 assert(LoopOrSwitch && "break/continue target not set?");
1742 for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
1743 if (BC.LoopOrSwitch == LoopOrSwitch)
1744 return &BC;
1745
1746 llvm_unreachable("break/continue target not found");
1747}
1748
1749void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1750 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1751
1752 // If this code is reachable then emit a stop point (if generating
1753 // debug info). We have to do this ourselves because we are on the
1754 // "simple" statement path.
1755 if (HaveInsertPoint())
1756 EmitStopPoint(&S);
1757
1760}
1761
1762void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1763 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1764
1765 // If this code is reachable then emit a stop point (if generating
1766 // debug info). We have to do this ourselves because we are on the
1767 // "simple" statement path.
1768 if (HaveInsertPoint())
1769 EmitStopPoint(&S);
1770
1773}
1774
1775/// EmitCaseStmtRange - If the case statement range is not too big, then add
1776/// multiple cases to the switch instruction, one for each value within
1777/// the range. If the range is too big, then emit an "if" condition check.
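// For example (GNU case-range extension):
//   case 0 ... 9:    // small range: ten individual cases are added
//   case 10 ... 999: // large range: emitted as an "in bounds" check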
1778void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1779 ArrayRef<const Attr *> Attrs) {
1780 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1781
1782 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1783 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1784
1785 // Emit the code for this case. We do this first to make sure it is
1786 // properly chained from our predecessor before generating the
1787 // switch machinery to enter this block.
1788 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1789 EmitBlockWithFallThrough(CaseDest, &S);
1790 EmitStmt(S.getSubStmt());
1791
1792 // If range is empty, do nothing.
1793 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1794 return;
1795
1796 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1797 llvm::APInt Range = RHS - LHS;
1798 // FIXME: parameters such as this should not be hardcoded.
1799 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1800 // Range is small enough to add multiple switch instruction cases.
1801 uint64_t Total = getProfileCount(&S);
1802 unsigned NCases = Range.getZExtValue() + 1;
1803 // We only have one region counter for the entire set of cases here, so we
1804 // need to divide the weights evenly between the generated cases, ensuring
1805 // that the total weight is preserved. E.g., a weight of 5 over three cases
1806 // will be distributed as weights of 2, 2, and 1.
1807 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1808 for (unsigned I = 0; I != NCases; ++I) {
1809 if (SwitchWeights)
1810 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1811 else if (SwitchLikelihood)
1812 SwitchLikelihood->push_back(LH);
1813
1814 if (Rem)
1815 Rem--;
1816 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1817 ++LHS;
1818 }
1819 return;
1820 }
1821
1822 // The range is too big. Emit "if" condition into a new block,
1823 // making sure to save and restore the current insertion point.
1824 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1825
1826 // Push this test onto the chain of range checks (which terminates
1827 // in the default basic block). The switch's default will be changed
1828 // to the top of this chain after switch emission is complete.
1829 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1830 CaseRangeBlock = createBasicBlock("sw.caserange");
1831
1832 CurFn->insert(CurFn->end(), CaseRangeBlock);
1833 Builder.SetInsertPoint(CaseRangeBlock);
1834
1835 // Emit range check.
1836 llvm::Value *Diff =
1837 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1838 llvm::Value *Cond =
1839 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
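// E.g. for "case 10 ... 20:" this emits the single unsigned check
//   (Cond - 10) ule 10
// where unsigned wraparound makes values below 10 compare as huge, so one
// comparison covers both ends of the range.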
1840
1841 llvm::MDNode *Weights = nullptr;
1842 if (SwitchWeights) {
1843 uint64_t ThisCount = getProfileCount(&S);
1844 uint64_t DefaultCount = (*SwitchWeights)[0];
1845 Weights = createProfileWeights(ThisCount, DefaultCount);
1846
1847 // Since we're chaining the switch default through each large case range, we
1848 // need to update the weight for the default, i.e., the first case, to include
1849 // this case.
1850 (*SwitchWeights)[0] += ThisCount;
1851 } else if (SwitchLikelihood)
1852 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1853
1854 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1855
1856 // Restore the appropriate insertion point.
1857 if (RestoreBB)
1858 Builder.SetInsertPoint(RestoreBB);
1859 else
1860 Builder.ClearInsertionPoint();
1861}
1862
1863void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1864 ArrayRef<const Attr *> Attrs) {
1865 // If there is no enclosing switch instance that we're aware of, then this
1866 // case statement and its block can be elided. This situation only happens
1867 // when we've constant-folded the switch, are emitting the constant case,
1868 // and part of the constant case includes another case statement. For
1869 // instance: switch (4) { case 4: do { case 5: } while (1); }
1870 if (!SwitchInsn) {
1871 EmitStmt(S.getSubStmt());
1872 return;
1873 }
1874
1875 // Handle case ranges.
1876 if (S.getRHS()) {
1877 EmitCaseStmtRange(S, Attrs);
1878 return;
1879 }
1880
1881 llvm::ConstantInt *CaseVal =
1882 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1883
1884 // Emit debuginfo for the case value if it is an enum value.
1885 const ConstantExpr *CE;
1886 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1887 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1888 else
1889 CE = dyn_cast<ConstantExpr>(S.getLHS());
1890 if (CE) {
1891 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1892 if (CGDebugInfo *Dbg = getDebugInfo())
1893 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1894 Dbg->EmitGlobalVariable(DE->getDecl(),
1895 APValue(llvm::APSInt(CaseVal->getValue())));
1896 }
1897
1898 if (SwitchLikelihood)
1899 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1900
1901 // If the body of the case is just a 'break', try to not emit an empty block.
1902 // If we're profiling or we're not optimizing, leave the block in for better
1903 // debug and coverage analysis.
1904 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1905 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1906 isa<BreakStmt>(S.getSubStmt())) {
1907 JumpDest Block = BreakContinueStack.back().BreakBlock;
1908
1909 // Only do this optimization if there are no cleanups that need emitting.
1910 if (isObviouslyBranchWithoutCleanups(Block)) {
1911 if (SwitchWeights)
1912 SwitchWeights->push_back(getProfileCount(&S));
1913 SwitchInsn->addCase(CaseVal, Block.getBlock());
1914
1915 // If there was a fallthrough into this case, make sure to redirect it to
1916 // the end of the switch as well.
1917 if (Builder.GetInsertBlock()) {
1918 Builder.CreateBr(Block.getBlock());
1919 Builder.ClearInsertionPoint();
1920 }
1921 return;
1922 }
1923 }
1924
1925 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1926 EmitBlockWithFallThrough(CaseDest, &S);
1927 if (SwitchWeights)
1928 SwitchWeights->push_back(getProfileCount(&S));
1929 SwitchInsn->addCase(CaseVal, CaseDest);
1930
1931 // Recursively emitting the statement is acceptable, but is not wonderful for
1932 // code where we have many case statements nested together, i.e.:
1933 // case 1:
1934 // case 2:
1935 // case 3: etc.
1936 // Handling this recursively will create a new block for each case statement
1937 // that falls through to the next case which is IR intensive. It also causes
1938 // deep recursion which can run into stack depth limitations. Handle
1939 // sequential non-range case statements specially.
1940 //
1941 // TODO When the next case has a likelihood attribute the code returns to the
1942 // recursive algorithm. Maybe improve this case if it becomes common practice
1943 // to use a lot of attributes.
1944 const CaseStmt *CurCase = &S;
1945 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1946
1947 // Otherwise, iteratively add consecutive cases to this switch stmt.
1948 while (NextCase && NextCase->getRHS() == nullptr) {
1949 CurCase = NextCase;
1950 llvm::ConstantInt *CaseVal =
1951 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1952
1953 if (SwitchWeights)
1954 SwitchWeights->push_back(getProfileCount(NextCase));
1955 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1956 CaseDest = createBasicBlock("sw.bb");
1957 EmitBlockWithFallThrough(CaseDest, CurCase);
1958 }
1959 // Since this loop is only executed when the CaseStmt has no attributes,
1960 // use a hard-coded value.
1961 if (SwitchLikelihood)
1962 SwitchLikelihood->push_back(Stmt::LH_None);
1963
1964 SwitchInsn->addCase(CaseVal, CaseDest);
1965 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1966 }
1967
1968 // Generate a stop point for debug info if the case statement is
1969 // followed by a default statement. A fallthrough case before a
1970 // default case gets its own branch target.
1971 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1972 EmitStopPoint(CurCase);
1973
1974 // Normal default recursion for non-cases.
1975 EmitStmt(CurCase->getSubStmt());
1976}
1977
1978void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1979 ArrayRef<const Attr *> Attrs) {
1980 // If there is no enclosing switch instance that we're aware of, then this
1981 // default statement can be elided. This situation only happens when we've
1982 // constant-folded the switch.
1983 if (!SwitchInsn) {
1984 EmitStmt(S.getSubStmt());
1985 return;
1986 }
1987
1988 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1989 assert(DefaultBlock->empty() &&
1990 "EmitDefaultStmt: Default block already defined?");
1991
1992 if (SwitchLikelihood)
1993 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1994
1995 EmitBlockWithFallThrough(DefaultBlock, &S);
1996
1997 EmitStmt(S.getSubStmt());
1998}
1999
2000/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2001/// constant value that is being switched on, see if we can dead code eliminate
2002/// the body of the switch to a simple series of statements to emit. Basically,
2003/// on a switch (5) we want to find these statements:
2004/// case 5:
2005/// printf(...); <--
2006/// ++i; <--
2007/// break;
2008///
2009/// and add them to the ResultStmts vector. If it is unsafe to do this
2010/// transformation (for example, one of the elided statements contains a label
2011/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2012/// should include statements after it (e.g. the printf() line is a substmt of
2013/// the case) then return CSFC_FallThrough. If we handled it and found a break
2014/// statement, then return CSFC_Success.
2015///
2016/// If Case is non-null, then we are looking for the specified case, checking
2017/// that nothing we jump over contains labels. If Case is null, then we found
2018/// the case and are looking for the break.
2019///
2020/// If the recursive walk actually finds our Case, then we set FoundCase to
2021/// true.
2022///
2023enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
2024static CSFC_Result CollectStatementsForCase(const Stmt *S,
2025 const SwitchCase *Case,
2026 bool &FoundCase,
2027 SmallVectorImpl<const Stmt*> &ResultStmts) {
2028 // If this is a null statement, just succeed.
2029 if (!S)
2030 return Case ? CSFC_Success : CSFC_FallThrough;
2031
2032 // If this is the switchcase (case 4: or default) that we're looking for, then
2033 // we're in business. Just add the substatement.
2034 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2035 if (S == Case) {
2036 FoundCase = true;
2037 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2038 ResultStmts);
2039 }
2040
2041 // Otherwise, this is some other case or default statement, just ignore it.
2042 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2043 ResultStmts);
2044 }
2045
2046 // If we are in the live part of the code and we found our break statement,
2047 // return a success!
2048 if (!Case && isa<BreakStmt>(S))
2049 return CSFC_Success;
2050
2051 // If this is a switch statement, then it might contain the SwitchCase, the
2052 // break, or neither.
2053 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2054 // Handle this as two cases: we might be looking for the SwitchCase (if so
2055 // the skipped statements must be skippable) or we might already have it.
2056 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2057 bool StartedInLiveCode = FoundCase;
2058 unsigned StartSize = ResultStmts.size();
2059
2060 // If we've not found the case yet, scan through looking for it.
2061 if (Case) {
2062 // Keep track of whether we see a skipped declaration. The code could be
2063 // using the declaration even if it is skipped, so we can't optimize out
2064 // the decl if the kept statements might refer to it.
2065 bool HadSkippedDecl = false;
2066
2067 // If we're looking for the case, just see if we can skip each of the
2068 // substatements.
2069 for (; Case && I != E; ++I) {
2070 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2071
2072 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2073 case CSFC_Failure: return CSFC_Failure;
2074 case CSFC_Success:
2075 // A successful result means either 1) that the statement doesn't
2076 // have the case and is skippable, or 2) that it does contain the case
2077 // value and also contains the break to exit the switch. In the latter
2078 // case, we just verify the rest of the statements are elidable.
2079 if (FoundCase) {
2080 // If we found the case and skipped declarations, we can't do the
2081 // optimization.
2082 if (HadSkippedDecl)
2083 return CSFC_Failure;
2084
2085 for (++I; I != E; ++I)
2086 if (CodeGenFunction::ContainsLabel(*I, true))
2087 return CSFC_Failure;
2088 return CSFC_Success;
2089 }
2090 break;
2091 case CSFC_FallThrough:
2092 // If we have a fallthrough condition, then we must have found the
2093 // case and started to include statements. Consider the rest of the
2094 // statements in the compound statement as candidates for inclusion.
2095 assert(FoundCase && "Didn't find case but returned fallthrough?");
2096 // We recursively found Case, so we're not looking for it anymore.
2097 Case = nullptr;
2098
2099 // If we found the case and skipped declarations, we can't do the
2100 // optimization.
2101 if (HadSkippedDecl)
2102 return CSFC_Failure;
2103 break;
2104 }
2105 }
2106
2107 if (!FoundCase)
2108 return CSFC_Success;
2109
2110 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2111 }
2112
2113 // If we have statements in our range, then we know that the statements are
2114 // live and need to be added to the set of statements we're tracking.
2115 bool AnyDecls = false;
2116 for (; I != E; ++I) {
2117 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
2118
2119 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2120 case CSFC_Failure: return CSFC_Failure;
2121 case CSFC_FallThrough:
2122 // A fallthrough result means that the statement was simple and just
2123 // included in ResultStmt, keep adding them afterwards.
2124 break;
2125 case CSFC_Success:
2126 // A successful result means that we found the break statement and
2127 // stopped statement inclusion. We just ensure that any leftover stmts
2128 // are skippable and return success ourselves.
2129 for (++I; I != E; ++I)
2130 if (CodeGenFunction::ContainsLabel(*I, true))
2131 return CSFC_Failure;
2132 return CSFC_Success;
2133 }
2134 }
2135
2136 // If we're about to fall out of a scope without hitting a 'break;', we
2137 // can't perform the optimization if there were any decls in that scope
2138 // (we'd lose their end-of-lifetime).
2139 if (AnyDecls) {
2140 // If the entire compound statement was live, there's one more thing we
2141 // can try before giving up: emit the whole thing as a single statement.
2142 // We can do that unless the statement contains a 'break;'.
2143 // FIXME: Such a break must be at the end of a construct within this one.
2144 // We could emit this by just ignoring the BreakStmts entirely.
2145 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2146 ResultStmts.resize(StartSize);
2147 ResultStmts.push_back(S);
2148 } else {
2149 return CSFC_Failure;
2150 }
2151 }
2152
2153 return CSFC_FallThrough;
2154 }
2155
2156 // Okay, this is some other statement that we don't handle explicitly, like a
2157 // for statement or increment etc. If we are skipping over this statement,
2158 // just verify it doesn't have labels, which would make it invalid to elide.
2159 if (Case) {
2160 if (CodeGenFunction::ContainsLabel(S, true))
2161 return CSFC_Failure;
2162 return CSFC_Success;
2163 }
2164
2165 // Otherwise, we want to include this statement. Everything is cool with that
2166 // so long as it doesn't contain a break out of the switch we're in.
2167 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2168
2169 // Otherwise, everything is great. Include the statement and tell the caller
2170 // that we fall through and include the next statement as well.
2171 ResultStmts.push_back(S);
2172 return CSFC_FallThrough;
2173}
2174
2175/// FindCaseStatementsForValue - Find the case statement being jumped to and
2176/// then invoke CollectStatementsForCase to find the list of statements to emit
2177/// for a switch on constant. See the comment above CollectStatementsForCase
2178/// for more details.
2179static bool FindCaseStatementsForValue(const SwitchStmt &S,
2180 const llvm::APSInt &ConstantCondValue,
2181 SmallVectorImpl<const Stmt*> &ResultStmts,
2182 ASTContext &C,
2183 const SwitchCase *&ResultCase) {
2184 // First step, find the switch case that is being branched to. We can do this
2185 // efficiently by scanning the SwitchCase list.
2186 const SwitchCase *Case = S.getSwitchCaseList();
2187 const DefaultStmt *DefaultCase = nullptr;
2188
2189 for (; Case; Case = Case->getNextSwitchCase()) {
2190 // It's either a default or case. Just remember the default statement in
2191 // case we're not jumping to any numbered cases.
2192 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2193 DefaultCase = DS;
2194 continue;
2195 }
2196
2197 // Check to see if this case is the one we're looking for.
2198 const CaseStmt *CS = cast<CaseStmt>(Case);
2199 // Don't handle case ranges yet.
2200 if (CS->getRHS()) return false;
2201
2202 // If we found our case, remember it as 'case'.
2203 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2204 break;
2205 }
2206
2207 // If we didn't find a matching case, we use a default if it exists, or we
2208 // elide the whole switch body!
2209 if (!Case) {
2210 // It is safe to elide the body of the switch if it doesn't contain labels
2211 // etc. If it is safe, return successfully with an empty ResultStmts list.
2212 if (!DefaultCase)
2213 return !CodeGenFunction::ContainsLabel(&S);
2214 Case = DefaultCase;
2215 }
2216
2217 // Ok, we know which case is being jumped to, try to collect all the
2218 // statements that follow it. This can fail for a variety of reasons. Also,
2219 // check to see that the recursive walk actually found our case statement.
2220 // Insane cases like this can fail to find it in the recursive walk since we
2221 // don't handle every stmt kind:
2222 // switch (4) {
2223 // while (1) {
2224 // case 4: ...
2225 bool FoundCase = false;
2226 ResultCase = Case;
2227 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2228 ResultStmts) != CSFC_Failure &&
2229 FoundCase;
2230}
2231
2232static std::optional<SmallVector<uint64_t, 16>>
2233getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2234 // Are there enough branches to weight them?
2235 if (Likelihoods.size() <= 1)
2236 return std::nullopt;
2237
2238 uint64_t NumUnlikely = 0;
2239 uint64_t NumNone = 0;
2240 uint64_t NumLikely = 0;
2241 for (const auto LH : Likelihoods) {
2242 switch (LH) {
2243 case Stmt::LH_Unlikely:
2244 ++NumUnlikely;
2245 break;
2246 case Stmt::LH_None:
2247 ++NumNone;
2248 break;
2249 case Stmt::LH_Likely:
2250 ++NumLikely;
2251 break;
2252 }
2253 }
2254
2255 // Is there a likelihood attribute used?
2256 if (NumUnlikely == 0 && NumLikely == 0)
2257 return std::nullopt;
2258
2259 // When multiple cases share the same code they can be combined during
2260 // optimization. In that case the weights of the branch will be the sum of
2261 // the individual weights. Make sure the combined sum of all neutral cases
2262 // doesn't exceed the value of a single likely attribute.
2263 // The additions both avoid divisions by 0 and make sure the weights of None
2264 // don't exceed the weight of Likely.
2265 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2266 const uint64_t None = Likely / (NumNone + 1);
2267 const uint64_t Unlikely = 0;
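// Worked example: with one likely case and two neutral cases,
// Likely = INT32_MAX / 3 and None = Likely / 3, so the two neutral cases
// combined (2 * None) still weigh less than the single likely case.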
2268
2269 SmallVector<uint64_t, 16> Result;
2270 Result.reserve(Likelihoods.size());
2271 for (const auto LH : Likelihoods) {
2272 switch (LH) {
2273 case Stmt::LH_Unlikely:
2274 Result.push_back(Unlikely);
2275 break;
2276 case Stmt::LH_None:
2277 Result.push_back(None);
2278 break;
2279 case Stmt::LH_Likely:
2280 Result.push_back(Likely);
2281 break;
2282 }
2283 }
2284
2285 return Result;
2286}
2287
2288void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2289 // Handle nested switch statements.
2290 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2291 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2292 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2293 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2294
2295 // See if we can constant fold the condition of the switch and therefore only
2296 // emit the live case statement (if any) of the switch.
2297 llvm::APSInt ConstantCondValue;
2298 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2299 SmallVector<const Stmt*, 4> CaseStmts;
2300 const SwitchCase *Case = nullptr;
2301 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2302 getContext(), Case)) {
2303 if (Case)
2304 incrementProfileCounter(Case);
2305 RunCleanupsScope ExecutedScope(*this);
2306
2307 if (S.getInit())
2308 EmitStmt(S.getInit());
2309
2310 // Emit the condition variable if needed inside the entire cleanup scope
2311 // used by this special case for constant folded switches.
2312 if (S.getConditionVariable())
2313 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2314
2315 // At this point, we are no longer "within" a switch instance, so
2316 // we can temporarily enforce this to ensure that any embedded case
2317 // statements are not emitted.
2318 SwitchInsn = nullptr;
2319
2320 // Okay, we can dead code eliminate everything except this case. Emit the
2321 // specified series of statements and we're good.
2322 for (const Stmt *CaseStmt : CaseStmts)
2323 EmitStmt(CaseStmt);
2324 incrementProfileCounter(&S);
2325 PGO->markStmtMaybeUsed(S.getBody());
2326
2327 // Now we want to restore the saved switch instance so that nested
2328 // switches continue to function properly
2329 SwitchInsn = SavedSwitchInsn;
2330
2331 return;
2332 }
2333 }
2334
2335 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2336
2337 RunCleanupsScope ConditionScope(*this);
2338
2339 if (S.getInit())
2340 EmitStmt(S.getInit());
2341
2342 if (S.getConditionVariable())
2343 EmitDecl(*S.getConditionVariable());
2344 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2345 MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
2346
2347 // Create basic block to hold stuff that comes after switch
2348 // statement. We also need to create a default block now so that
2349 // explicit case ranges tests can have a place to jump to on
2350 // failure.
2351 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2352 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2353 addInstToNewSourceAtom(SwitchInsn, CondV);
2354
2355 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2356 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2357 llvm::ConstantInt *BranchHintConstant =
2358 HLSLControlFlowAttr ==
2359 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2360 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2361 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2362 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2363 MDHelper.createConstant(BranchHintConstant)};
2364 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2365 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2366 }
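// E.g. an HLSL "[branch] switch" yields metadata value 1 here, while
// "[flatten] switch" yields 2.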
2367
2368 if (PGO->haveRegionCounts()) {
2369 // Walk the SwitchCase list to find how many there are.
2370 uint64_t DefaultCount = 0;
2371 unsigned NumCases = 0;
2372 for (const SwitchCase *Case = S.getSwitchCaseList();
2373 Case;
2374 Case = Case->getNextSwitchCase()) {
2375 if (isa<DefaultStmt>(Case))
2376 DefaultCount = getProfileCount(Case);
2377 NumCases += 1;
2378 }
2379 SwitchWeights = new SmallVector<uint64_t, 16>();
2380 SwitchWeights->reserve(NumCases);
2381 // The default needs to be first. We store the edge count, so we already
2382 // know the right weight.
2383 SwitchWeights->push_back(DefaultCount);
2384 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2385 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2386 // Initialize the default case.
2387 SwitchLikelihood->push_back(Stmt::LH_None);
2388 }
2389
2390 CaseRangeBlock = DefaultBlock;
2391
2392 // Clear the insertion point to indicate we are in unreachable code.
2393 Builder.ClearInsertionPoint();
2394
2395 // All break statements jump to SwitchExit. If BreakContinueStack is
2396 // non-empty then reuse the last ContinueBlock.
2397 JumpDest OuterContinue;
2398 if (!BreakContinueStack.empty())
2399 OuterContinue = BreakContinueStack.back().ContinueBlock;
2400
2401 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2402
2403 // Emit switch body.
2404 EmitStmt(S.getBody());
2405
2406 BreakContinueStack.pop_back();
2407
2408 // Update the default block in case explicit case range tests have
2409 // been chained on top.
2410 SwitchInsn->setDefaultDest(CaseRangeBlock);
2411
2412 // If a default was never emitted:
2413 if (!DefaultBlock->getParent()) {
2414 // If we have cleanups, emit the default block so that there's a
2415 // place to jump through the cleanups from.
2416 if (ConditionScope.requiresCleanups()) {
2417 EmitBlock(DefaultBlock);
2418
2419 // Otherwise, just forward the default block to the switch end.
2420 } else {
2421 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2422 delete DefaultBlock;
2423 }
2424 }
2425
2426 ConditionScope.ForceCleanup();
2427
2428 // Emit continuation.
2429 EmitBlock(SwitchExit.getBlock(), true);
2430 incrementProfileCounter(&S);
2431
2432 // If the switch has a condition wrapped by __builtin_unpredictable,
2433 // create metadata that specifies that the switch is unpredictable.
2434 // Don't bother if not optimizing because that metadata would not be used.
2435 auto *Call = dyn_cast<CallExpr>(S.getCond());
2436 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2437 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2438 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2439 llvm::MDBuilder MDHelper(getLLVMContext());
2440 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2441 MDHelper.createUnpredictable());
2442 }
2443 }
2444
2445 if (SwitchWeights) {
2446 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2447 "switch weights do not match switch cases");
2448 // If there's only one jump destination there's no sense weighting it.
2449 if (SwitchWeights->size() > 1)
2450 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2451 createProfileWeights(*SwitchWeights));
2452 delete SwitchWeights;
2453 } else if (SwitchLikelihood) {
2454 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2455 "switch likelihoods do not match switch cases");
2456 std::optional<SmallVector<uint64_t, 16>> LHW =
2457 getLikelihoodWeights(*SwitchLikelihood);
2458 if (LHW) {
2459 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2460 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2461 createProfileWeights(*LHW));
2462 }
2463 delete SwitchLikelihood;
2464 }
2465 SwitchInsn = SavedSwitchInsn;
2466 SwitchWeights = SavedSwitchWeights;
2467 SwitchLikelihood = SavedSwitchLikelihood;
2468 CaseRangeBlock = SavedCRBlock;
2469}
2470
2471static std::string
2472SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2473 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2474 std::string Result;
2475
2476 while (*Constraint) {
2477 switch (*Constraint) {
2478 default:
2479 Result += Target.convertConstraint(Constraint);
2480 break;
2481 // Ignore these
2482 case '*':
2483 case '?':
2484 case '!':
2485 case '=': // Will see this and the following in multi-alternative constraints.
2486 case '+':
2487 break;
2488 case '#': // Ignore the rest of the constraint alternative.
2489 while (Constraint[1] && Constraint[1] != ',')
2490 Constraint++;
2491 break;
2492 case '&':
2493 case '%':
2494 Result += *Constraint;
2495 while (Constraint[1] && Constraint[1] == *Constraint)
2496 Constraint++;
2497 break;
2498 case ',':
2499 Result += "|";
2500 break;
2501 case 'g':
2502 Result += "imr";
2503 break;
2504 case '[': {
2505 assert(OutCons &&
2506 "Must pass output names to constraints with a symbolic name");
2507 unsigned Index;
2508 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2509 assert(result && "Could not resolve symbolic name"); (void)result;
2510 Result += llvm::utostr(Index);
2511 break;
2512 }
2513 }
2514
2515 Constraint++;
2516 }
2517
2518 return Result;
2519}
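// For instance, the constraint string "g,m" typically comes back as
// "imr|m": 'g' expands to "imr" and the separator ',' becomes '|'.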
2520
2521/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2522/// as using a particular register add that as a constraint that will be used
2523/// in this asm stmt.
2524static std::string
2525AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2526 const TargetInfo &Target, CodeGenModule &CGM,
2527 const AsmStmt &Stmt, const bool EarlyClobber,
2528 std::string *GCCReg = nullptr) {
2529 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2530 if (!AsmDeclRef)
2531 return Constraint;
2532 const ValueDecl &Value = *AsmDeclRef->getDecl();
2533 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2534 if (!Variable)
2535 return Constraint;
2536 if (Variable->getStorageClass() != SC_Register)
2537 return Constraint;
2538 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2539 if (!Attr)
2540 return Constraint;
2541 StringRef Register = Attr->getLabel();
2542 assert(Target.isValidGCCRegisterName(Register));
2543 // We're using validateOutputConstraint here because we only care if
2544 // this is a register constraint.
2545 TargetInfo::ConstraintInfo Info(Constraint, "");
2546 if (Target.validateOutputConstraint(Info) &&
2547 !Info.allowsRegister()) {
2548 CGM.ErrorUnsupported(&Stmt, "__asm__");
2549 return Constraint;
2550 }
2551 // Canonicalize the register here before returning it.
2552 Register = Target.getNormalizedGCCRegisterName(Register);
2553 if (GCCReg != nullptr)
2554 *GCCReg = Register.str();
2555 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2556}
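// Illustrative use:
//   register int rv asm("eax");
//   asm("..." : "=r"(rv));
// turns the "r" constraint into "{eax}" ("&{eax}" when earlyclobber),
// pinning the operand to the named register.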
2557
2558std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2559 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2560 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2561 if (Info.allowsRegister() || !Info.allowsMemory()) {
2562 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2563 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2564
2565 llvm::Type *Ty = ConvertType(InputType);
2566 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2567 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2568 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2569 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2570
2571 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2572 nullptr};
2573 }
2574 }
2575
2576 Address Addr = InputValue.getAddress();
2577 ConstraintStr += '*';
2578 return {InputValue.getPointer(*this), Addr.getElementType()};
2579}
2580
2581std::pair<llvm::Value *, llvm::Type *>
2582CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2583 const Expr *InputExpr,
2584 std::string &ConstraintStr) {
2585 // If this can't be a register or memory, i.e., has to be a constant
2586 // (immediate or symbolic), try to emit it as such.
2587 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2588 if (Info.requiresImmediateConstant()) {
2589 Expr::EvalResult EVResult;
2590 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2591
2592 llvm::APSInt IntResult;
2593 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2594 getContext()))
2595 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2596 }
2597
2598 Expr::EvalResult Result;
2599 if (InputExpr->EvaluateAsInt(Result, getContext()))
2600 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2601 nullptr};
2602 }
2603
2604 if (Info.allowsRegister() || !Info.allowsMemory())
2605 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2606 return {EmitScalarExpr(InputExpr), nullptr};
2607 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2608 return {EmitScalarExpr(InputExpr), nullptr};
2609 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2610 LValue Dest = EmitLValue(InputExpr);
2611 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2612 InputExpr->getExprLoc());
2613}
2614
2615/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2616/// asm call instruction. The !srcloc MDNode contains a list of constant
2617/// integers which are the source locations of the start of each line in the
2618/// asm.
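// E.g. a two-line asm string such as "movl %1, %0\n\tincl %0" produces a
// node with two encoded locations, letting backend diagnostics point at
// the exact offending line.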
2619static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2620 CodeGenFunction &CGF) {
2621 SmallVector<llvm::Metadata *, 8> Locs;
2622 // Add the location of the first line to the MDNode.
2623 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2624 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2625 StringRef StrVal = Str->getString();
2626 if (!StrVal.empty()) {
2627 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2628 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2629 unsigned StartToken = 0;
2630 unsigned ByteOffset = 0;
2631
2632 // Add the location of the start of each subsequent line of the asm to the
2633 // MDNode.
2634 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2635 if (StrVal[i] != '\n') continue;
2636 SourceLocation LineLoc = Str->getLocationOfByte(
2637 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2638 Locs.push_back(llvm::ConstantAsMetadata::get(
2639 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2640 }
2641 }
2642
2643 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2644}
2645
2646static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2647 bool HasUnwindClobber, bool ReadOnly,
2648 bool ReadNone, bool NoMerge, bool NoConvergent,
2649 const AsmStmt &S,
2650 const std::vector<llvm::Type *> &ResultRegTypes,
2651 const std::vector<llvm::Type *> &ArgElemTypes,
2652 CodeGenFunction &CGF,
2653 std::vector<llvm::Value *> &RegResults) {
2654 if (!HasUnwindClobber)
2655 Result.addFnAttr(llvm::Attribute::NoUnwind);
2656
2657 if (NoMerge)
2658 Result.addFnAttr(llvm::Attribute::NoMerge);
2659 // Attach readnone and readonly attributes.
2660 if (!HasSideEffect) {
2661 if (ReadNone)
2662 Result.setDoesNotAccessMemory();
2663 else if (ReadOnly)
2664 Result.setOnlyReadsMemory();
2665 }
2666
2667 // Add elementtype attribute for indirect constraints.
2668 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2669 if (Pair.value()) {
2670 auto Attr = llvm::Attribute::get(
2671 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2672 Result.addParamAttr(Pair.index(), Attr);
2673 }
2674 }
2675
2676 // Slap the source location of the inline asm into a !srcloc metadata on the
2677 // call.
2678 const StringLiteral *SL;
2679 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2680 gccAsmStmt &&
2681 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2682 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2683 } else {
2684 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2685 // strings.
2686 llvm::Constant *Loc =
2687 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2688 Result.setMetadata("srcloc",
2689 llvm::MDNode::get(CGF.getLLVMContext(),
2690 llvm::ConstantAsMetadata::get(Loc)));
2691 }
2692
2693 // Make inline-asm calls Key for the debug info feature Key Instructions.
2694 CGF.addInstToNewSourceAtom(&Result, nullptr);
2695
2696 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2697 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2698 // convergent (meaning, they may call an intrinsically convergent op, such
2699 // as bar.sync, and so can't have certain optimizations applied around
2700 // them) unless it's explicitly marked 'noconvergent'.
2701 Result.addFnAttr(llvm::Attribute::Convergent);
2702 // Extract all of the register value results from the asm.
2703 if (ResultRegTypes.size() == 1) {
2704 RegResults.push_back(&Result);
2705 } else {
2706 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2707 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2708 RegResults.push_back(Tmp);
2709 }
2710 }
2711}
2712
2713static void
2714EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2715 const llvm::ArrayRef<llvm::Value *> RegResults,
2716 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2717 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2718 const llvm::ArrayRef<LValue> ResultRegDests,
2719 const llvm::ArrayRef<QualType> ResultRegQualTys,
2720 const llvm::BitVector &ResultTypeRequiresCast,
2721 const llvm::BitVector &ResultRegIsFlagReg) {
2722 CGBuilderTy &Builder = CGF.Builder;
2723 CodeGenModule &CGM = CGF.CGM;
2724 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2725
2726 assert(RegResults.size() == ResultRegTypes.size());
2727 assert(RegResults.size() == ResultTruncRegTypes.size());
2728 assert(RegResults.size() == ResultRegDests.size());
2729 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2730 // in which case its size may grow.
2731 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2732 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2733
2734 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2735 llvm::Value *Tmp = RegResults[i];
2736 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2737
2738 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2739 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2740 // value.
2741 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2742 llvm::Value *IsBooleanValue =
2743 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2744 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2745 Builder.CreateCall(FnAssume, IsBooleanValue);
2746 }
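// E.g. for an x86 flag output such as "=@ccz"(b), the asm result is
// guaranteed to be 0 or 1, and the llvm.assume above conveys exactly that
// to the optimizer.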
2747
2748 // If the result type of the LLVM IR asm doesn't match the result type of
2749 // the expression, do the conversion.
2750 if (ResultRegTypes[i] != TruncTy) {
2751
2752 // Truncate the integer result to the right size, note that TruncTy can be
2753 // a pointer.
2754 if (TruncTy->isFloatingPointTy())
2755 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2756 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2757 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2758 Tmp = Builder.CreateTrunc(
2759 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2760 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2761 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2762 uint64_t TmpSize =
2763 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2764 Tmp = Builder.CreatePtrToInt(
2765 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2766 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2767 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2768 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2769 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2770 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2771 }
2772 }
2773
2774 ApplyAtomGroup Grp(CGF.getDebugInfo());
2775 LValue Dest = ResultRegDests[i];
2776 // ResultTypeRequiresCast elements correspond to the first
2777 // ResultTypeRequiresCast.size() elements of RegResults.
2778 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2779 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2780 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2781 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2782 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2783 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2784 continue;
2785 }
2786
2787 QualType Ty =
2788 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2789 if (Ty.isNull()) {
2790 const Expr *OutExpr = S.getOutputExpr(i);
2791 CGM.getDiags().Report(OutExpr->getExprLoc(),
2792 diag::err_store_value_to_reg);
2793 return;
2794 }
2795 Dest = CGF.MakeAddrLValue(A, Ty);
2796 }
2797 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2798 }
2799}
2800
2801static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2802 const AsmStmt &S) {
2803 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2804
2805 std::string Asm;
2806 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2807 Asm = GCCAsm->getAsmString();
2808
2809 auto &Ctx = CGF->CGM.getLLVMContext();
2810
2811 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2812 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2813 {StrTy->getType()}, false);
2814 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2815
2816 CGF->Builder.CreateCall(UBF, {StrTy});
2817}
2818
2819void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2820 // Pop all cleanup blocks at the end of the asm statement.
2821 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2822
2823 // Assemble the final asm string.
2824 std::string AsmString = S.generateAsmString(getContext());
2825
2826 // Get all the output and input constraints together.
2827 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2828 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2829
2830 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2831 bool IsValidTargetAsm = true;
2832 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2833 StringRef Name;
2834 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2835 Name = GAS->getOutputName(i);
2836 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2837 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2838 if (IsHipStdPar && !IsValid)
2839 IsValidTargetAsm = false;
2840 else
2841 assert(IsValid && "Failed to parse output constraint");
2842 OutputConstraintInfos.push_back(Info);
2843 }
2844
2845 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2846 StringRef Name;
2847 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2848 Name = GAS->getInputName(i);
2849 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2850 bool IsValid =
2851 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2852 if (IsHipStdPar && !IsValid)
2853 IsValidTargetAsm = false;
2854 else
2855 assert(IsValid && "Failed to parse input constraint");
2856 InputConstraintInfos.push_back(Info);
2857 }
2858
2859 if (!IsValidTargetAsm)
2860 return EmitHipStdParUnsupportedAsm(this, S);
2861
2862 std::string Constraints;
2863
2864 std::vector<LValue> ResultRegDests;
2865 std::vector<QualType> ResultRegQualTys;
2866 std::vector<llvm::Type *> ResultRegTypes;
2867 std::vector<llvm::Type *> ResultTruncRegTypes;
2868 std::vector<llvm::Type *> ArgTypes;
2869 std::vector<llvm::Type *> ArgElemTypes;
2870 std::vector<llvm::Value*> Args;
2871 llvm::BitVector ResultTypeRequiresCast;
2872 llvm::BitVector ResultRegIsFlagReg;
2873
2874 // Keep track of inout constraints.
2875 std::string InOutConstraints;
2876 std::vector<llvm::Value*> InOutArgs;
2877 std::vector<llvm::Type*> InOutArgTypes;
2878 std::vector<llvm::Type*> InOutArgElemTypes;
2879
2880 // Keep track of out constraints for tied input operand.
2881 std::vector<std::string> OutputConstraints;
2882
2883 // Keep track of defined physregs.
2884 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2885
2886 // An inline asm can be marked readonly if it meets the following conditions:
2887 // - it doesn't have any sideeffects
2888 // - it doesn't clobber memory
2889 // - it doesn't return a value by-reference
2890 // It can be marked readnone if it doesn't have any input memory constraints
2891 // in addition to meeting the conditions listed above.
2892 bool ReadOnly = true, ReadNone = true;
2893
2894 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2895 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2896
2897 // Simplify the output constraint.
2898 std::string OutputConstraint(S.getOutputConstraint(i));
2899 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2900 getTarget(), &OutputConstraintInfos);
2901
2902 const Expr *OutExpr = S.getOutputExpr(i);
2903 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2904
2905 std::string GCCReg;
2906 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2907 getTarget(), CGM, S,
2908 Info.earlyClobber(),
2909 &GCCReg);
2910 // Give an error on multiple outputs to the same physreg.
2911 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2912 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2913
2914 OutputConstraints.push_back(OutputConstraint);
2915 LValue Dest = EmitLValue(OutExpr);
2916 if (!Constraints.empty())
2917 Constraints += ',';
2918
2919 // If this is a register output, then make the inline asm return it
2920 // by-value. If this is a memory result, return the value by-reference.
2921 QualType QTy = OutExpr->getType();
2922 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2923 hasAggregateEvaluationKind(QTy);
2924 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2925
2926 Constraints += "=" + OutputConstraint;
2927 ResultRegQualTys.push_back(QTy);
2928 ResultRegDests.push_back(Dest);
2929
2930 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2931 ResultRegIsFlagReg.push_back(IsFlagReg);
2932
2933 llvm::Type *Ty = ConvertTypeForMem(QTy);
2934 const bool RequiresCast = Info.allowsRegister() &&
2935 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2936 Ty->isAggregateType());
2937
2938 ResultTruncRegTypes.push_back(Ty);
2939 ResultTypeRequiresCast.push_back(RequiresCast);
2940
2941 if (RequiresCast) {
2942 unsigned Size = getContext().getTypeSize(QTy);
2943 if (Size)
2944 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2945 else
2946 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2947 }
2948 ResultRegTypes.push_back(Ty);
2949 // If this output is tied to an input, and if the input is larger, then
2950 // we need to set the actual result type of the inline asm node to be the
2951 // same as the input type.
2952 if (Info.hasMatchingInput()) {
2953 unsigned InputNo;
2954 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2955 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2956 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2957 break;
2958 }
2959 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2960
2961 QualType InputTy = S.getInputExpr(InputNo)->getType();
2962 QualType OutputType = OutExpr->getType();
2963
2964 uint64_t InputSize = getContext().getTypeSize(InputTy);
2965 if (getContext().getTypeSize(OutputType) < InputSize) {
2966 // Form the asm to return the value as a larger integer or fp type.
2967 ResultRegTypes.back() = ConvertType(InputTy);
2968 }
2969 }
2970 if (llvm::Type* AdjTy =
2971 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2972 ResultRegTypes.back()))
2973 ResultRegTypes.back() = AdjTy;
2974 else {
2975 CGM.getDiags().Report(S.getAsmLoc(),
2976 diag::err_asm_invalid_type_in_input)
2977 << OutExpr->getType() << OutputConstraint;
2978 }
2979
2980 // Update largest vector width for any vector types.
2981 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2982 LargestVectorWidth =
2983 std::max((uint64_t)LargestVectorWidth,
2984 VT->getPrimitiveSizeInBits().getKnownMinValue());
2985 } else {
2986 Address DestAddr = Dest.getAddress();
2987 // Matrix types in memory are represented by arrays, but accessed through
2988 // vector pointers, with the alignment specified on the access operation.
2989 // For inline assembly, update pointer arguments to use vector pointers.
2990 // Otherwise there will be a mismatch if the matrix is also an
2991 // input argument which is represented as a vector.
2992 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2993 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2994
2995 ArgTypes.push_back(DestAddr.getType());
2996 ArgElemTypes.push_back(DestAddr.getElementType());
2997 Args.push_back(DestAddr.emitRawPointer(*this));
2998 Constraints += "=*";
2999 Constraints += OutputConstraint;
3000 ReadOnly = ReadNone = false;
3001 }
3002
3003 if (Info.isReadWrite()) {
3004 InOutConstraints += ',';
3005
3006 const Expr *InputExpr = S.getOutputExpr(i);
3007 llvm::Value *Arg;
3008 llvm::Type *ArgElemType;
3009 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
3010 Info, Dest, InputExpr->getType(), InOutConstraints,
3011 InputExpr->getExprLoc());
3012
3013 if (llvm::Type* AdjTy =
3014 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
3015 Arg->getType()))
3016 Arg = Builder.CreateBitCast(Arg, AdjTy);
3017
3018 // Update largest vector width for any vector types.
3019 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3020 LargestVectorWidth =
3021 std::max((uint64_t)LargestVectorWidth,
3022 VT->getPrimitiveSizeInBits().getKnownMinValue());
3023 // Only tie earlyclobber physregs.
3024 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3025 InOutConstraints += llvm::utostr(i);
3026 else
3027 InOutConstraints += OutputConstraint;
3028
3029 InOutArgTypes.push_back(Arg->getType());
3030 InOutArgElemTypes.push_back(ArgElemType);
3031 InOutArgs.push_back(Arg);
3032 }
3033 }
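// E.g. a read-write operand "+r"(x) is emitted as an output "=r" plus a
// tied input whose constraint is just the output's index, such as "0".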
3034
3035 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3036 // to the return value slot. Only do this when returning in registers.
3037 if (isa<MSAsmStmt>(&S)) {
3038 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3039 if (RetAI.isDirect() || RetAI.isExtend()) {
3040 // Make a fake lvalue for the return value slot.
3041 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
3042 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3043 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3044 ResultRegDests, AsmString, S.getNumOutputs());
3045 SawAsmBlock = true;
3046 }
3047 }
3048
3049 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3050 const Expr *InputExpr = S.getInputExpr(i);
3051
3052 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3053
3054 if (Info.allowsMemory())
3055 ReadNone = false;
3056
3057 if (!Constraints.empty())
3058 Constraints += ',';
3059
3060 // Simplify the input constraint.
3061 std::string InputConstraint(S.getInputConstraint(i));
3062 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
3063 &OutputConstraintInfos);
3064
3065 InputConstraint = AddVariableConstraints(
3066 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3067 getTarget(), CGM, S, false /* No EarlyClobber */);
3068
3069 std::string ReplaceConstraint(InputConstraint);
3070 llvm::Value *Arg;
3071 llvm::Type *ArgElemType;
3072 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3073
3074 // If this input argument is tied to a larger output result, extend the
3075 // input to be the same size as the output. The LLVM backend wants to see
3076 // the input and output of a matching constraint be the same size. Note
3077 // that GCC does not define what the top bits are here. We use zext because
3078 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3079 if (Info.hasTiedOperand()) {
3080 unsigned Output = Info.getTiedOperand();
3081 QualType OutputType = S.getOutputExpr(Output)->getType();
3082 QualType InputTy = InputExpr->getType();
3083
3084 if (getContext().getTypeSize(OutputType) >
3085 getContext().getTypeSize(InputTy)) {
3086 // Use ptrtoint as appropriate so that we can do our extension.
3087 if (isa<llvm::PointerType>(Arg->getType()))
3088 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3089 llvm::Type *OutputTy = ConvertType(OutputType);
3090 if (isa<llvm::IntegerType>(OutputTy))
3091 Arg = Builder.CreateZExt(Arg, OutputTy);
3092 else if (isa<llvm::PointerType>(OutputTy))
3093 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3094 else if (OutputTy->isFloatingPointTy())
3095 Arg = Builder.CreateFPExt(Arg, OutputTy);
3096 }
3097 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3098 ReplaceConstraint = OutputConstraints[Output];
3099 }
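// E.g. asm("..." : "=r"(out64) : "0"(in32)) zero-extends the 32-bit input
// so both tied operands share the same 64-bit IR type.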
3100 if (llvm::Type* AdjTy =
3101 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3102 Arg->getType()))
3103 Arg = Builder.CreateBitCast(Arg, AdjTy);
3104 else
3105 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3106 << InputExpr->getType() << InputConstraint;
3107
3108 // Update largest vector width for any vector types.
3109 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3110 LargestVectorWidth =
3111 std::max((uint64_t)LargestVectorWidth,
3112 VT->getPrimitiveSizeInBits().getKnownMinValue());
3113
3114 ArgTypes.push_back(Arg->getType());
3115 ArgElemTypes.push_back(ArgElemType);
3116 Args.push_back(Arg);
3117 Constraints += InputConstraint;
3118 }
3119
3120 // Append the "input" part of inout constraints.
3121 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3122 ArgTypes.push_back(InOutArgTypes[i]);
3123 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3124 Args.push_back(InOutArgs[i]);
3125 }
3126 Constraints += InOutConstraints;
3127
3128 // Labels
3129 SmallVector<llvm::BasicBlock *, 16> Transfer;
3130 llvm::BasicBlock *Fallthrough = nullptr;
3131 bool IsGCCAsmGoto = false;
3132 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3133 IsGCCAsmGoto = GS->isAsmGoto();
3134 if (IsGCCAsmGoto) {
3135 for (const auto *E : GS->labels()) {
3136 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3137 Transfer.push_back(Dest.getBlock());
3138 if (!Constraints.empty())
3139 Constraints += ',';
3140 Constraints += "!i";
3141 }
3142 Fallthrough = createBasicBlock("asm.fallthrough");
3143 }
3144 }
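// Illustrative form of the asm goto handled here:
//   asm goto("jc %l0" : : : : carry);
// each label adds a "!i" constraint and a transfer block, and a synthetic
// fallthrough block receives the non-branching path.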
3145
3146 bool HasUnwindClobber = false;
3147
3148 // Clobbers
3149 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3150 std::string Clobber = S.getClobber(i);
3151
3152 if (Clobber == "memory")
3153 ReadOnly = ReadNone = false;
3154 else if (Clobber == "unwind") {
3155 HasUnwindClobber = true;
3156 continue;
3157 } else if (Clobber != "cc") {
3158 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3159 if (CGM.getCodeGenOpts().StackClashProtector &&
3160 getTarget().isSPRegName(Clobber)) {
3161 CGM.getDiags().Report(S.getAsmLoc(),
3162 diag::warn_stack_clash_protection_inline_asm);
3163 }
3164 }
3165
3166 if (isa<MSAsmStmt>(&S)) {
3167 if (Clobber == "eax" || Clobber == "edx") {
3168 if (Constraints.find("=&A") != std::string::npos)
3169 continue;
3170 std::string::size_type position1 =
3171 Constraints.find("={" + Clobber + "}");
3172 if (position1 != std::string::npos) {
3173 Constraints.insert(position1 + 1, "&");
3174 continue;
3175 }
3176 std::string::size_type position2 = Constraints.find("=A");
3177 if (position2 != std::string::npos) {
3178 Constraints.insert(position2 + 1, "&");
3179 continue;
3180 }
3181 }
3182 }
3183 if (!Constraints.empty())
3184 Constraints += ',';
3185
3186 Constraints += "~{";
3187 Constraints += Clobber;
3188 Constraints += '}';
3189 }
3190
3191 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3192 "unwind clobber can't be used with asm goto");
3193
3194 // Add machine-specific clobbers.
3195 std::string_view MachineClobbers = getTarget().getClobbers();
3196 if (!MachineClobbers.empty()) {
3197 if (!Constraints.empty())
3198 Constraints += ',';
3199 Constraints += MachineClobbers;
3200 }
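// Worked example (assuming an x86 target): for
//   asm volatile("" ::: "memory", "cc");
// the clobber loop above contributes "~{memory},~{cc}", and the target
// hook then appends its machine clobbers, giving
//   "~{memory},~{cc},~{dirflag},~{fpsr},~{flags}".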
3201
3202 llvm::Type *ResultType;
3203 if (ResultRegTypes.empty())
3204 ResultType = VoidTy;
3205 else if (ResultRegTypes.size() == 1)
3206 ResultType = ResultRegTypes[0];
3207 else
3208 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
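// Sketch: two register outputs, e.g.
//   asm("..." : "=r"(a), "=r"(b) : /*inputs*/);
// make ResultType an aggregate such as { i32, i32 }, whose members are
// later split out with extractvalue before being stored back; a single
// output is returned directly, and no outputs give a void call.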
3209
3210 llvm::FunctionType *FTy =
3211 llvm::FunctionType::get(ResultType, ArgTypes, false);
3212
3213 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3214
3215 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3216 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3217 ? llvm::InlineAsm::AD_ATT
3218 : llvm::InlineAsm::AD_Intel;
3219 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3220 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3221
3222 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3223 FTy, AsmString, Constraints, HasSideEffect,
3224 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3225 std::vector<llvm::Value*> RegResults;
3226 llvm::CallBrInst *CBR;
3227 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3228 CBRRegResults;
3229 if (IsGCCAsmGoto) {
3230 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3231 EmitBlock(Fallthrough);
3232 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3233 ReadNone, InNoMergeAttributedStmt,
3234 InNoConvergentAttributedStmt, S, ResultRegTypes,
3235 ArgElemTypes, *this, RegResults);
3236 // Because we are emitting code top to bottom, we don't have enough
3237 // information at this point to know precisely whether we have a critical
3238 // edge. If we have outputs, split all indirect destinations.
3239 if (!RegResults.empty()) {
3240 unsigned i = 0;
3241 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3242 llvm::Twine SynthName = Dest->getName() + ".split";
3243 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3244 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3245 Builder.SetInsertPoint(SynthBB);
3246
3247 if (ResultRegTypes.size() == 1) {
3248 CBRRegResults[SynthBB].push_back(CBR);
3249 } else {
3250 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3251 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3252 CBRRegResults[SynthBB].push_back(Tmp);
3253 }
3254 }
3255
3256 EmitBranch(Dest);
3257 EmitBlock(SynthBB);
3258 CBR->setIndirectDest(i++, SynthBB);
3259 }
3260 }
3261 } else if (HasUnwindClobber) {
3262 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3263 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3264 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3265 InNoConvergentAttributedStmt, S, ResultRegTypes,
3266 ArgElemTypes, *this, RegResults);
3267 } else {
3268 llvm::CallInst *Result =
3269 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3270 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3271 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3272 InNoConvergentAttributedStmt, S, ResultRegTypes,
3273 ArgElemTypes, *this, RegResults);
3274 }
3275
3276 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3277 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3278 ResultRegIsFlagReg);
3279
3280 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3281 // different insertion point; one for each indirect destination and with
3282 // CBRRegResults rather than RegResults.
3283 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3284 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3285 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3286 Builder.SetInsertPoint(Succ, --(Succ->end()));
3287 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3288 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3289 ResultTypeRequiresCast, ResultRegIsFlagReg);
3290 }
3291 }
3292}
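// Overall shape of the asm-goto-with-outputs lowering above, as
// illustrative IR (names invented):
//   %r = callbr i32 asm "...", "=r,!i"(...)
//           to label %asm.fallthrough [label %indirect.split]
// Each indirect destination is replaced by a synthesized ".split" block
// that stores the results (from CBRRegResults) and then branches to the
// original target, avoiding stores on a potentially critical edge.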
3293
3294LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3295 const RecordDecl *RD = S.getCapturedRecordDecl();
3296 QualType RecordTy = getContext().getCanonicalTagType(RD);
3297
3298 // Initialize the captured struct.
3299 LValue SlotLV =
3300 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3301
3302 RecordDecl::field_iterator CurField = RD->field_begin();
3303 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3304 E = S.capture_init_end();
3305 I != E; ++I, ++CurField) {
3306 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3307 if (CurField->hasCapturedVLAType()) {
3308 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3309 } else {
3310 EmitInitializerForField(*CurField, LV, *I);
3311 }
3312 }
3313
3314 return SlotLV;
3315}
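// Illustrative effect (assumed layout): capturing an int by reference and
// a VLA bound produces an anonymous record roughly like
//   %struct.anon = type { ptr, i64 }
// and the loop above fills an "agg.captured" temporary of that type, one
// field per capture.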
3316
3317/// Generate an outlined function for the body of a CapturedStmt, store any
3318/// captured variables into the captured struct, and call the outlined function.
3319llvm::Function *
3320CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3321 LValue CapStruct = InitCapturedStruct(S);
3322
3323 // Emit the CapturedDecl
3324 CodeGenFunction CGF(CGM, true);
3325 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3326 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3327 delete CGF.CapturedStmtInfo;
3328
3329 // Emit call to the helper function.
3330 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3331
3332 return F;
3333}
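// Net result, sketched: the statement body lands in an internal helper
// along the lines of
//   define internal void @__captured_stmt(ptr %__context)
// and the code above simply passes the filled-in capture struct to it.
// (Helper name and signature are illustrative; see getHelperName().)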
3334
3335Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3336 LValue CapStruct = InitCapturedStruct(S);
3337 return CapStruct.getAddress();
3338}
3339
3340/// Creates the outlined function for a CapturedStmt.
3341llvm::Function *
3342CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3343 assert(CapturedStmtInfo &&
3344 "CapturedStmtInfo should be set when generating the captured function");
3345 const CapturedDecl *CD = S.getCapturedDecl();
3346 const RecordDecl *RD = S.getCapturedRecordDecl();
3347 SourceLocation Loc = S.getBeginLoc();
3348 assert(CD->hasBody() && "missing CapturedDecl body");
3349
3350 // Build the argument list.
3351 ASTContext &Ctx = CGM.getContext();
3352 FunctionArgList Args;
3353 Args.append(CD->param_begin(), CD->param_end());
3354
3355 // Create the function declaration.
3356 const CGFunctionInfo &FuncInfo =
3357   CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3358 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3359
3360 llvm::Function *F =
3361 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3362   CapturedStmtInfo->getHelperName(), &CGM.getModule());
3363 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3364 if (CD->isNothrow())
3365 F->addFnAttr(llvm::Attribute::NoUnwind);
3366
3367 // Generate the function.
3368 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3369 CD->getBody()->getBeginLoc());
3370 // Set the context parameter in CapturedStmtInfo.
3371 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3372 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3373
3374 // Initialize variable-length arrays.
3375 LValue Base = MakeNaturalAlignRawAddrLValue(
3376   CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3377 for (auto *FD : RD->fields()) {
3378 if (FD->hasCapturedVLAType()) {
3379 auto *ExprArg =
3380 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3381 .getScalarVal();
3382 auto VAT = FD->getCapturedVLAType();
3383 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3384 }
3385 }
3386
3387 // If 'this' is captured, load it into CXXThisValue.
3388 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3389 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3390 LValue ThisLValue = EmitLValueForField(Base, FD);
3391 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3392 }
3393
3394 PGO->assignRegionCounters(GlobalDecl(CD), F);
3395 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3396 FinishFunction(CD->getBodyRBrace());
3397
3398 return F;
3399}
3400
3401// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3402// or nullptr otherwise.
3403static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3404 for (auto &I : *BB) {
3405 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3406 return CI;
3407 }
3408 return nullptr;
3409}
3410
3411llvm::CallBase *
3412CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3413 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3414 assert(ParentToken);
3415
3416 llvm::Value *bundleArgs[] = {ParentToken};
3417 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3418 auto *Output = llvm::CallBase::addOperandBundle(
3419 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3420 Input->replaceAllUsesWith(Output);
3421 Input->eraseFromParent();
3422 return Output;
3423}
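// Illustrative before/after (IR sketch): an existing call
//   %r = call i32 @f()
// is recreated as
//   %r = call i32 @f() [ "convergencectrl"(token %parent) ]
// with all uses rewritten to the new instruction and the old one erased.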
3424
3425llvm::ConvergenceControlInst *
3426CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3427 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3428 assert(ParentToken);
3429 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3430}
3431
3432llvm::ConvergenceControlInst *
3433CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3434 llvm::BasicBlock *BB = &F->getEntryBlock();
3435 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3436 if (Token)
3437 return Token;
3438
3439 // Adding a convergence token requires the function to be marked as
3440 // convergent.
3441 F->setConvergent();
3442 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3443}
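// Sketch of the resulting entry token (illustrative):
//   entry:
//     %t = call token @llvm.experimental.convergence.entry()
// Loop headers then receive a matching @llvm.experimental.convergence.loop
// token anchored to %t, and convergent calls reference the innermost token
// through their "convergencectrl" operand bundle.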