LLVM 22.0.0git
Local.cpp
1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/IntrinsicsWebAssembly.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
74#include "llvm/Support/Debug.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
93STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
95
97 "phicse-debug-hash",
98#ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100#else
101 cl::init(false),
102#endif
104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
114 "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
116 cl::desc("Stop removing an empty block if removing it will introduce more "
117 "than this number of phi entries in its successor"));
118
119// Max recursion depth for collectBitParts used when detecting bswap and
120// bitreverse idioms.
121static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
127/// ConstantFoldTerminator - If a terminator instruction is predicated on a
128/// constant value, convert it into an unconditional branch to the constant
129/// destination. This is a nontrivial operation because the successors of this
130/// basic block must have their PHI nodes updated.
131/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
132/// conditions and indirectbr addresses this might make dead if
133/// DeleteDeadConditions is true.
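// Illustrative example (for exposition only): a conditional branch whose
// condition is a constant, e.g.
//   br i1 true, label %taken, label %dead
// is rewritten to
//   br label %taken
// and %dead has the corresponding PHI incoming entries removed. The same idea
// applies to a switch on a constant and to an indirectbr on a blockaddress.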
134bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
135 const TargetLibraryInfo *TLI,
136 DomTreeUpdater *DTU) {
137 Instruction *T = BB->getTerminator();
138 IRBuilder<> Builder(T);
139
140 // Branch - See if we are conditional jumping on constant
141 if (auto *BI = dyn_cast<BranchInst>(T)) {
142 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
143
144 BasicBlock *Dest1 = BI->getSuccessor(0);
145 BasicBlock *Dest2 = BI->getSuccessor(1);
146
147 if (Dest2 == Dest1) { // Conditional branch to same location?
148 // This branch matches something like this:
149 // br bool %cond, label %Dest, label %Dest
150 // and changes it into: br label %Dest
151
152 // Let the basic block know that we are letting go of one copy of it.
153 assert(BI->getParent() && "Terminator not inserted in block!");
154 Dest1->removePredecessor(BI->getParent());
155
156 // Replace the conditional branch with an unconditional one.
157 BranchInst *NewBI = Builder.CreateBr(Dest1);
158
159 // Transfer the metadata to the new branch instruction.
160 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
161 LLVMContext::MD_annotation});
162
163 Value *Cond = BI->getCondition();
164 BI->eraseFromParent();
165 if (DeleteDeadConditions)
166 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
167 return true;
168 }
169
170 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
171 // Are we branching on constant?
172 // YES. Change to unconditional branch...
173 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
174 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
175
176 // Let the basic block know that we are letting go of it. Based on this,
177 // it will adjust its PHI nodes.
178 OldDest->removePredecessor(BB);
179
180 // Replace the conditional branch with an unconditional one.
181 BranchInst *NewBI = Builder.CreateBr(Destination);
182
183 // Transfer the metadata to the new branch instruction.
184 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
185 LLVMContext::MD_annotation});
186
187 BI->eraseFromParent();
188 if (DTU)
189 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
190 return true;
191 }
192
193 return false;
194 }
195
196 if (auto *SI = dyn_cast<SwitchInst>(T)) {
197 // If we are switching on a constant, we can convert the switch to an
198 // unconditional branch.
199 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
200 BasicBlock *DefaultDest = SI->getDefaultDest();
201 BasicBlock *TheOnlyDest = DefaultDest;
202
203 // If the default is unreachable, ignore it when searching for TheOnlyDest.
204 if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
205 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
206
207 bool Changed = false;
208
209 // Figure out which case it goes to.
210 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
211 // Found case matching a constant operand?
212 if (It->getCaseValue() == CI) {
213 TheOnlyDest = It->getCaseSuccessor();
214 break;
215 }
216
217 // Check to see if this branch is going to the same place as the default
218 // dest. If so, eliminate it as an explicit compare.
219 if (It->getCaseSuccessor() == DefaultDest) {
220 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
221 unsigned NCases = SI->getNumCases();
222 // Fold the case metadata into the default if there will be any branches
223 // left, unless the metadata doesn't match the switch.
224 if (NCases > 1 && MD) {
225 // Collect branch weights into a vector.
226 SmallVector<uint32_t, 8> Weights;
227 extractBranchWeights(MD, Weights);
228
229 // Merge weight of this case to the default weight.
230 unsigned Idx = It->getCaseIndex();
231 // TODO: Add overflow check.
232 Weights[0] += Weights[Idx + 1];
233 // Remove weight for this case.
234 std::swap(Weights[Idx + 1], Weights.back());
235 Weights.pop_back();
236 setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD));
237 }
238 // Remove this entry.
239 BasicBlock *ParentBB = SI->getParent();
240 DefaultDest->removePredecessor(ParentBB);
241 It = SI->removeCase(It);
242 End = SI->case_end();
243
244 // Removing this case may have made the condition constant. In that
245 // case, update CI and restart iteration through the cases.
246 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
247 CI = NewCI;
248 It = SI->case_begin();
249 }
250
251 Changed = true;
252 continue;
253 }
254
255 // Otherwise, check to see if the switch only branches to one destination.
256 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
257 // destinations.
258 if (It->getCaseSuccessor() != TheOnlyDest)
259 TheOnlyDest = nullptr;
260
261 // Increment this iterator as we haven't removed the case.
262 ++It;
263 }
264
265 if (CI && !TheOnlyDest) {
266 // Branching on a constant, but not any of the cases, go to the default
267 // successor.
268 TheOnlyDest = SI->getDefaultDest();
269 }
270
271 // If we found a single destination that we can fold the switch into, do so
272 // now.
273 if (TheOnlyDest) {
274 // Insert the new branch.
275 Builder.CreateBr(TheOnlyDest);
276 BasicBlock *BB = SI->getParent();
277
278 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
279
280 // Remove entries from PHI nodes which we no longer branch to...
281 BasicBlock *SuccToKeep = TheOnlyDest;
282 for (BasicBlock *Succ : successors(SI)) {
283 if (DTU && Succ != TheOnlyDest)
284 RemovedSuccessors.insert(Succ);
285 // Found case matching a constant operand?
286 if (Succ == SuccToKeep) {
287 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
288 } else {
289 Succ->removePredecessor(BB);
290 }
291 }
292
293 // Delete the old switch.
294 Value *Cond = SI->getCondition();
295 SI->eraseFromParent();
296 if (DeleteDeadConditions)
297 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
298 if (DTU) {
299 std::vector<DominatorTree::UpdateType> Updates;
300 Updates.reserve(RemovedSuccessors.size());
301 for (auto *RemovedSuccessor : RemovedSuccessors)
302 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
303 DTU->applyUpdates(Updates);
304 }
305 return true;
306 }
307
308 if (SI->getNumCases() == 1) {
309 // Otherwise, we can fold this switch into a conditional branch
310 // instruction if it has only one non-default destination.
311 auto FirstCase = *SI->case_begin();
312 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
313 FirstCase.getCaseValue(), "cond");
314
315 // Insert the new branch.
316 BranchInst *NewBr = Builder.CreateCondBr(Cond,
317 FirstCase.getCaseSuccessor(),
318 SI->getDefaultDest());
319 SmallVector<uint32_t> Weights;
320 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
321 uint32_t DefWeight = Weights[0];
322 uint32_t CaseWeight = Weights[1];
323 // The TrueWeight should be the weight for the single case of SI.
324 NewBr->setMetadata(LLVMContext::MD_prof,
325 MDBuilder(BB->getContext())
326 .createBranchWeights(CaseWeight, DefWeight));
327 }
328
329 // Update make.implicit metadata to the newly-created conditional branch.
330 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
331 if (MakeImplicitMD)
332 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
333
334 // Delete the old switch.
335 SI->eraseFromParent();
336 return true;
337 }
338 return Changed;
339 }
340
341 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
342 // indirectbr blockaddress(@F, @BB) -> br label @BB
343 if (auto *BA =
344 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
345 BasicBlock *TheOnlyDest = BA->getBasicBlock();
346 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
347
348 // Insert the new branch.
349 Builder.CreateBr(TheOnlyDest);
350
351 BasicBlock *SuccToKeep = TheOnlyDest;
352 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
353 BasicBlock *DestBB = IBI->getDestination(i);
354 if (DTU && DestBB != TheOnlyDest)
355 RemovedSuccessors.insert(DestBB);
356 if (IBI->getDestination(i) == SuccToKeep) {
357 SuccToKeep = nullptr;
358 } else {
359 DestBB->removePredecessor(BB);
360 }
361 }
362 Value *Address = IBI->getAddress();
363 IBI->eraseFromParent();
364 if (DeleteDeadConditions)
365 // Delete pointer cast instructions.
366 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
367
368 // Also zap the blockaddress constant if there are no users remaining,
369 // otherwise the destination is still marked as having its address taken.
370 if (BA->use_empty())
371 BA->destroyConstant();
372
373 // If we didn't find our destination in the IBI successor list, then we
374 // have undefined behavior. Replace the unconditional branch with an
375 // 'unreachable' instruction.
376 if (SuccToKeep) {
377 BB->getTerminator()->eraseFromParent();
378 new UnreachableInst(BB->getContext(), BB);
379 }
380
381 if (DTU) {
382 std::vector<DominatorTree::UpdateType> Updates;
383 Updates.reserve(RemovedSuccessors.size());
384 for (auto *RemovedSuccessor : RemovedSuccessors)
385 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
386 DTU->applyUpdates(Updates);
387 }
388 return true;
389 }
390 }
391
392 return false;
393}
394
395//===----------------------------------------------------------------------===//
396// Local dead code elimination.
397//
398
399/// isInstructionTriviallyDead - Return true if the result produced by the
400/// instruction is not used, and the instruction has no side effects.
401///
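// Illustrative example (for exposition only): an instruction such as
//   %sum = add i32 %a, %b
// with no remaining uses is trivially dead, whereas a store or a call with
// side effects is not, even if its result is unused.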
402bool llvm::isInstructionTriviallyDead(Instruction *I,
403 const TargetLibraryInfo *TLI) {
404 if (!I->use_empty())
405 return false;
406 return wouldInstructionBeTriviallyDead(I, TLI);
407}
408
409bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
410 Instruction *I, const TargetLibraryInfo *TLI) {
411 // Instructions that are "markers" and have implied meaning on code around
412 // them (without explicit uses), are not dead on unused paths.
413 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
414 if (II->getIntrinsicID() == Intrinsic::stacksave ||
415 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
416 II->isLifetimeStartOrEnd())
417 return false;
418 return wouldInstructionBeTriviallyDead(I, TLI);
419}
420
421bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
422 const TargetLibraryInfo *TLI) {
423 if (I->isTerminator())
424 return false;
425
426 // We don't want the landingpad-like instructions removed by anything this
427 // general.
428 if (I->isEHPad())
429 return false;
430
431 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
432 if (DLI->getLabel())
433 return false;
434 return true;
435 }
436
437 if (auto *CB = dyn_cast<CallBase>(I))
438 if (isRemovableAlloc(CB, TLI))
439 return true;
440
441 if (!I->willReturn()) {
442 auto *II = dyn_cast<IntrinsicInst>(I);
443 if (!II)
444 return false;
445
446 switch (II->getIntrinsicID()) {
447 case Intrinsic::experimental_guard: {
448 // Guards on true are operationally no-ops. In the future we can
449 // consider more sophisticated tradeoffs for guards considering potential
450 // for check widening, but for now we keep things simple.
451 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
452 return Cond && Cond->isOne();
453 }
454 // TODO: These intrinsics are not safe to remove, because this may remove
455 // a well-defined trap.
456 case Intrinsic::wasm_trunc_signed:
457 case Intrinsic::wasm_trunc_unsigned:
458 case Intrinsic::ptrauth_auth:
459 case Intrinsic::ptrauth_resign:
460 return true;
461 default:
462 return false;
463 }
464 }
465
466 if (!I->mayHaveSideEffects())
467 return true;
468
469 // Special case intrinsics that "may have side effects" but can be deleted
470 // when dead.
471 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
472 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
473 if (II->getIntrinsicID() == Intrinsic::stacksave ||
474 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
475 return true;
476
477 // Intrinsics declare sideeffects to prevent them from moving, but they are
478 // nops without users.
479 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
480 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
481 return true;
482
483 if (II->isLifetimeStartOrEnd()) {
484 auto *Arg = II->getArgOperand(0);
485 if (isa<PoisonValue>(Arg))
486 return true;
487
488 // If the only uses of the alloca are lifetime intrinsics, then the
489 // intrinsics are dead.
490 return llvm::all_of(Arg->uses(), [](Use &Use) {
491 return isa<LifetimeIntrinsic>(Use.getUser());
492 });
493 }
494
495 // Assumptions are dead if their condition is trivially true.
496 if (II->getIntrinsicID() == Intrinsic::assume &&
497 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
498 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
499 return !Cond->isZero();
500
501 return false;
502 }
503
504 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
505 std::optional<fp::ExceptionBehavior> ExBehavior =
506 FPI->getExceptionBehavior();
507 return *ExBehavior != fp::ebStrict;
508 }
509 }
510
511 if (auto *Call = dyn_cast<CallBase>(I)) {
512 if (Value *FreedOp = getFreedOperand(Call, TLI))
513 if (Constant *C = dyn_cast<Constant>(FreedOp))
514 return C->isNullValue() || isa<UndefValue>(C);
515 if (isMathLibCallNoop(Call, TLI))
516 return true;
517 }
518
519 // Non-volatile atomic loads from constants can be removed.
520 if (auto *LI = dyn_cast<LoadInst>(I))
521 if (auto *GV = dyn_cast<GlobalVariable>(
522 LI->getPointerOperand()->stripPointerCasts()))
523 if (!LI->isVolatile() && GV->isConstant())
524 return true;
525
526 return false;
527}
528
529/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
530/// trivially dead instruction, delete it. If that makes any of its operands
531/// trivially dead, delete them too, recursively. Return true if any
532/// instructions were deleted.
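// A minimal usage sketch (hypothetical caller, not part of this file):
//   if (RecursivelyDeleteTriviallyDeadInstructions(V, TLI))
//     ;  // V and any operands that became trivially dead were erased.
// Passing a value that is not a trivially dead instruction is a no-op and
// returns false.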
533bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
534 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
535 std::function<void(Value *)> AboutToDeleteCallback) {
536 Instruction *I = dyn_cast<Instruction>(V);
537 if (!I || !isInstructionTriviallyDead(I, TLI))
538 return false;
539
540 SmallVector<WeakTrackingVH, 16> DeadInsts;
541 DeadInsts.push_back(I);
542 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
543 AboutToDeleteCallback);
544
545 return true;
546}
547
548bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
549 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
550 MemorySSAUpdater *MSSAU,
551 std::function<void(Value *)> AboutToDeleteCallback) {
552 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
553 for (; S != E; ++S) {
554 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
555 if (!I || !isInstructionTriviallyDead(I)) {
556 DeadInsts[S] = nullptr;
557 ++Alive;
558 }
559 }
560 if (Alive == E)
561 return false;
562 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
563 AboutToDeleteCallback);
564 return true;
565}
566
567void llvm::RecursivelyDeleteTriviallyDeadInstructions(
568 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
569 MemorySSAUpdater *MSSAU,
570 std::function<void(Value *)> AboutToDeleteCallback) {
571 // Process the dead instruction list until empty.
572 while (!DeadInsts.empty()) {
573 Value *V = DeadInsts.pop_back_val();
574 Instruction *I = cast_or_null<Instruction>(V);
575 if (!I)
576 continue;
578 "Live instruction found in dead worklist!");
579 assert(I->use_empty() && "Instructions with uses are not dead.");
580
581 // Don't lose the debug info while deleting the instructions.
582 salvageDebugInfo(*I);
583
584 if (AboutToDeleteCallback)
585 AboutToDeleteCallback(I);
586
587 // Null out all of the instruction's operands to see if any operand becomes
588 // dead as we go.
589 for (Use &OpU : I->operands()) {
590 Value *OpV = OpU.get();
591 OpU.set(nullptr);
592
593 if (!OpV->use_empty())
594 continue;
595
596 // If the operand is an instruction that became dead as we nulled out the
597 // operand, and if it is 'trivially' dead, delete it in a future loop
598 // iteration.
599 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
600 if (isInstructionTriviallyDead(OpI, TLI))
601 DeadInsts.push_back(OpI);
602 }
603 if (MSSAU)
604 MSSAU->removeMemoryAccess(I);
605
606 I->eraseFromParent();
607 }
608}
609
610bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
611 SmallVector<DbgVariableRecord *, 1> DPUsers;
612 findDbgUsers(I, DPUsers);
613 for (auto *DVR : DPUsers)
614 DVR->setKillLocation();
615 return !DPUsers.empty();
616}
617
618/// areAllUsesEqual - Check whether the uses of a value are all the same.
619/// This is similar to Instruction::hasOneUse() except this will also return
620/// true when there are no uses or multiple uses that all refer to the same
621/// value.
622static bool areAllUsesEqual(Instruction *I) {
623 Value::user_iterator UI = I->user_begin();
624 Value::user_iterator UE = I->user_end();
625 if (UI == UE)
626 return true;
627
628 User *TheUse = *UI;
629 for (++UI; UI != UE; ++UI) {
630 if (*UI != TheUse)
631 return false;
632 }
633 return true;
634}
635
636/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
637/// dead PHI node, due to being a def-use chain of single-use nodes that
638/// either forms a cycle or is terminated by a trivially dead instruction,
639/// delete it. If that makes any of its operands trivially dead, delete them
640/// too, recursively. Return true if a change was made.
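// Illustrative example (for exposition only): two single-use PHIs that only
// feed each other,
//   loop:   %p = phi i32 [ 0, %entry ], [ %q, %latch ]
//   latch:  %q = phi i32 [ %p, %loop ]
// have no outside users, so the cycle is effectively dead and can be removed.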
641bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
642 const TargetLibraryInfo *TLI,
643 llvm::MemorySSAUpdater *MSSAU) {
644 SmallPtrSet<Instruction *, 4> Visited;
645 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
646 I = cast<Instruction>(*I->user_begin())) {
647 if (I->use_empty())
648 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
649
650 // If we find an instruction more than once, we're on a cycle that
651 // won't prove fruitful.
652 if (!Visited.insert(I).second) {
653 // Break the cycle and delete the instruction and its operands.
654 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
655 RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
656 return true;
657 }
658 }
659 return false;
660}
661
662static bool
663simplifyAndDCEInstruction(Instruction *I,
664 SmallSetVector<Instruction *, 16> &WorkList,
665 const DataLayout &DL,
666 const TargetLibraryInfo *TLI) {
667 if (isInstructionTriviallyDead(I, TLI)) {
668 salvageDebugInfo(*I);
669
670 // Null out all of the instruction's operands to see if any operand becomes
671 // dead as we go.
672 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
673 Value *OpV = I->getOperand(i);
674 I->setOperand(i, nullptr);
675
676 if (!OpV->use_empty() || I == OpV)
677 continue;
678
679 // If the operand is an instruction that became dead as we nulled out the
680 // operand, and if it is 'trivially' dead, delete it in a future loop
681 // iteration.
682 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
683 if (isInstructionTriviallyDead(OpI, TLI))
684 WorkList.insert(OpI);
685 }
686
687 I->eraseFromParent();
688
689 return true;
690 }
691
692 if (Value *SimpleV = simplifyInstruction(I, DL)) {
693 // Add the users to the worklist. CAREFUL: an instruction can use itself,
694 // in the case of a phi node.
695 for (User *U : I->users()) {
696 if (U != I) {
697 WorkList.insert(cast<Instruction>(U));
698 }
699 }
700
701 // Replace the instruction with its simplified value.
702 bool Changed = false;
703 if (!I->use_empty()) {
704 I->replaceAllUsesWith(SimpleV);
705 Changed = true;
706 }
707 if (isInstructionTriviallyDead(I, TLI)) {
708 I->eraseFromParent();
709 Changed = true;
710 }
711 return Changed;
712 }
713 return false;
714}
715
716/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
717/// simplify any instructions in it and recursively delete dead instructions.
718///
719/// This returns true if it changed the code. Note that it can also delete
720/// instructions in other blocks, not just in this block.
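// A minimal usage sketch (hypothetical caller, not part of this file):
//   bool Changed = SimplifyInstructionsInBlock(BB, TLI);
// As noted above, instructions outside BB may also be erased when their last
// remaining use was inside BB.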
721bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
722 const TargetLibraryInfo *TLI) {
723 bool MadeChange = false;
724 const DataLayout &DL = BB->getDataLayout();
725
726#ifndef NDEBUG
727 // In debug builds, ensure that the terminator of the block is never replaced
728 // or deleted by these simplifications. The idea of simplification is that it
729 // cannot introduce new instructions, and there is no way to replace the
730 // terminator of a block without introducing a new instruction.
731 AssertingVH<Instruction> TerminatorVH(&BB->back());
732#endif
733
734 SmallSetVector<Instruction *, 16> WorkList;
735 // Iterate over the original function, only adding insts to the worklist
736 // if they actually need to be revisited. This avoids having to pre-init
737 // the worklist with the entire function's worth of instructions.
738 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
739 BI != E;) {
740 assert(!BI->isTerminator());
741 Instruction *I = &*BI;
742 ++BI;
743
744 // We're visiting this instruction now, so make sure it's not in the
745 // worklist from an earlier visit.
746 if (!WorkList.count(I))
747 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
748 }
749
750 while (!WorkList.empty()) {
751 Instruction *I = WorkList.pop_back_val();
752 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
753 }
754 return MadeChange;
755}
756
757//===----------------------------------------------------------------------===//
758// Control Flow Graph Restructuring.
759//
760
761void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
762 DomTreeUpdater *DTU) {
763
764 // If BB has single-entry PHI nodes, fold them.
765 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
766 Value *NewVal = PN->getIncomingValue(0);
767 // Replace self referencing PHI with poison, it must be dead.
768 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
769 PN->replaceAllUsesWith(NewVal);
770 PN->eraseFromParent();
771 }
772
773 BasicBlock *PredBB = DestBB->getSinglePredecessor();
774 assert(PredBB && "Block doesn't have a single predecessor!");
775
776 bool ReplaceEntryBB = PredBB->isEntryBlock();
777
778 // DTU updates: Collect all the edges that enter
779 // PredBB. These dominator edges will be redirected to DestBB.
780 SmallVector<DominatorTree::UpdateType, 32> Updates;
781
782 if (DTU) {
783 // To avoid processing the same predecessor more than once.
784 SmallPtrSet<BasicBlock *, 2> SeenPreds;
785 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
786 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
787 // This predecessor of PredBB may already have DestBB as a successor.
788 if (PredOfPredBB != PredBB)
789 if (SeenPreds.insert(PredOfPredBB).second)
790 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
791 SeenPreds.clear();
792 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
793 if (SeenPreds.insert(PredOfPredBB).second)
794 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
795 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
796 }
797
798 // Zap anything that took the address of DestBB. Not doing this will give the
799 // address an invalid value.
800 if (DestBB->hasAddressTaken()) {
801 BlockAddress *BA = BlockAddress::get(DestBB);
802 Constant *Replacement =
803 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
804 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
805 BA->getType()));
806 BA->destroyConstant();
807 }
808
809 // Anything that branched to PredBB now branches to DestBB.
810 PredBB->replaceAllUsesWith(DestBB);
811
812 // Splice all the instructions from PredBB to DestBB.
813 PredBB->getTerminator()->eraseFromParent();
814 DestBB->splice(DestBB->begin(), PredBB);
815 new UnreachableInst(PredBB->getContext(), PredBB);
816
817 // If the PredBB is the entry block of the function, move DestBB up to
818 // become the entry block after we erase PredBB.
819 if (ReplaceEntryBB)
820 DestBB->moveAfter(PredBB);
821
822 if (DTU) {
823 assert(PredBB->size() == 1 &&
824 isa<UnreachableInst>(PredBB->getTerminator()) &&
825 "The successor list of PredBB isn't empty before "
826 "applying corresponding DTU updates.");
827 DTU->applyUpdatesPermissive(Updates);
828 DTU->deleteBB(PredBB);
829 // Recalculation of DomTree is needed when updating a forward DomTree and
830 // the Entry BB is replaced.
831 if (ReplaceEntryBB && DTU->hasDomTree()) {
832 // The entry block was removed and there is no external interface for
833 // the dominator tree to be notified of this change. In this corner-case
834 // we recalculate the entire tree.
835 DTU->recalculate(*(DestBB->getParent()));
836 }
837 }
838
839 else {
840 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
841 }
842}
843
844/// Return true if we can choose one of these values to use in place of the
845/// other. Note that we will always choose the non-undef value to keep.
846static bool CanMergeValues(Value *First, Value *Second) {
847 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
848}
849
850/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
851/// branch to Succ, into Succ.
852///
853/// Assumption: Succ is the single successor for BB.
854static bool
855CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
856 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
857 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
858
859 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
860 << Succ->getName() << "\n");
861 // Shortcut: if there is only a single predecessor, it must be BB and
862 // merging is always safe.
863 if (Succ->getSinglePredecessor())
864 return true;
865
866 // Look at all the phi nodes in Succ, to see if they present a conflict when
867 // merging these blocks
868 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
869 PHINode *PN = cast<PHINode>(I);
870
871 // If the incoming value from BB is again a PHINode in
872 // BB which has the same incoming value for *PI as PN does, we can
873 // merge the phi nodes and then the blocks can still be merged
874 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
875 if (BBPN && BBPN->getParent() == BB) {
876 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
877 BasicBlock *IBB = PN->getIncomingBlock(PI);
878 if (BBPreds.count(IBB) &&
879 !CanMergeValues(BBPN->getIncomingValue(PI),
880 PN->getIncomingValue(PI))) {
881 LLVM_DEBUG(dbgs()
882 << "Can't fold, phi node " << PN->getName() << " in "
883 << Succ->getName() << " is conflicting with "
884 << BBPN->getName() << " with regard to common predecessor "
885 << IBB->getName() << "\n");
886 return false;
887 }
888 }
889 } else {
890 Value* Val = PN->getIncomingValueForBlock(BB);
891 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
892 // See if the incoming value for the common predecessor is equal to the
893 // one for BB, in which case this phi node will not prevent the merging
894 // of the block.
895 BasicBlock *IBB = PN->getIncomingBlock(PI);
896 if (BBPreds.count(IBB) &&
897 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
898 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
899 << " in " << Succ->getName()
900 << " is conflicting with regard to common "
901 << "predecessor " << IBB->getName() << "\n");
902 return false;
903 }
904 }
905 }
906 }
907
908 return true;
909}
910
911using PredBlockVector = SmallVector<BasicBlock *, 16>;
912using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
913
914/// Determines the value to use as the phi node input for a block.
915///
916/// Select between \p OldVal and any value that we know flows from \p BB
917/// to a particular phi on the basis of which one (if either) is not
918/// undef. Update IncomingValues based on the selected value.
919///
920/// \param OldVal The value we are considering selecting.
921/// \param BB The block that the value flows in from.
922/// \param IncomingValues A map from block-to-value for other phi inputs
923/// that we have examined.
924///
925/// \returns the selected value.
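// Illustrative example (for exposition only): if the map already records a
// defined value %v for block %pred (learned from the phi's other inputs),
// then an undef flowing from %pred is replaced by %v; otherwise the original
// value is kept and, when defined, recorded for later inputs.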
926static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
927 IncomingValueMap &IncomingValues) {
928 if (!isa<UndefValue>(OldVal)) {
929 assert((!IncomingValues.count(BB) ||
930 IncomingValues.find(BB)->second == OldVal) &&
931 "Expected OldVal to match incoming value from BB!");
932
933 IncomingValues.insert(std::make_pair(BB, OldVal));
934 return OldVal;
935 }
936
937 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
938 if (It != IncomingValues.end()) return It->second;
939
940 return OldVal;
941}
942
943/// Create a map from block to value for the operands of a
944/// given phi.
945///
946/// Create a map from block to value for each non-undef value flowing
947/// into \p PN.
948///
949/// \param PN The phi we are collecting the map for.
950/// \param IncomingValues [out] The map from block to value for this phi.
951static void gatherIncomingValuesToPhi(PHINode *PN,
952 IncomingValueMap &IncomingValues) {
953 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
954 BasicBlock *BB = PN->getIncomingBlock(i);
955 Value *V = PN->getIncomingValue(i);
956
957 if (!isa<UndefValue>(V))
958 IncomingValues.insert(std::make_pair(BB, V));
959 }
960}
961
962/// Replace the incoming undef values to a phi with the values
963/// from a block-to-value map.
964///
965/// \param PN The phi we are replacing the undefs in.
966/// \param IncomingValues A map from block to value.
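// Illustrative example (for exposition only): for
//   %p = phi i32 [ undef, %a ], [ %v, %b ]
// where the map records %a -> %v, the undef entry for %a is rewritten to %v
// so the merged phi has consistent values for that predecessor.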
967static void replaceUndefValuesInPhi(PHINode *PN,
968 const IncomingValueMap &IncomingValues) {
969 SmallVector<unsigned> TrueUndefOps;
970 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
971 Value *V = PN->getIncomingValue(i);
972
973 if (!isa<UndefValue>(V)) continue;
974
975 BasicBlock *BB = PN->getIncomingBlock(i);
976 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
977
978 // Keep track of undef/poison incoming values. Those must match, so we fix
979 // them up below if needed.
980 // Note: this is conservatively correct, but we could try harder and group
981 // the undef values per incoming basic block.
982 if (It == IncomingValues.end()) {
983 TrueUndefOps.push_back(i);
984 continue;
985 }
986
987 // There is a defined value for this incoming block, so map this undef
988 // incoming value to the defined value.
989 PN->setIncomingValue(i, It->second);
990 }
991
992 // If there are both undef and poison values incoming, then convert those
993 // values to undef. It is invalid to have different values for the same
994 // incoming block.
995 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
996 return isa<PoisonValue>(PN->getIncomingValue(i));
997 });
998 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
999 for (unsigned i : TrueUndefOps)
1000 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
1001 }
1002}
1003
1004// Return true only when BB and Succ share a single common predecessor.
1005// Only handles cases when BB can't be merged while its predecessors can be
1006// redirected.
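// Illustrative case (for exposition only): BB and Succ share exactly one
// predecessor. BB itself cannot be folded away, but its other predecessors
// can be redirected straight to Succ, while the shared predecessor keeps its
// edge to BB.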
1007static bool
1008CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1009 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1010 BasicBlock *&CommonPred) {
1011
1012 // There must be phis in BB, otherwise BB will be merged into Succ directly
1013 if (BB->phis().empty() || Succ->phis().empty())
1014 return false;
1015
1016 // BB must have predecessors not shared that can be redirected to Succ
1017 if (!BB->hasNPredecessorsOrMore(2))
1018 return false;
1019
1020 if (any_of(BBPreds, [](const BasicBlock *Pred) {
1021 return isa<IndirectBrInst>(Pred->getTerminator());
1022 }))
1023 return false;
1024
1025 // Get the single common predecessor of both BB and Succ. Return false
1026 // when there are more than one common predecessors.
1027 for (BasicBlock *SuccPred : predecessors(Succ)) {
1028 if (BBPreds.count(SuccPred)) {
1029 if (CommonPred)
1030 return false;
1031 CommonPred = SuccPred;
1032 }
1033 }
1034
1035 return true;
1036}
1037
1038/// Check whether removing \p BB will make the phis in its \p Succ have too
1039/// many incoming entries. This function does not check whether \p BB is
1040/// foldable or not.
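// Worked example (illustrative numbers): if BB has 8 predecessors and 200 of
// Succ's phis need new entries, removing BB adds (8 - 1) * 200 = 1400 phi
// entries, which exceeds the default limit of 1000, so BB is kept.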
1041static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ) {
1042 // If BB only has one predecessor, then removing it will not introduce more
1043 // incoming edges for phis.
1044 if (BB->hasNPredecessors(1))
1045 return false;
1046 unsigned NumPreds = pred_size(BB);
1047 unsigned NumChangedPhi = 0;
1048 for (auto &Phi : Succ->phis()) {
1049 // If the incoming value is a phi and the phi is defined in BB,
1050 // then removing BB will not increase the total phi entries of the IR.
1051 if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB)))
1052 if (IncomingPhi->getParent() == BB)
1053 continue;
1054 // Otherwise, we need to add entries to the phi
1055 NumChangedPhi++;
1056 }
1057 // For every phi that needs to be changed, (NumPreds - 1) new entries will be
1058 // added. If the total increase in phi entries exceeds
1059 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
1060 // introducing too many new phi entries.
1061 return (NumPreds - 1) * NumChangedPhi >
1062 MaxPhiEntriesIncreaseAfterRemovingEmptyBlock;
1063}
1064
1065/// Replace a value flowing from a block to a phi with
1066/// potentially multiple instances of that value flowing from the
1067/// block's predecessors to the phi.
1068///
1069/// \param BB The block with the value flowing into the phi.
1070/// \param BBPreds The predecessors of BB.
1071/// \param PN The phi that we are updating.
1072/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1073static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
1074 const PredBlockVector &BBPreds,
1075 PHINode *PN,
1076 BasicBlock *CommonPred) {
1077 Value *OldVal = PN->removeIncomingValue(BB, false);
1078 assert(OldVal && "No entry in PHI for Pred BB!");
1079
1080 IncomingValueMap IncomingValues;
1081
1082 // We are merging two blocks - BB, and the block containing PN - and
1083 // as a result we need to redirect edges from the predecessors of BB
1084 // to go to the block containing PN, and update PN
1085 // accordingly. Since we allow merging blocks in the case where the
1086 // predecessor and successor blocks both share some predecessors,
1087 // and where some of those common predecessors might have undef
1088 // values flowing into PN, we want to rewrite those values to be
1089 // consistent with the non-undef values.
1090
1091 gatherIncomingValuesToPhi(PN, IncomingValues);
1092
1093 // If this incoming value is one of the PHI nodes in BB, the new entries
1094 // in the PHI node are the entries from the old PHI.
1095 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1096 PHINode *OldValPN = cast<PHINode>(OldVal);
1097 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1098 // Note that, since we are merging phi nodes and BB and Succ might
1099 // have common predecessors, we could end up with a phi node with
1100 // identical incoming branches. This will be cleaned up later (and
1101 // will trigger asserts if we try to clean it up now, without also
1102 // simplifying the corresponding conditional branch).
1103 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1104
1105 if (PredBB == CommonPred)
1106 continue;
1107
1108 Value *PredVal = OldValPN->getIncomingValue(i);
1109 Value *Selected =
1110 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1111
1112 // And add a new incoming value for this predecessor for the
1113 // newly retargeted branch.
1114 PN->addIncoming(Selected, PredBB);
1115 }
1116 if (CommonPred)
1117 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1118
1119 } else {
1120 for (BasicBlock *PredBB : BBPreds) {
1121 // Update existing incoming values in PN for this
1122 // predecessor of BB.
1123 if (PredBB == CommonPred)
1124 continue;
1125
1126 Value *Selected =
1127 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1128
1129 // And add a new incoming value for this predecessor for the
1130 // newly retargeted branch.
1131 PN->addIncoming(Selected, PredBB);
1132 }
1133 if (CommonPred)
1134 PN->addIncoming(OldVal, BB);
1135 }
1136
1137 replaceUndefValuesInPhi(PN, IncomingValues);
1138}
1139
1140bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1141 DomTreeUpdater *DTU) {
1142 assert(BB != &BB->getParent()->getEntryBlock() &&
1143 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1144
1145 // We can't simplify infinite loops.
1146 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1147 if (BB == Succ)
1148 return false;
1149
1150 SmallPtrSet<BasicBlock *, 16> BBPreds(llvm::from_range, predecessors(BB));
1151
1152 // The single common predecessor of BB and Succ when BB cannot be killed
1153 BasicBlock *CommonPred = nullptr;
1154
1155 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1156
1157 // Even if we can not fold BB into Succ, we may be able to redirect the
1158 // predecessors of BB to Succ.
1159 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
1160 BB, Succ, BBPreds, CommonPred);
1161
1162 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
1163 return false;
1164
1165 // Check to see if merging these blocks/phis would cause conflicts for any of
1166 // the phi nodes in BB or Succ. If not, we can safely merge.
1167
1168 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1169 // has uses which will not disappear when the PHI nodes are merged. It is
1170 // possible to handle such cases, but difficult: it requires checking whether
1171 // BB dominates Succ, which is non-trivial to calculate in the case where
1172 // Succ has multiple predecessors. Also, it requires checking whether
1173 // constructing the necessary self-referential PHI node doesn't introduce any
1174 // conflicts; this isn't too difficult, but the previous code for doing this
1175 // was incorrect.
1176 //
1177 // Note that if this check finds a live use, BB dominates Succ, so BB is
1178 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1179 // folding the branch isn't profitable in that case anyway.
1180 if (!Succ->getSinglePredecessor()) {
1181 BasicBlock::iterator BBI = BB->begin();
1182 while (isa<PHINode>(*BBI)) {
1183 for (Use &U : BBI->uses()) {
1184 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1185 if (PN->getIncomingBlock(U) != BB)
1186 return false;
1187 } else {
1188 return false;
1189 }
1190 }
1191 ++BBI;
1192 }
1193 }
1194
1195 if (BBPhisMergeable && CommonPred)
1196 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1197 << " and " << Succ->getName() << " : "
1198 << CommonPred->getName() << "\n");
1199
1200 // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
1201 // metadata.
1202 //
1203 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1204 // current status (that loop metadata is implemented as metadata attached to
1205 // the branch instruction in the loop latch block). To quote from review
1206 // comments, "the current representation of loop metadata (using a loop latch
1207 // terminator attachment) is known to be fundamentally broken. Loop latches
1208 // are not uniquely associated with loops (both in that a latch can be part of
1209 // multiple loops and a loop may have multiple latches). Loop headers are. The
1210 // solution to this problem is also known: Add support for basic block
1211 // metadata, and attach loop metadata to the loop header."
1212 //
1213 // Why bail out:
1214 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1215 // the latch for inner-loop (see reason below), so bail out to preserve
1216 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1217 // to this inner-loop.
1218 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1219 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1220 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1221 // one self-looping basic block, which is contradictory with the assumption.
1222 //
1223 // To illustrate how inner-loop metadata is dropped:
1224 //
1225 // CFG Before
1226 //
1227 // BB is while.cond.exit, attached with loop metadata md2.
1228 // BB->Pred is for.body, attached with loop metadata md1.
1229 //
1230 // entry
1231 // |
1232 // v
1233 // ---> while.cond -------------> while.end
1234 // | |
1235 // | v
1236 // | while.body
1237 // | |
1238 // | v
1239 // | for.body <---- (md1)
1240 // | | |______|
1241 // | v
1242 // | while.cond.exit (md2)
1243 // | |
1244 // |_______|
1245 //
1246 // CFG After
1247 //
1248 // while.cond1 is the merge of while.cond.exit and while.cond above.
1249 // for.body is attached with md2, and md1 is dropped.
1250 // If LoopSimplify runs later (as a part of loop pass), it could create
1251 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1252 // back), but it won't see 'md1' nor restore it for the inner-loop.
1253 //
1254 // entry
1255 // |
1256 // v
1257 // ---> while.cond1 -------------> while.end
1258 // | |
1259 // | v
1260 // | while.body
1261 // | |
1262 // | v
1263 // | for.body <---- (md2)
1264 // |_______| |______|
1265 if (Instruction *TI = BB->getTerminator())
1266 if (TI->hasNonDebugLocLoopMetadata())
1267 for (BasicBlock *Pred : predecessors(BB))
1268 if (Instruction *PredTI = Pred->getTerminator())
1269 if (PredTI->hasNonDebugLocLoopMetadata())
1270 return false;
1271
1272 if (BBKillable)
1273 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1274 else if (BBPhisMergeable)
1275 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1276
1277 std::vector<DominatorTree::UpdateType> Updates;
1278
1279 if (DTU) {
1280 // To avoid processing the same predecessor more than once.
1281 SmallPtrSet<BasicBlock *, 16> SeenPreds;
1282 // All predecessors of BB (except the common predecessor) will be moved to
1283 // Succ.
1284 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1285 SmallPtrSet<BasicBlock *, 16> SuccPreds(llvm::from_range,
1286 predecessors(Succ));
1287 for (auto *PredOfBB : predecessors(BB)) {
1288 // Do not modify those common predecessors of BB and Succ
1289 if (!SuccPreds.contains(PredOfBB))
1290 if (SeenPreds.insert(PredOfBB).second)
1291 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1292 }
1293
1294 SeenPreds.clear();
1295
1296 for (auto *PredOfBB : predecessors(BB))
1297 // When BB cannot be killed, do not remove the edge between BB and
1298 // CommonPred.
1299 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1300 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1301
1302 if (BBKillable)
1303 Updates.push_back({DominatorTree::Delete, BB, Succ});
1304 }
1305
1306 if (isa<PHINode>(Succ->begin())) {
1307 // If there is more than one pred of succ, and there are PHI nodes in
1308 // the successor, then we need to add incoming edges for the PHI nodes
1309 //
1310 const PredBlockVector BBPreds(predecessors(BB));
1311
1312 // Loop over all of the PHI nodes in the successor of BB.
1313 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1314 PHINode *PN = cast<PHINode>(I);
1315 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1316 }
1317 }
1318
1319 if (Succ->getSinglePredecessor()) {
1320 // BB is the only predecessor of Succ, so Succ will end up with exactly
1321 // the same predecessors BB had.
1322 // Copy over any phi, debug or lifetime instruction.
1323 BB->getTerminator()->eraseFromParent();
1324 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1325 } else {
1326 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1327 // We explicitly check for such uses for merging phis.
1328 assert(PN->use_empty() && "There shouldn't be any uses here!");
1329 PN->eraseFromParent();
1330 }
1331 }
1332
1333 // If the unconditional branch we replaced contains non-debug llvm.loop
1334 // metadata, we add the metadata to the branch instructions in the
1335 // predecessors.
1336 if (Instruction *TI = BB->getTerminator())
1337 if (TI->hasNonDebugLocLoopMetadata()) {
1338 MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop);
1339 for (BasicBlock *Pred : predecessors(BB))
1340 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1341 }
1342
1343 if (BBKillable) {
1344 // Everything that jumped to BB now goes to Succ.
1345 BB->replaceAllUsesWith(Succ);
1346
1347 if (!Succ->hasName())
1348 Succ->takeName(BB);
1349
1350 // Clear the successor list of BB to match updates applying to DTU later.
1351 if (BB->getTerminator())
1352 BB->back().eraseFromParent();
1353
1354 new UnreachableInst(BB->getContext(), BB);
1355 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1356 "applying corresponding DTU updates.");
1357 } else if (BBPhisMergeable) {
1358 // Everything except CommonPred that jumped to BB now goes to Succ.
1359 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1360 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1361 return UseInst->getParent() != CommonPred &&
1362 BBPreds.contains(UseInst->getParent());
1363 return false;
1364 });
1365 }
1366
1367 if (DTU)
1368 DTU->applyUpdates(Updates);
1369
1370 if (BBKillable)
1371 DeleteDeadBlock(BB, DTU);
1372
1373 return true;
1374}
1375
1376static bool
1377EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1378 SmallPtrSetImpl<PHINode *> &ToRemove) {
1379 // This implementation doesn't currently consider undef operands
1380 // specially. Theoretically, two phis which are identical except for
1381 // one having an undef where the other doesn't could be collapsed.
1382
1383 bool Changed = false;
1384
1385 // Examine each PHI.
1386 // Note that increment of I must *NOT* be in the iteration_expression, since
1387 // we don't want to immediately advance when we restart from the beginning.
1388 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1389 ++I;
1390 // Is there an identical PHI node in this basic block?
1391 // Note that we only look in the upper triangle of the comparison square;
1392 // we already checked that the lower-triangle PHIs aren't identical.
1393 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1394 if (ToRemove.contains(DuplicatePN))
1395 continue;
1396 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1397 continue;
1398 // A duplicate. Replace this PHI with the base PHI.
1399 ++NumPHICSEs;
1400 DuplicatePN->replaceAllUsesWith(PN);
1401 ToRemove.insert(DuplicatePN);
1402 Changed = true;
1403
1404 // The RAUW can change PHIs that we already visited.
1405 I = BB->begin();
1406 break; // Start over from the beginning.
1407 }
1408 }
1409 return Changed;
1410}
1411
1412static bool
1413EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1414 SmallPtrSetImpl<PHINode *> &ToRemove) {
1415 // This implementation doesn't currently consider undef operands
1416 // specially. Theoretically, two phis which are identical except for
1417 // one having an undef where the other doesn't could be collapsed.
1418
1419 struct PHIDenseMapInfo {
1420 static PHINode *getEmptyKey() {
1421 return DenseMapInfo<PHINode *>::getEmptyKey();
1422 }
1423
1424 static PHINode *getTombstoneKey() {
1425 return DenseMapInfo<PHINode *>::getTombstoneKey();
1426 }
1427
1428 static bool isSentinel(PHINode *PN) {
1429 return PN == getEmptyKey() || PN == getTombstoneKey();
1430 }
1431
1432 // WARNING: this logic must be kept in sync with
1433 // Instruction::isIdenticalToWhenDefined()!
1434 static unsigned getHashValueImpl(PHINode *PN) {
1435 // Compute a hash value on the operands. Instcombine will likely have
1436 // sorted them, which helps expose duplicates, but we have to check all
1437 // the operands to be safe in case instcombine hasn't run.
1438 return static_cast<unsigned>(
1439 hash_combine(hash_combine_range(PN->operand_values()),
1440 hash_combine_range(PN->blocks())));
1441 }
1442
1443 static unsigned getHashValue(PHINode *PN) {
1444#ifndef NDEBUG
1445 // If -phicse-debug-hash was specified, return a constant -- this
1446 // will force all hashing to collide, so we'll exhaustively search
1447 // the table for a match, and the assertion in isEqual will fire if
1448 // there's a bug causing equal keys to hash differently.
1449 if (PHICSEDebugHash)
1450 return 0;
1451#endif
1452 return getHashValueImpl(PN);
1453 }
1454
1455 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1456 if (isSentinel(LHS) || isSentinel(RHS))
1457 return LHS == RHS;
1458 return LHS->isIdenticalTo(RHS);
1459 }
1460
1461 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1462 // These comparisons are nontrivial, so assert that equality implies
1463 // hash equality (DenseMap demands this as an invariant).
1464 bool Result = isEqualImpl(LHS, RHS);
1465 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1466 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1467 return Result;
1468 }
1469 };
1470
1471 // Set of unique PHINodes.
1472 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1473 PHISet.reserve(4 * PHICSENumPHISmallSize);
1474
1475 // Examine each PHI.
1476 bool Changed = false;
1477 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1478 if (ToRemove.contains(PN))
1479 continue;
1480 auto Inserted = PHISet.insert(PN);
1481 if (!Inserted.second) {
1482 // A duplicate. Replace this PHI with its duplicate.
1483 ++NumPHICSEs;
1484 PN->replaceAllUsesWith(*Inserted.first);
1485 ToRemove.insert(PN);
1486 Changed = true;
1487
1488 // The RAUW can change PHIs that we already visited. Start over from the
1489 // beginning.
1490 PHISet.clear();
1491 I = BB->begin();
1492 }
1493 }
1494
1495 return Changed;
1496}
1497
1498bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1499 SmallPtrSetImpl<PHINode *> &ToRemove) {
1500 if (
1501#ifndef NDEBUG
1502 !PHICSEDebugHash &&
1503#endif
1504 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1505 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1506 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1507}
1508
1509bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1510 SmallPtrSet<PHINode *, 8> ToRemove;
1511 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1512 for (PHINode *PN : ToRemove)
1513 PN->eraseFromParent();
1514 return Changed;
1515}
1516
1517static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1518 const DataLayout &DL) {
1519 V = V->stripPointerCasts();
1520
1521 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1522 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1523 // than the current alignment, as the known bits calculation should have
1524 // already taken it into account. However, this is not always the case,
1525 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1526 // doesn't.
1527 Align CurrentAlign = AI->getAlign();
1528 if (PrefAlign <= CurrentAlign)
1529 return CurrentAlign;
1530
1531 // If the preferred alignment is greater than the natural stack alignment
1532 // then don't round up. This avoids dynamic stack realignment.
1533 MaybeAlign StackAlign = DL.getStackAlignment();
1534 if (StackAlign && PrefAlign > *StackAlign)
1535 return CurrentAlign;
1536 AI->setAlignment(PrefAlign);
1537 return PrefAlign;
1538 }
1539
1540 if (auto *GV = dyn_cast<GlobalVariable>(V)) {
1541 // TODO: as above, this shouldn't be necessary.
1542 Align CurrentAlign = GV->getPointerAlignment(DL);
1543 if (PrefAlign <= CurrentAlign)
1544 return CurrentAlign;
1545
1546 // If there is a large requested alignment and we can, bump up the alignment
1547 // of the global. If the memory we set aside for the global may not be the
1548 // memory used by the final program then it is impossible for us to reliably
1549 // enforce the preferred alignment.
1550 if (!GV->canIncreaseAlignment())
1551 return CurrentAlign;
1552
1553 if (GV->isThreadLocal()) {
1554 unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1555 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1556 PrefAlign = Align(MaxTLSAlign);
1557 }
1558
1559 GV->setAlignment(PrefAlign);
1560 return PrefAlign;
1561 }
1562
1563 return Align(1);
1564}
1565
1566Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1567 const DataLayout &DL,
1568 const Instruction *CxtI,
1569 AssumptionCache *AC,
1570 const DominatorTree *DT) {
1571 assert(V->getType()->isPointerTy() &&
1572 "getOrEnforceKnownAlignment expects a pointer!");
1573
1574 KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
1575 unsigned TrailZ = Known.countMinTrailingZeros();
1576
1577 // Avoid trouble with ridiculously large TrailZ values, such as
1578 // those computed from a null pointer.
1579 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1580 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1581
1582 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1583
1584 if (PrefAlign && *PrefAlign > Alignment)
1585 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1586
1587 // We don't need to make any adjustment.
1588 return Alignment;
1589}
1590
1591///===---------------------------------------------------------------------===//
1592/// Dbg Intrinsic utilities
1593///
1594
1595/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1596static bool PhiHasDebugValue(DILocalVariable *DIVar,
1597 DIExpression *DIExpr,
1598 PHINode *APN) {
1599 // Since we can't guarantee that the original dbg.declare intrinsic
1600 // is removed by LowerDbgDeclare(), we need to make sure that we are
1601 // not inserting the same dbg.value intrinsic over and over.
1602 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1603 findDbgValues(APN, DbgVariableRecords);
1604 for (DbgVariableRecord *DVR : DbgVariableRecords) {
1605 assert(is_contained(DVR->location_ops(), APN));
1606 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1607 return true;
1608 }
1609 return false;
1610}
1611
1612/// Check if the alloc size of \p ValTy is large enough to cover the variable
1613/// (or fragment of the variable) described by \p DII.
1614///
1615/// This is primarily intended as a helper for the different
1616/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1617/// describes an alloca'd variable, so we need to use the alloc size of the
1618/// value when doing the comparison. E.g. an i1 value will be identified as
1619/// covering an n-bit fragment, if the store size of i1 is at least n bits.
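// Illustrative example (for exposition only): storing an i32 to an alloca
// that the dbg.declare describes as a 32-bit variable covers the whole
// variable, so the conversion is safe; an i8 store into the same variable
// would not cover it and is rejected.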
1620static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1621 const DataLayout &DL = DVR->getModule()->getDataLayout();
1622 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1623 if (std::optional<uint64_t> FragmentSize =
1624 DVR->getExpression()->getActiveBits(DVR->getVariable()))
1625 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1626
1627 // We can't always calculate the size of the DI variable (e.g. if it is a
1628 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1629 // instead.
1630 if (DVR->isAddressOfVariable()) {
1631 // DVR should have exactly 1 location when it is an address.
1632 assert(DVR->getNumVariableLocationOps() == 1 &&
1633 "address of variable must have exactly 1 location operand.");
1634 if (auto *AI =
1635 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
1636 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1637 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1638 }
1639 }
1640 }
1641 // Could not determine size of variable. Conservatively return false.
1642 return false;
1643}
1644
1645static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1646 DILocalVariable *DIVar,
1647 DIExpression *DIExpr,
1648 const DebugLoc &NewLoc,
1649 BasicBlock::iterator Instr) {
1650 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1651 DbgVariableRecord *DVRec =
1652 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1653 Instr->getParent()->insertDbgRecordBefore(DVRec, Instr);
1654}
1655
1656static DIExpression *dropInitialDeref(DIExpression *DIExpr) {
1657 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
1658 return DIExpression::get(DIExpr->getContext(),
1659 DIExpr->getElements().drop_front(NumEltDropped));
1660}
1661
1662void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1663 StoreInst *SI, DIBuilder &Builder) {
1664 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1665 auto *DIVar = DVR->getVariable();
1666 assert(DIVar && "Missing variable");
1667 auto *DIExpr = DVR->getExpression();
1668 Value *DV = SI->getValueOperand();
1669
1670 DebugLoc NewLoc = getDebugValueLoc(DVR);
1671
1672 // If the alloca describes the variable itself, i.e. the expression in the
1673 // dbg.declare doesn't start with a dereference, we can perform the
1674 // conversion if the value covers the entire fragment of DII.
1675 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1676 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1677 // We conservatively ignore other dereferences, because the following two are
1678 // not equivalent:
1679 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1680 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1681 // The former is adding 2 to the address of the variable, whereas the latter
1682 // is adding 2 to the value of the variable. As such, we insist on just a
1683 // deref expression.
1684  bool CanConvert =
1685      DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1686                            valueCoversEntireFragment(DV->getType(), DVR));
1687  if (CanConvert) {
1688 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1689 SI->getIterator());
1690 return;
1691 }
1692
1693 // FIXME: If storing to a part of the variable described by the dbg.declare,
1694 // then we want to insert a dbg.value for the corresponding fragment.
1695 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1696 << '\n');
1697
1698  // For now, when there is a store to parts of the variable (but we do not
1699  // know which part) we insert a dbg.value record to indicate that we know
1700  // nothing about the variable's content.
1701  DV = PoisonValue::get(DV->getType());
1702  ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1703  DbgVariableRecord *NewDVR =
1704 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1705 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1706}
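// Schematic before/after for the store case above (illustrative IR, names and
// record syntax are only a sketch):
//     #dbg_declare(ptr %x.addr, !"x", !DIExpression(), ...)
//     store i32 %v, ptr %x.addr
// is rewritten so the variable's value is tracked directly at the store:
//     store i32 %v, ptr %x.addr
//     #dbg_value(i32 %v, !"x", !DIExpression(), ...)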
1707
1708void llvm::InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI,
1709                                      DIBuilder &Builder) {
1710 auto *DIVar = DVR->getVariable();
1711 assert(DIVar && "Missing variable");
1712 auto *DIExpr = DVR->getExpression();
1713 DIExpr = dropInitialDeref(DIExpr);
1714 Value *DV = SI->getValueOperand();
1715
1716 DebugLoc NewLoc = getDebugValueLoc(DVR);
1717
1718 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1719 SI->getIterator());
1720}
1721
1722void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1723                                           DIBuilder &Builder) {
1724 auto *DIVar = DVR->getVariable();
1725 auto *DIExpr = DVR->getExpression();
1726 assert(DIVar && "Missing variable");
1727
1728 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1729 // FIXME: If only referring to a part of the variable described by the
1730 // dbg.declare, then we want to insert a DbgVariableRecord for the
1731 // corresponding fragment.
1732 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1733 << *DVR << '\n');
1734 return;
1735 }
1736
1737 DebugLoc NewLoc = getDebugValueLoc(DVR);
1738
1739 // We are now tracking the loaded value instead of the address. In the
1740 // future if multi-location support is added to the IR, it might be
1741 // preferable to keep tracking both the loaded value and the original
1742 // address in case the alloca can not be elided.
1743
1744  // Create a DbgVariableRecord directly and insert.
1745  ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1746  DbgVariableRecord *DV =
1747 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1748 LI->getParent()->insertDbgRecordAfter(DV, LI);
1749}
1750
1751/// Determine whether this alloca is either a VLA or an array.
1752static bool isArray(AllocaInst *AI) {
1753 return AI->isArrayAllocation() ||
1754 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1755}
1756
1757/// Determine whether this alloca is a structure.
1758static bool isStructure(AllocaInst *AI) {
1759 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1760}
1761void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1762                                           DIBuilder &Builder) {
1763 auto *DIVar = DVR->getVariable();
1764 auto *DIExpr = DVR->getExpression();
1765 assert(DIVar && "Missing variable");
1766
1767 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1768 return;
1769
1770 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1771 // FIXME: If only referring to a part of the variable described by the
1772 // dbg.declare, then we want to insert a DbgVariableRecord for the
1773 // corresponding fragment.
1774 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1775 << *DVR << '\n');
1776 return;
1777 }
1778
1779 BasicBlock *BB = APN->getParent();
1780 auto InsertionPt = BB->getFirstInsertionPt();
1781
1782 DebugLoc NewLoc = getDebugValueLoc(DVR);
1783
1784 // The block may be a catchswitch block, which does not have a valid
1785 // insertion point.
1786 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1787 if (InsertionPt != BB->end()) {
1788 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1789 InsertionPt);
1790 }
1791}
1792
1793/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
1794/// set of llvm.dbg.value intrinsics.
1795bool llvm::LowerDbgDeclare(Function &F) {
1796  bool Changed = false;
1797  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1798  SmallVector<DbgDeclareInst *, 4> Dbgs;
1799  SmallVector<DbgVariableRecord *> DVRs;
1800  for (auto &FI : F) {
1801 for (Instruction &BI : FI) {
1802 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1803 Dbgs.push_back(DDI);
1804 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1805 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1806 DVRs.push_back(&DVR);
1807 }
1808 }
1809 }
1810
1811 if (Dbgs.empty() && DVRs.empty())
1812 return Changed;
1813
1814 auto LowerOne = [&](DbgVariableRecord *DDI) {
1815 AllocaInst *AI =
1816 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1817 // If this is an alloca for a scalar variable, insert a dbg.value
1818 // at each load and store to the alloca and erase the dbg.declare.
1819 // The dbg.values allow tracking a variable even if it is not
1820 // stored on the stack, while the dbg.declare can only describe
1821 // the stack slot (and at a lexical-scope granularity). Later
1822 // passes will attempt to elide the stack slot.
1823 if (!AI || isArray(AI) || isStructure(AI))
1824 return;
1825
1826 // A volatile load/store means that the alloca can't be elided anyway.
1827 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1828 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1829 return LI->isVolatile();
1830 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1831 return SI->isVolatile();
1832 return false;
1833 }))
1834 return;
1835
1836    SmallVector<const Value *, 8> WorkList;
1837    WorkList.push_back(AI);
1838 while (!WorkList.empty()) {
1839 const Value *V = WorkList.pop_back_val();
1840 for (const auto &AIUse : V->uses()) {
1841 User *U = AIUse.getUser();
1842 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1843 if (AIUse.getOperandNo() == 1)
1844 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1845 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1846 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1847 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1848 // This is a call by-value or some other instruction that takes a
1849 // pointer to the variable. Insert a *value* intrinsic that describes
1850 // the variable by dereferencing the alloca.
1851 if (!CI->isLifetimeStartOrEnd()) {
1852 DebugLoc NewLoc = getDebugValueLoc(DDI);
1853 auto *DerefExpr =
1854 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1855 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1856 DerefExpr, NewLoc,
1857 CI->getIterator());
1858 }
1859 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1860 if (BI->getType()->isPointerTy())
1861 WorkList.push_back(BI);
1862 }
1863 }
1864 }
1865 DDI->eraseFromParent();
1866 Changed = true;
1867 };
1868
1869 for_each(DVRs, LowerOne);
1870
1871 if (Changed)
1872    for (BasicBlock &BB : F)
1873      RemoveRedundantDbgInstrs(&BB);
1874
1875 return Changed;
1876}
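// Minimal usage sketch (illustrative): a pass that intends to elide scalar
// allocas can simply call
//   bool Changed = LowerDbgDeclare(F);
// and rely on the per-load/store dbg.value records inserted above rather than
// the original dbg.declare.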
1877
1878/// Propagate dbg.value records through the newly inserted PHIs.
1879void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1880                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
1881 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
1882 if (InsertedPHIs.size() == 0)
1883 return;
1884
1885  // Map existing PHI nodes to their DbgVariableRecords.
1886  SmallDenseMap<Value *, DbgVariableRecord *, 8> DbgValueMap;
1887  for (auto &I : *BB) {
1888 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1889 for (Value *V : DVR.location_ops())
1890 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1891 DbgValueMap.insert({Loc, &DVR});
1892 }
1893 }
1894 if (DbgValueMap.size() == 0)
1895 return;
1896
1897 // Map a pair of the destination BB and old DbgVariableRecord to the new
1898 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
1899 // more than one of the inserted PHIs in the same destination BB, we can
1900 // update the same DbgVariableRecord with all the new PHIs instead of creating
1901  // one copy for each.
1902  MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
1903      NewDbgValueMap;
1904 // Then iterate through the new PHIs and look to see if they use one of the
1905 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
1906 // propagate the info through the new PHI. If we use more than one new PHI in
1907 // a single destination BB with the same old dbg.value, merge the updates so
1908 // that we get a single new DbgVariableRecord with all the new PHIs.
1909 for (auto PHI : InsertedPHIs) {
1910 BasicBlock *Parent = PHI->getParent();
1911 // Avoid inserting a debug-info record into an EH block.
1912 if (Parent->getFirstNonPHIIt()->isEHPad())
1913 continue;
1914 for (auto VI : PHI->operand_values()) {
1915 auto V = DbgValueMap.find(VI);
1916 if (V != DbgValueMap.end()) {
1917 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
1918 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1919 if (NewDI == NewDbgValueMap.end()) {
1920 DbgVariableRecord *NewDbgII = DbgII->clone();
1921 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1922 }
1923 DbgVariableRecord *NewDbgII = NewDI->second;
1924        // If PHI contains VI as an operand more than once, we may have
1925        // already replaced it in NewDbgII; confirm that it is still present.
1926 if (is_contained(NewDbgII->location_ops(), VI))
1927 NewDbgII->replaceVariableLocationOp(VI, PHI);
1928 }
1929 }
1930 }
1931 // Insert the new DbgVariableRecords into their destination blocks.
1932 for (auto DI : NewDbgValueMap) {
1933 BasicBlock *Parent = DI.first.first;
1934 DbgVariableRecord *NewDbgII = DI.second;
1935 auto InsertionPt = Parent->getFirstInsertionPt();
1936 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1937
1938 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
1939 }
1940}
1941
1942bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1943 DIBuilder &Builder, uint8_t DIExprFlags,
1944 int Offset) {
1946
1947 auto ReplaceOne = [&](DbgVariableRecord *DII) {
1948 assert(DII->getVariable() && "Missing variable");
1949 auto *DIExpr = DII->getExpression();
1950 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1951 DII->setExpression(DIExpr);
1952 DII->replaceVariableLocationOp(Address, NewAddress);
1953 };
1954
1955 for_each(DVRDeclares, ReplaceOne);
1956
1957 return !DVRDeclares.empty();
1958}
1959
1960static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
1961                                       DILocalVariable *DIVar,
1962 DIExpression *DIExpr, Value *NewAddress,
1963 DbgVariableRecord *DVR,
1964 DIBuilder &Builder, int Offset) {
1965 assert(DIVar && "Missing variable");
1966
1967 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
1968 // should do with the alloca pointer is dereference it. Otherwise we don't
1969 // know how to handle it and give up.
1970 if (!DIExpr || DIExpr->getNumElements() < 1 ||
1971 DIExpr->getElement(0) != dwarf::DW_OP_deref)
1972 return;
1973
1974 // Insert the offset before the first deref.
1975 if (Offset)
1976 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1977
1978 DVR->setExpression(DIExpr);
1979 DVR->replaceVariableLocationOp(0u, NewAddress);
1980}
1981
1982void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1983                                    DIBuilder &Builder, int Offset) {
1984  SmallVector<DbgVariableRecord *, 1> DPUsers;
1985  findDbgValues(AI, DPUsers);
1986
1987 // Replace any DbgVariableRecords that use this alloca.
1988 for (DbgVariableRecord *DVR : DPUsers)
1989 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
1990 DVR->getExpression(), NewAllocaAddress, DVR,
1991 Builder, Offset);
1992}
1993
1994/// Salvage debug information for \p I where possible; where that is not
1995/// possible, mark its debug uses as undef.
1996void llvm::salvageDebugInfo(Instruction &I) {
1997  SmallVector<DbgVariableRecord *, 1> DPUsers;
1998  findDbgUsers(&I, DPUsers);
1999  salvageDebugInfoForDbgValues(I, DPUsers);
2000}
2001
2002template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2003 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2004 // Only instructions can be salvaged at the moment.
2005 if (!I)
2006 return;
2007
2008 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2009 "address-expression shouldn't have fragment info");
2010
2011 // The address component of a dbg.assign cannot be variadic.
2012 uint64_t CurrentLocOps = 0;
2013  SmallVector<Value *, 4> AdditionalValues;
2014  SmallVector<uint64_t, 16> Ops;
2015  Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2016
2017 // Check if the salvage failed.
2018 if (!NewV)
2019 return;
2020
2021  DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2022      Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2023 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2024 "address-expression shouldn't have fragment info");
2025
2026 SalvagedExpr = SalvagedExpr->foldConstantMath();
2027
2028 // Salvage succeeds if no additional values are required.
2029 if (AdditionalValues.empty()) {
2030 Assign->setAddress(NewV);
2031 Assign->setAddressExpression(SalvagedExpr);
2032 } else {
2033 Assign->setKillAddress();
2034 }
2035}
2036
2037void llvm::salvageDebugInfoForDbgValues(Instruction &I,
2038                                        ArrayRef<DbgVariableRecord *> DPUsers) {
2039  // These are arbitrarily chosen limits on the maximum number of values and the
2040 // maximum size of a debug expression we can salvage up to, used for
2041 // performance reasons.
2042 const unsigned MaxDebugArgs = 16;
2043 const unsigned MaxExpressionSize = 128;
2044 bool Salvaged = false;
2045
2046 for (auto *DVR : DPUsers) {
2047 if (DVR->isDbgAssign()) {
2048      if (DVR->getAddress() == &I) {
2049        salvageDbgAssignAddress(DVR);
2050        Salvaged = true;
2051 }
2052 if (DVR->getValue() != &I)
2053 continue;
2054 }
2055
2056 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2057 // are implicitly pointing out the value as a DWARF memory location
2058 // description.
2059 bool StackValue =
2060 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2061 auto DVRLocation = DVR->location_ops();
2062 assert(
2063 is_contained(DVRLocation, &I) &&
2064 "DbgVariableIntrinsic must use salvaged instruction as its location");
2065 SmallVector<Value *, 4> AdditionalValues;
2066 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2067 // must be updated in the DIExpression and potentially have additional
2068 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2069 // DVRLocation.
2070 Value *Op0 = nullptr;
2071 DIExpression *SalvagedExpr = DVR->getExpression();
2072 auto LocItr = find(DVRLocation, &I);
2073  while (SalvagedExpr && LocItr != DVRLocation.end()) {
2074    SmallVector<uint64_t, 16> Ops;
2075    unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2076 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2077 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2078 if (!Op0)
2079 break;
2080 SalvagedExpr =
2081 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2082 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2083 }
2084  // salvageDebugInfoImpl either fails when examining the first use of I in
2085  // DVRLocation, or it does not fail at all.
2086 if (!Op0)
2087 break;
2088
2089 SalvagedExpr = SalvagedExpr->foldConstantMath();
2090 DVR->replaceVariableLocationOp(&I, Op0);
2091 bool IsValidSalvageExpr =
2092 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2093 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2094 DVR->setExpression(SalvagedExpr);
2095 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2096 IsValidSalvageExpr &&
2097 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2098 MaxDebugArgs) {
2099 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2100 } else {
2101 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2102 // currently only valid for stack value expressions.
2103 // Also do not salvage if the resulting DIArgList would contain an
2104 // unreasonably large number of values.
2105 DVR->setKillLocation();
2106 }
2107 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2108 Salvaged = true;
2109 }
2110
2111 if (Salvaged)
2112 return;
2113
2114 for (auto *DVR : DPUsers)
2115 DVR->setKillLocation();
2116}
2117
2118Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2119                           uint64_t CurrentLocOps,
2120                           SmallVectorImpl<uint64_t> &Opcodes,
2121                           SmallVectorImpl<Value *> &AdditionalValues) {
2122 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2123 // Rewrite a GEP into a DIExpression.
2124 SmallMapVector<Value *, APInt, 4> VariableOffsets;
2125 APInt ConstantOffset(BitWidth, 0);
2126 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2127 return nullptr;
2128 if (!VariableOffsets.empty() && !CurrentLocOps) {
2129 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2130 CurrentLocOps = 1;
2131 }
2132 for (const auto &Offset : VariableOffsets) {
2133 AdditionalValues.push_back(Offset.first);
2134 assert(Offset.second.isStrictlyPositive() &&
2135 "Expected strictly positive multiplier for offset.");
2136 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2137 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2138 dwarf::DW_OP_plus});
2139 }
2140 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2141 return GEP->getOperand(0);
2142}
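// Worked example of the rewrite above (illustrative): salvaging a debug use of
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// returns %base as the new location operand, pushes %i onto AdditionalValues,
// and appends roughly
//   DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_constu 4, DW_OP_mul, DW_OP_plus
// (4 being the element size in bytes; the constant offset of 0 adds nothing).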
2143
2144uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2145  switch (Opcode) {
2146 case Instruction::Add:
2147 return dwarf::DW_OP_plus;
2148 case Instruction::Sub:
2149 return dwarf::DW_OP_minus;
2150 case Instruction::Mul:
2151 return dwarf::DW_OP_mul;
2152 case Instruction::SDiv:
2153 return dwarf::DW_OP_div;
2154 case Instruction::SRem:
2155 return dwarf::DW_OP_mod;
2156 case Instruction::Or:
2157 return dwarf::DW_OP_or;
2158 case Instruction::And:
2159 return dwarf::DW_OP_and;
2160 case Instruction::Xor:
2161 return dwarf::DW_OP_xor;
2162 case Instruction::Shl:
2163 return dwarf::DW_OP_shl;
2164 case Instruction::LShr:
2165 return dwarf::DW_OP_shr;
2166 case Instruction::AShr:
2167 return dwarf::DW_OP_shra;
2168 default:
2169 // TODO: Salvage from each kind of binop we know about.
2170 return 0;
2171 }
2172}
2173
2174static void handleSSAValueOperands(uint64_t CurrentLocOps,
2175                                   SmallVectorImpl<uint64_t> &Opcodes,
2176                                   SmallVectorImpl<Value *> &AdditionalValues,
2177 Instruction *I) {
2178 if (!CurrentLocOps) {
2179 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2180 CurrentLocOps = 1;
2181 }
2182 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2183 AdditionalValues.push_back(I->getOperand(1));
2184}
2185
2186Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2187                             SmallVectorImpl<uint64_t> &Opcodes,
2188                             SmallVectorImpl<Value *> &AdditionalValues) {
2189 // Handle binary operations with constant integer operands as a special case.
2190 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2191 // Values wider than 64 bits cannot be represented within a DIExpression.
2192 if (ConstInt && ConstInt->getBitWidth() > 64)
2193 return nullptr;
2194
2195 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2196 // Push any Constant Int operand onto the expression stack.
2197 if (ConstInt) {
2198 uint64_t Val = ConstInt->getSExtValue();
2199 // Add or Sub Instructions with a constant operand can potentially be
2200 // simplified.
2201 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2202      uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2203      DIExpression::appendOffset(Opcodes, Offset);
2204      return BI->getOperand(0);
2205 }
2206 Opcodes.append({dwarf::DW_OP_constu, Val});
2207 } else {
2208 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2209 }
2210
2211 // Add salvaged binary operator to expression stack, if it has a valid
2212 // representation in a DIExpression.
2213 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2214 if (!DwarfBinOp)
2215 return nullptr;
2216 Opcodes.push_back(DwarfBinOp);
2217 return BI->getOperand(0);
2218}
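// For instance (illustrative): salvaging a debug use of
//   %y = add i64 %x, 16
// yields %x with the constant folded in as an offset (DW_OP_plus_uconst 16),
// while
//   %y = mul i64 %x, %z
// yields %x, appends {DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_mul}, and adds
// %z to AdditionalValues.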
2219
2220uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2221  // The signedness of the operation is implicit in the typed stack, signed and
2222 // unsigned instructions map to the same DWARF opcode.
2223 switch (Pred) {
2224 case CmpInst::ICMP_EQ:
2225 return dwarf::DW_OP_eq;
2226 case CmpInst::ICMP_NE:
2227 return dwarf::DW_OP_ne;
2228 case CmpInst::ICMP_UGT:
2229 case CmpInst::ICMP_SGT:
2230 return dwarf::DW_OP_gt;
2231 case CmpInst::ICMP_UGE:
2232 case CmpInst::ICMP_SGE:
2233 return dwarf::DW_OP_ge;
2234 case CmpInst::ICMP_ULT:
2235 case CmpInst::ICMP_SLT:
2236 return dwarf::DW_OP_lt;
2237 case CmpInst::ICMP_ULE:
2238 case CmpInst::ICMP_SLE:
2239 return dwarf::DW_OP_le;
2240 default:
2241 return 0;
2242 }
2243}
2244
2245Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2246                              SmallVectorImpl<uint64_t> &Opcodes,
2247                              SmallVectorImpl<Value *> &AdditionalValues) {
2248 // Handle icmp operations with constant integer operands as a special case.
2249 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2250 // Values wider than 64 bits cannot be represented within a DIExpression.
2251 if (ConstInt && ConstInt->getBitWidth() > 64)
2252 return nullptr;
2253 // Push any Constant Int operand onto the expression stack.
2254 if (ConstInt) {
2255 if (Icmp->isSigned())
2256 Opcodes.push_back(dwarf::DW_OP_consts);
2257 else
2258 Opcodes.push_back(dwarf::DW_OP_constu);
2259 uint64_t Val = ConstInt->getSExtValue();
2260 Opcodes.push_back(Val);
2261 } else {
2262 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2263 }
2264
2265  // Add the salvaged comparison to the expression stack, if it has a valid
2266  // representation in a DIExpression.
2267 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2268 if (!DwarfIcmpOp)
2269 return nullptr;
2270 Opcodes.push_back(DwarfIcmpOp);
2271 return Icmp->getOperand(0);
2272}
2273
2274Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2275                                  SmallVectorImpl<uint64_t> &Ops,
2276                                  SmallVectorImpl<Value *> &AdditionalValues) {
2277 auto &M = *I.getModule();
2278 auto &DL = M.getDataLayout();
2279
2280 if (auto *CI = dyn_cast<CastInst>(&I)) {
2281 Value *FromValue = CI->getOperand(0);
2282 // No-op casts are irrelevant for debug info.
2283 if (CI->isNoopCast(DL)) {
2284 return FromValue;
2285 }
2286
2287 Type *Type = CI->getType();
2288 if (Type->isPointerTy())
2289 Type = DL.getIntPtrType(Type);
2290    // Casts other than Trunc, SExt, ZExt, IntToPtr, or PtrToInt to scalar types cannot be salvaged.
2291 if (Type->isVectorTy() ||
2292 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2293 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2294 return nullptr;
2295
2296 llvm::Type *FromType = FromValue->getType();
2297 if (FromType->isPointerTy())
2298 FromType = DL.getIntPtrType(FromType);
2299
2300 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2301 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2302
2303 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2304 isa<SExtInst>(&I));
2305 Ops.append(ExtOps.begin(), ExtOps.end());
2306 return FromValue;
2307 }
2308
2309 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2310 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2311 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2312 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2313 if (auto *IC = dyn_cast<ICmpInst>(&I))
2314 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2315
2316 // *Not* to do: we should not attempt to salvage load instructions,
2317 // because the validity and lifetime of a dbg.value containing
2318 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2319 return nullptr;
2320}
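// Illustrative example: a debug use of
//   %w = zext i32 %v to i64
// can be salvaged to a use of %v with DIExpression::getExtOps supplying a
// zero-extension from 32 to 64 bits, whereas debug uses of loads are left
// alone for the reason given in the note above.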
2321
2322/// A replacement for a dbg.value expression.
2323using DbgValReplacement = std::optional<DIExpression *>;
2324
2325/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2326/// possibly moving/undefing users to prevent use-before-def. Returns true if
2327/// changes are made.
2328static bool rewriteDebugUsers(
2329    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2330 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2331  // Find debug users of From.
2332  SmallVector<DbgVariableRecord *, 1> DPUsers;
2333  findDbgUsers(&From, DPUsers);
2334 if (DPUsers.empty())
2335 return false;
2336
2337 // Prevent use-before-def of To.
2338 bool Changed = false;
2339
2340 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2341 if (isa<Instruction>(&To)) {
2342 bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
2343
2344 // DbgVariableRecord implementation of the above.
2345 for (auto *DVR : DPUsers) {
2346 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2347 Instruction *NextNonDebug = MarkedInstr;
2348
2349 // It's common to see a debug user between From and DomPoint. Move it
2350 // after DomPoint to preserve the variable update without any reordering.
2351 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2352 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2353 DVR->removeFromParent();
2354 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2355 Changed = true;
2356
2357 // Users which otherwise aren't dominated by the replacement value must
2358 // be salvaged or deleted.
2359 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2360 UndefOrSalvageDVR.insert(DVR);
2361 }
2362 }
2363 }
2364
2365 // Update debug users without use-before-def risk.
2366 for (auto *DVR : DPUsers) {
2367 if (UndefOrSalvageDVR.count(DVR))
2368 continue;
2369
2370 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2371 if (!DVRepl)
2372 continue;
2373
2374 DVR->replaceVariableLocationOp(&From, &To);
2375 DVR->setExpression(*DVRepl);
2376 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2377 Changed = true;
2378 }
2379
2380 if (!UndefOrSalvageDVR.empty()) {
2381    // Try to salvage the remaining debug users.
2382    salvageDebugInfo(From);
2383    Changed = true;
2384 }
2385
2386 return Changed;
2387}
2388
2389/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2390/// losslessly preserve the bits and semantics of the value. This predicate is
2391/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2392///
2393/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2394/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2395/// and also does not allow lossless pointer <-> integer conversions.
2396static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2397                                         Type *ToTy) {
2398 // Trivially compatible types.
2399 if (FromTy == ToTy)
2400 return true;
2401
2402 // Handle compatible pointer <-> integer conversions.
2403 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2404 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2405 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2406 !DL.isNonIntegralPointerType(ToTy);
2407 return SameSize && LosslessConversion;
2408 }
2409
2410 // TODO: This is not exhaustive.
2411 return false;
2412}
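// For example (illustrative): with 64-bit integral pointers, i64 <-> ptr is
// treated as lossless here, while <2 x i64> <-> <4 x i32> is rejected because
// only integer-or-pointer types of equal size are accepted.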
2413
2414bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2415                                 Instruction &DomPoint, DominatorTree &DT) {
2416 // Exit early if From has no debug users.
2417 if (!From.isUsedByMetadata())
2418 return false;
2419
2420 assert(&From != &To && "Can't replace something with itself");
2421
2422 Type *FromTy = From.getType();
2423 Type *ToTy = To.getType();
2424
2425 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2426 return DVR.getExpression();
2427 };
2428
2429 // Handle no-op conversions.
2430 Module &M = *From.getModule();
2431 const DataLayout &DL = M.getDataLayout();
2432 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2433 return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2434
2435 // Handle integer-to-integer widening and narrowing.
2436 // FIXME: Use DW_OP_convert when it's available everywhere.
2437 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2438 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2439 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2440 assert(FromBits != ToBits && "Unexpected no-op conversion");
2441
2442 // When the width of the result grows, assume that a debugger will only
2443 // access the low `FromBits` bits when inspecting the source variable.
2444 if (FromBits < ToBits)
2445 return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2446
2447 // The width of the result has shrunk. Use sign/zero extension to describe
2448 // the source variable's high bits.
2449 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2450 DILocalVariable *Var = DVR.getVariable();
2451
2452 // Without knowing signedness, sign/zero extension isn't possible.
2453 auto Signedness = Var->getSignedness();
2454 if (!Signedness)
2455 return std::nullopt;
2456
2457 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2458 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2459 Signed);
2460 };
2461 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExtDVR);
2462 }
2463
2464 // TODO: Floating-point conversions, vectors.
2465 return false;
2466}
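// Narrowing sketch (illustrative): replacing debug uses of an i64 %wide with
// an i32 %narrow appends a sign- or zero-extension from 32 back to 64 bits to
// each rewritten expression; records whose variable has unknown signedness are
// left untouched.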
2467
2468bool llvm::handleUnreachableTerminator(
2469    Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2470 bool Changed = false;
2471 // RemoveDIs: erase debug-info on this instruction manually.
2472 I->dropDbgRecords();
2473 for (Use &U : I->operands()) {
2474 Value *Op = U.get();
2475 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2476 U.set(PoisonValue::get(Op->getType()));
2477 PoisonedValues.push_back(Op);
2478 Changed = true;
2479 }
2480 }
2481
2482 return Changed;
2483}
2484
2485unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2486  unsigned NumDeadInst = 0;
2487  // Delete the instructions backwards; this reduces the number of def-use and
2488  // use-def chains that have to be updated.
2489 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2492
2493 while (EndInst != &BB->front()) {
2494 // Delete the next to last instruction.
2495 Instruction *Inst = &*--EndInst->getIterator();
2496    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2497      Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2498    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2499 // EHPads can't have DbgVariableRecords attached to them, but it might be
2500 // possible for things with token type.
2501 Inst->dropDbgRecords();
2502 EndInst = Inst;
2503 continue;
2504 }
2505 ++NumDeadInst;
2506 // RemoveDIs: erasing debug-info must be done manually.
2507 Inst->dropDbgRecords();
2508 Inst->eraseFromParent();
2509 }
2510 return NumDeadInst;
2511}
2512
2513unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2514 DomTreeUpdater *DTU,
2515 MemorySSAUpdater *MSSAU) {
2516 BasicBlock *BB = I->getParent();
2517
2518 if (MSSAU)
2519 MSSAU->changeToUnreachable(I);
2520
2521 SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
2522
2523 // Loop over all of the successors, removing BB's entry from any PHI
2524 // nodes.
2525 for (BasicBlock *Successor : successors(BB)) {
2526 Successor->removePredecessor(BB, PreserveLCSSA);
2527 if (DTU)
2528 UniqueSuccessors.insert(Successor);
2529 }
2530 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2531 UI->setDebugLoc(I->getDebugLoc());
2532
2533 // All instructions after this are dead.
2534 unsigned NumInstrsRemoved = 0;
2535 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2536 while (BBI != BBE) {
2537 if (!BBI->use_empty())
2538 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2539 BBI++->eraseFromParent();
2540 ++NumInstrsRemoved;
2541 }
2542  if (DTU) {
2543    SmallVector<DominatorTree::UpdateType, 8> Updates;
2544    Updates.reserve(UniqueSuccessors.size());
2545 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2546 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2547 DTU->applyUpdates(Updates);
2548 }
2550 return NumInstrsRemoved;
2551}
2552
2553CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2554  SmallVector<Value *, 8> Args(II->args());
2555  SmallVector<OperandBundleDef, 1> OpBundles;
2556  II->getOperandBundlesAsDefs(OpBundles);
2557 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2558 II->getCalledOperand(), Args, OpBundles);
2559 NewCall->setCallingConv(II->getCallingConv());
2560 NewCall->setAttributes(II->getAttributes());
2561 NewCall->setDebugLoc(II->getDebugLoc());
2562 NewCall->copyMetadata(*II);
2563
2564 // If the invoke had profile metadata, try converting them for CallInst.
2565 uint64_t TotalWeight;
2566 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2567 // Set the total weight if it fits into i32, otherwise reset.
2568 MDBuilder MDB(NewCall->getContext());
2569 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2570 ? nullptr
2571 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2572 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2573 }
2574
2575 return NewCall;
2576}
2577
2578// changeToCall - Convert the specified invoke into a normal call.
2579CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2580  CallInst *NewCall = createCallMatchingInvoke(II);
2581  NewCall->takeName(II);
2582 NewCall->insertBefore(II->getIterator());
2583 II->replaceAllUsesWith(NewCall);
2584
2585 // Follow the call by a branch to the normal destination.
2586 BasicBlock *NormalDestBB = II->getNormalDest();
2587 auto *BI = BranchInst::Create(NormalDestBB, II->getIterator());
2588 // Although it takes place after the call itself, the new branch is still
2589 // performing part of the control-flow functionality of the invoke, so we use
2590 // II's DebugLoc.
2591 BI->setDebugLoc(II->getDebugLoc());
2592
2593 // Update PHI nodes in the unwind destination
2594 BasicBlock *BB = II->getParent();
2595 BasicBlock *UnwindDestBB = II->getUnwindDest();
2596 UnwindDestBB->removePredecessor(BB);
2597 II->eraseFromParent();
2598 if (DTU)
2599 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2600 return NewCall;
2601}
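// Schematic effect of changeToCall (illustrative IR):
//   invoke void @f() to label %normal unwind label %lpad
// becomes
//   call void @f()
//   br label %normal
// and %lpad loses this block as a predecessor (plus a DomTree edge deletion
// when a DomTreeUpdater is supplied).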
2602
2603BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2604                                                   BasicBlock *UnwindEdge,
2605 DomTreeUpdater *DTU) {
2606 BasicBlock *BB = CI->getParent();
2607
2608 // Convert this function call into an invoke instruction. First, split the
2609 // basic block.
2610 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2611 CI->getName() + ".noexc");
2612
2613 // Delete the unconditional branch inserted by SplitBlock
2614 BB->back().eraseFromParent();
2615
2616 // Create the new invoke instruction.
2617  SmallVector<Value *, 8> InvokeArgs(CI->args());
2618  SmallVector<OperandBundleDef, 1> OpBundles;
2619
2620 CI->getOperandBundlesAsDefs(OpBundles);
2621
2622 // Note: we're round tripping operand bundles through memory here, and that
2623 // can potentially be avoided with a cleverer API design that we do not have
2624 // as of this time.
2625
2626  InvokeInst *II =
2627      InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2628                         UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2629 II->setDebugLoc(CI->getDebugLoc());
2630 II->setCallingConv(CI->getCallingConv());
2631 II->setAttributes(CI->getAttributes());
2632 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2633
2634 if (DTU)
2635 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2636
2637 // Make sure that anything using the call now uses the invoke! This also
2638  // updates the CallGraph if present, because it uses a WeakTrackingVH.
2639  CI->replaceAllUsesWith(II);
2640
2641 // Delete the original call
2642 Split->front().eraseFromParent();
2643 return Split;
2644}
2645
2646static bool markAliveBlocks(Function &F,
2647                            SmallPtrSetImpl<BasicBlock *> &Reachable,
2648                            DomTreeUpdater *DTU = nullptr) {
2649  SmallVector<BasicBlock *, 128> Worklist;
2650  BasicBlock *BB = &F.front();
2651 Worklist.push_back(BB);
2652 Reachable.insert(BB);
2653 bool Changed = false;
2654 do {
2655 BB = Worklist.pop_back_val();
2656
2657 // Do a quick scan of the basic block, turning any obviously unreachable
2658 // instructions into LLVM unreachable insts. The instruction combining pass
2659 // canonicalizes unreachable insts into stores to null or undef.
2660 for (Instruction &I : *BB) {
2661 if (auto *CI = dyn_cast<CallInst>(&I)) {
2662 Value *Callee = CI->getCalledOperand();
2663 // Handle intrinsic calls.
2664 if (Function *F = dyn_cast<Function>(Callee)) {
2665 auto IntrinsicID = F->getIntrinsicID();
2666 // Assumptions that are known to be false are equivalent to
2667 // unreachable. Also, if the condition is undefined, then we make the
2668 // choice most beneficial to the optimizer, and choose that to also be
2669 // unreachable.
2670 if (IntrinsicID == Intrinsic::assume) {
2671 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2672 // Don't insert a call to llvm.trap right before the unreachable.
2673 changeToUnreachable(CI, false, DTU);
2674 Changed = true;
2675 break;
2676 }
2677 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2678 // A call to the guard intrinsic bails out of the current
2679 // compilation unit if the predicate passed to it is false. If the
2680 // predicate is a constant false, then we know the guard will bail
2681 // out of the current compile unconditionally, so all code following
2682 // it is dead.
2683 //
2684 // Note: unlike in llvm.assume, it is not "obviously profitable" for
2685 // guards to treat `undef` as `false` since a guard on `undef` can
2686 // still be useful for widening.
2687 if (match(CI->getArgOperand(0), m_Zero()))
2688 if (!isa<UnreachableInst>(CI->getNextNode())) {
2689 changeToUnreachable(CI->getNextNode(), false, DTU);
2690 Changed = true;
2691 break;
2692 }
2693 }
2694 } else if ((isa<ConstantPointerNull>(Callee) &&
2695 !NullPointerIsDefined(CI->getFunction(),
2696 cast<PointerType>(Callee->getType())
2697 ->getAddressSpace())) ||
2698 isa<UndefValue>(Callee)) {
2699 changeToUnreachable(CI, false, DTU);
2700 Changed = true;
2701 break;
2702 }
2703 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2704 // If we found a call to a no-return function, insert an unreachable
2705 // instruction after it. Make sure there isn't *already* one there
2706 // though.
2707 if (!isa<UnreachableInst>(CI->getNextNode())) {
2708 // Don't insert a call to llvm.trap right before the unreachable.
2709 changeToUnreachable(CI->getNextNode(), false, DTU);
2710 Changed = true;
2711 }
2712 break;
2713 }
2714 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2715 // Store to undef and store to null are undefined and used to signal
2716 // that they should be changed to unreachable by passes that can't
2717 // modify the CFG.
2718
2719 // Don't touch volatile stores.
2720 if (SI->isVolatile()) continue;
2721
2722 Value *Ptr = SI->getOperand(1);
2723
2724 if (isa<UndefValue>(Ptr) ||
2725 (isa<ConstantPointerNull>(Ptr) &&
2726 !NullPointerIsDefined(SI->getFunction(),
2727 SI->getPointerAddressSpace()))) {
2728 changeToUnreachable(SI, false, DTU);
2729 Changed = true;
2730 break;
2731 }
2732 }
2733 }
2734
2735 Instruction *Terminator = BB->getTerminator();
2736 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2737 // Turn invokes that call 'nounwind' functions into ordinary calls.
2738 Value *Callee = II->getCalledOperand();
2739 if ((isa<ConstantPointerNull>(Callee) &&
2740 !NullPointerIsDefined(BB->getParent())) ||
2741 isa<UndefValue>(Callee)) {
2742 changeToUnreachable(II, false, DTU);
2743 Changed = true;
2744 } else {
2745 if (II->doesNotReturn() &&
2746 !isa<UnreachableInst>(II->getNormalDest()->front())) {
2747 // If we found an invoke of a no-return function,
2748 // create a new empty basic block with an `unreachable` terminator,
2749 // and set it as the normal destination for the invoke,
2750 // unless that is already the case.
2751 // Note that the original normal destination could have other uses.
2752 BasicBlock *OrigNormalDest = II->getNormalDest();
2753 OrigNormalDest->removePredecessor(II->getParent());
2754 LLVMContext &Ctx = II->getContext();
2755 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2756 Ctx, OrigNormalDest->getName() + ".unreachable",
2757 II->getFunction(), OrigNormalDest);
2758 auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
2759 UI->setDebugLoc(DebugLoc::getTemporary());
2760 II->setNormalDest(UnreachableNormalDest);
2761 if (DTU)
2762 DTU->applyUpdates(
2763 {{DominatorTree::Delete, BB, OrigNormalDest},
2764 {DominatorTree::Insert, BB, UnreachableNormalDest}});
2765 Changed = true;
2766 }
2767 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2768 if (II->use_empty() && !II->mayHaveSideEffects()) {
2769 // jump to the normal destination branch.
2770 BasicBlock *NormalDestBB = II->getNormalDest();
2771 BasicBlock *UnwindDestBB = II->getUnwindDest();
2772 BranchInst::Create(NormalDestBB, II->getIterator());
2773 UnwindDestBB->removePredecessor(II->getParent());
2774 II->eraseFromParent();
2775 if (DTU)
2776 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2777 } else
2778 changeToCall(II, DTU);
2779 Changed = true;
2780 }
2781 }
2782 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2783 // Remove catchpads which cannot be reached.
2784 struct CatchPadDenseMapInfo {
2785        static CatchPadInst *getEmptyKey() {
2786          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2787        }
2788
2789        static CatchPadInst *getTombstoneKey() {
2790          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2791        }
2792
2793 static unsigned getHashValue(CatchPadInst *CatchPad) {
2794 return static_cast<unsigned>(hash_combine_range(
2795 CatchPad->value_op_begin(), CatchPad->value_op_end()));
2796 }
2797
2798 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2799 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2800 RHS == getEmptyKey() || RHS == getTombstoneKey())
2801 return LHS == RHS;
2802 return LHS->isIdenticalTo(RHS);
2803 }
2804 };
2805
2806 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2807      // Set of unique CatchPads.
2808      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2809                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2810          HandlerSet;
2811      detail::DenseSetEmpty Empty;
2812      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2813 E = CatchSwitch->handler_end();
2814 I != E; ++I) {
2815 BasicBlock *HandlerBB = *I;
2816 if (DTU)
2817 ++NumPerSuccessorCases[HandlerBB];
2818 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHIIt());
2819 if (!HandlerSet.insert({CatchPad, Empty}).second) {
2820 if (DTU)
2821 --NumPerSuccessorCases[HandlerBB];
2822 CatchSwitch->removeHandler(I);
2823 --I;
2824 --E;
2825 Changed = true;
2826 }
2827 }
2828 if (DTU) {
2829 std::vector<DominatorTree::UpdateType> Updates;
2830 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2831 if (I.second == 0)
2832 Updates.push_back({DominatorTree::Delete, BB, I.first});
2833 DTU->applyUpdates(Updates);
2834 }
2835 }
2836
2837 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2838 for (BasicBlock *Successor : successors(BB))
2839 if (Reachable.insert(Successor).second)
2840 Worklist.push_back(Successor);
2841 } while (!Worklist.empty());
2842 return Changed;
2843}
2844
2845Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2846  Instruction *TI = BB->getTerminator();
2847
2848 if (auto *II = dyn_cast<InvokeInst>(TI))
2849 return changeToCall(II, DTU);
2850
2851 Instruction *NewTI;
2852 BasicBlock *UnwindDest;
2853
2854 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2855 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
2856 UnwindDest = CRI->getUnwindDest();
2857 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2858 auto *NewCatchSwitch = CatchSwitchInst::Create(
2859 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2860 CatchSwitch->getName(), CatchSwitch->getIterator());
2861 for (BasicBlock *PadBB : CatchSwitch->handlers())
2862 NewCatchSwitch->addHandler(PadBB);
2863
2864 NewTI = NewCatchSwitch;
2865 UnwindDest = CatchSwitch->getUnwindDest();
2866 } else {
2867 llvm_unreachable("Could not find unwind successor");
2868 }
2869
2870 NewTI->takeName(TI);
2871 NewTI->setDebugLoc(TI->getDebugLoc());
2872 UnwindDest->removePredecessor(BB);
2873 TI->replaceAllUsesWith(NewTI);
2874 TI->eraseFromParent();
2875 if (DTU)
2876 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2877 return NewTI;
2878}
2879
2880/// removeUnreachableBlocks - Remove blocks that are not reachable, even
2881/// if they are in a dead cycle. Return true if a change was made, false
2882/// otherwise.
2883bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2884                                   MemorySSAUpdater *MSSAU) {
2885  SmallPtrSet<BasicBlock *, 16> Reachable;
2886  bool Changed = markAliveBlocks(F, Reachable, DTU);
2887
2888 // If there are unreachable blocks in the CFG...
2889 if (Reachable.size() == F.size())
2890 return Changed;
2891
2892 assert(Reachable.size() < F.size());
2893
2894 // Are there any blocks left to actually delete?
2895 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2896 for (BasicBlock &BB : F) {
2897 // Skip reachable basic blocks
2898 if (Reachable.count(&BB))
2899 continue;
2900 // Skip already-deleted blocks
2901 if (DTU && DTU->isBBPendingDeletion(&BB))
2902 continue;
2903 BlocksToRemove.insert(&BB);
2904 }
2905
2906 if (BlocksToRemove.empty())
2907 return Changed;
2908
2909 Changed = true;
2910 NumRemoved += BlocksToRemove.size();
2911
2912 if (MSSAU)
2913 MSSAU->removeBlocks(BlocksToRemove);
2914
2915 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2916
2917 return Changed;
2918}
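// Minimal usage sketch (assumes a lazily-updated DomTreeUpdater; MemorySSA is
// optional):
//   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
//   bool Changed = removeUnreachableBlocks(F, &DTU);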
2919
2920/// If AAOnly is set, only intersect alias analysis metadata and preserve other
2921/// known metadata. Unknown metadata is always dropped.
2922static void combineMetadata(Instruction *K, const Instruction *J,
2923                            bool DoesKMove, bool AAOnly = false) {
2924  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2925  K->getAllMetadataOtherThanDebugLoc(Metadata);
2926 for (const auto &MD : Metadata) {
2927 unsigned Kind = MD.first;
2928 MDNode *JMD = J->getMetadata(Kind);
2929 MDNode *KMD = MD.second;
2930
2931 // TODO: Assert that this switch is exhaustive for fixed MD kinds.
2932 switch (Kind) {
2933 default:
2934 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2935 break;
2936 case LLVMContext::MD_dbg:
2937 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2938 case LLVMContext::MD_DIAssignID:
2939 if (!AAOnly)
2940 K->mergeDIAssignID(J);
2941 break;
2942 case LLVMContext::MD_tbaa:
2943 if (DoesKMove)
2944 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2945 break;
2946 case LLVMContext::MD_alias_scope:
2947 if (DoesKMove)
2948 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2949 break;
2950 case LLVMContext::MD_noalias:
2951 case LLVMContext::MD_mem_parallel_loop_access:
2952 if (DoesKMove)
2953 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2954 break;
2955 case LLVMContext::MD_access_group:
2956 if (DoesKMove)
2957 K->setMetadata(LLVMContext::MD_access_group,
2958 intersectAccessGroups(K, J));
2959 break;
2960 case LLVMContext::MD_range:
2961 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2962 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2963 break;
2964 case LLVMContext::MD_fpmath:
2965 if (!AAOnly)
2966 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2967 break;
2968 case LLVMContext::MD_invariant_load:
2969 // If K moves, only set the !invariant.load if it is present in both
2970 // instructions.
2971 if (DoesKMove)
2972 K->setMetadata(Kind, JMD);
2973 break;
2974 case LLVMContext::MD_nonnull:
2975 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2976 K->setMetadata(Kind, JMD);
2977 break;
2978 case LLVMContext::MD_invariant_group:
2979 // Preserve !invariant.group in K.
2980 break;
2981 // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
2982 // from being removed as unknown metadata. The actual merging is handled
2983 // separately below.
2984 case LLVMContext::MD_prof:
2985 case LLVMContext::MD_mmra:
2986 case LLVMContext::MD_memprof:
2987 case LLVMContext::MD_callsite:
2988 break;
2989 case LLVMContext::MD_callee_type:
2990 if (!AAOnly) {
2991 K->setMetadata(LLVMContext::MD_callee_type,
2993 }
2994 break;
2995 case LLVMContext::MD_align:
2996 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2997      K->setMetadata(
2998          Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2999      break;
3000 case LLVMContext::MD_dereferenceable:
3001 case LLVMContext::MD_dereferenceable_or_null:
3002 if (!AAOnly && DoesKMove)
3003        K->setMetadata(Kind,
3004                       MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3005      break;
3006 case LLVMContext::MD_preserve_access_index:
3007 // Preserve !preserve.access.index in K.
3008 break;
3009 case LLVMContext::MD_noundef:
3010 // If K does move, keep noundef if it is present in both instructions.
3011 if (!AAOnly && DoesKMove)
3012 K->setMetadata(Kind, JMD);
3013 break;
3014 case LLVMContext::MD_nontemporal:
3015 // Preserve !nontemporal if it is present on both instructions.
3016 if (!AAOnly)
3017 K->setMetadata(Kind, JMD);
3018 break;
3019 case LLVMContext::MD_noalias_addrspace:
3020 if (DoesKMove)
3021        K->setMetadata(Kind,
3022                       MDNode::getMostGenericNoaliasAddrspace(JMD, KMD));
3023      break;
3024 case LLVMContext::MD_nosanitize:
3025 // Preserve !nosanitize if both K and J have it.
3026 K->setMetadata(Kind, JMD);
3027 break;
3028 }
3029 }
3030 // Set !invariant.group from J if J has it. If both instructions have it
3031 // then we will just pick it from J - even when they are different.
3032  // Also make sure that K is a load or store - e.g. combining a bitcast with a load
3033 // could produce bitcast with invariant.group metadata, which is invalid.
3034 // FIXME: we should try to preserve both invariant.group md if they are
3035 // different, but right now instruction can only have one invariant.group.
3036 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3037 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3038 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3039
3040 // Merge MMRAs.
3041 // This is handled separately because we also want to handle cases where K
3042 // doesn't have tags but J does.
3043 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3044 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3045 if (JMMRA || KMMRA) {
3046 K->setMetadata(LLVMContext::MD_mmra,
3047 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3048 }
3049
3050 // Merge memprof metadata.
3051 // Handle separately to support cases where only one instruction has the
3052 // metadata.
3053 auto *JMemProf = J->getMetadata(LLVMContext::MD_memprof);
3054 auto *KMemProf = K->getMetadata(LLVMContext::MD_memprof);
3055 if (!AAOnly && (JMemProf || KMemProf)) {
3056 K->setMetadata(LLVMContext::MD_memprof,
3057 MDNode::getMergedMemProfMetadata(KMemProf, JMemProf));
3058 }
3059
3060 // Merge callsite metadata.
3061 // Handle separately to support cases where only one instruction has the
3062 // metadata.
3063 auto *JCallSite = J->getMetadata(LLVMContext::MD_callsite);
3064 auto *KCallSite = K->getMetadata(LLVMContext::MD_callsite);
3065 if (!AAOnly && (JCallSite || KCallSite)) {
3066 K->setMetadata(LLVMContext::MD_callsite,
3067 MDNode::getMergedCallsiteMetadata(KCallSite, JCallSite));
3068 }
3069
3070 // Merge prof metadata.
3071 // Handle separately to support cases where only one instruction has the
3072 // metadata.
3073 auto *JProf = J->getMetadata(LLVMContext::MD_prof);
3074 auto *KProf = K->getMetadata(LLVMContext::MD_prof);
3075 if (!AAOnly && (JProf || KProf)) {
3076 K->setMetadata(LLVMContext::MD_prof,
3077 MDNode::getMergedProfMetadata(KProf, JProf, K, J));
3078 }
3079}
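// For example (illustrative): when CSE keeps instruction K and discards an
// equivalent J, two loads carrying !range [0,10) and [5,20) end up with the
// most generic range spanning both, while metadata kinds not handled in the
// switch above are simply dropped from K.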
3080
3081void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3082                                 bool DoesKMove) {
3083 combineMetadata(K, J, DoesKMove);
3084}
3085
3086void llvm::combineAAMetadata(Instruction *K, const Instruction *J) {
3087  combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
3088}
3089
3090void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3091  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3092  Source.getAllMetadata(MD);
3093 MDBuilder MDB(Dest.getContext());
3094 Type *NewType = Dest.getType();
3095 const DataLayout &DL = Source.getDataLayout();
3096 for (const auto &MDPair : MD) {
3097 unsigned ID = MDPair.first;
3098 MDNode *N = MDPair.second;
3099 // Note, essentially every kind of metadata should be preserved here! This
3100 // routine is supposed to clone a load instruction changing *only its type*.
3101 // The only metadata it makes sense to drop is metadata which is invalidated
3102 // when the pointer type changes. This should essentially never be the case
3103 // in LLVM, but we explicitly switch over only known metadata to be
3104 // conservatively correct. If you are adding metadata to LLVM which pertains
3105 // to loads, you almost certainly want to add it here.
3106 switch (ID) {
3107 case LLVMContext::MD_dbg:
3108 case LLVMContext::MD_tbaa:
3109 case LLVMContext::MD_prof:
3110 case LLVMContext::MD_fpmath:
3111 case LLVMContext::MD_tbaa_struct:
3112 case LLVMContext::MD_invariant_load:
3113 case LLVMContext::MD_alias_scope:
3114 case LLVMContext::MD_noalias:
3115 case LLVMContext::MD_nontemporal:
3116 case LLVMContext::MD_mem_parallel_loop_access:
3117 case LLVMContext::MD_access_group:
3118 case LLVMContext::MD_noundef:
3119 case LLVMContext::MD_noalias_addrspace:
3120 // All of these directly apply.
3121 Dest.setMetadata(ID, N);
3122 break;
3123
3124 case LLVMContext::MD_nonnull:
3125 copyNonnullMetadata(Source, N, Dest);
3126 break;
3127
3128 case LLVMContext::MD_align:
3129 case LLVMContext::MD_dereferenceable:
3130 case LLVMContext::MD_dereferenceable_or_null:
3131 // These only directly apply if the new type is also a pointer.
3132 if (NewType->isPointerTy())
3133 Dest.setMetadata(ID, N);
3134 break;
3135
3136 case LLVMContext::MD_range:
3137 copyRangeMetadata(DL, Source, N, Dest);
3138 break;
3139 }
3140 }
3141}
3142
3143void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3144  auto *ReplInst = dyn_cast<Instruction>(Repl);
3145 if (!ReplInst)
3146 return;
3147
3148 // Patch the replacement so that it is not more restrictive than the value
3149 // being replaced.
3150 WithOverflowInst *UnusedWO;
3151 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3152 // overflowing binary operator, nuw/nsw flags may no longer hold.
3153 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3154 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3155 ReplInst->dropPoisonGeneratingFlags();
3156 // Note that if 'I' is a load being replaced by some operation,
3157 // for example, by an arithmetic operation, then andIRFlags()
3158 // would just erase all math flags from the original arithmetic
3159 // operation, which is clearly not wanted and not needed.
3160 else if (!isa<LoadInst>(I))
3161 ReplInst->andIRFlags(I);
3162
3163 // Handle attributes.
3164 if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) {
3165 if (auto *CB2 = dyn_cast<CallBase>(I)) {
3166 bool Success = CB1->tryIntersectAttributes(CB2);
3167 assert(Success && "We should not be trying to sink callbases "
3168 "with non-intersectable attributes");
3169 // For NDEBUG Compile.
3170 (void)Success;
3171 }
3172 }
3173
3174 // FIXME: If both the original and replacement value are part of the
3175 // same control-flow region (meaning that the execution of one
3176 // guarantees the execution of the other), then we can combine the
3177 // noalias scopes here and do better than the general conservative
3178 // answer used in combineMetadata().
3179
3180 // In general, GVN unifies expressions over different control-flow
3181 // regions, and so we need a conservative combination of the noalias
3182 // scopes.
3183 combineMetadataForCSE(ReplInst, I, false);
3184}
3185
3186template <typename ShouldReplaceFn>
3187static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3188                                         const ShouldReplaceFn &ShouldReplace) {
3189 assert(From->getType() == To->getType());
3190
3191 unsigned Count = 0;
3192 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3193 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
3194 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3195 continue;
3196 if (!ShouldReplace(U))
3197 continue;
3198 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3199 From->printAsOperand(dbgs());
3200 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3201 U.set(To);
3202 ++Count;
3203 }
3204 return Count;
3205}
3206
3207unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3208  assert(From->getType() == To->getType());
3209 auto *BB = From->getParent();
3210 unsigned Count = 0;
3211
3212 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3213 auto *I = cast<Instruction>(U.getUser());
3214 if (I->getParent() == BB)
3215 continue;
3216 U.set(To);
3217 ++Count;
3218 }
3219 return Count;
3220}
3221
3222unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3223                                        DominatorTree &DT,
3224 const BasicBlockEdge &Root) {
3225 auto Dominates = [&](const Use &U) { return DT.dominates(Root, U); };
3226 return ::replaceDominatedUsesWith(From, To, Dominates);
3227}
3228
3229unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3230                                        DominatorTree &DT,
3231 const BasicBlock *BB) {
3232 auto Dominates = [&](const Use &U) { return DT.dominates(BB, U); };
3233 return ::replaceDominatedUsesWith(From, To, Dominates);
3234}
3235
3236unsigned llvm::replaceDominatedUsesWithIf(
3237    Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3238 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3239 auto DominatesAndShouldReplace = [&](const Use &U) {
3240 return DT.dominates(Root, U) && ShouldReplace(U, To);
3241 };
3242 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3243}
3244
3246 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3247 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3248 auto DominatesAndShouldReplace = [&](const Use &U) {
3249 return DT.dominates(BB, U) && ShouldReplace(U, To);
3250 };
3251 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3252}
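// Typical usage sketch (illustrative): after proving %x == %c along the edge
// Root, a pass can rewrite only the dominated uses with
//   unsigned NumReplaced = replaceDominatedUsesWith(X, C, DT, Root);
// uses inside llvm.fake.use intrinsics are deliberately skipped above.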
3253
3254bool llvm::callsGCLeafFunction(const CallBase *Call,
3255                               const TargetLibraryInfo &TLI) {
3256 // Check if the function is specifically marked as a gc leaf function.
3257 if (Call->hasFnAttr("gc-leaf-function"))
3258 return true;
3259 if (const Function *F = Call->getCalledFunction()) {
3260 if (F->hasFnAttribute("gc-leaf-function"))
3261 return true;
3262
3263 if (auto IID = F->getIntrinsicID()) {
3264 // Most LLVM intrinsics do not take safepoints.
3265 return IID != Intrinsic::experimental_gc_statepoint &&
3266 IID != Intrinsic::experimental_deoptimize &&
3267 IID != Intrinsic::memcpy_element_unordered_atomic &&
3268 IID != Intrinsic::memmove_element_unordered_atomic;
3269 }
3270 }
3271
3272 // Lib calls can be materialized by some passes, and won't be
3273 // marked as 'gc-leaf-function.' All available Libcalls are
3274 // GC-leaf.
3275 LibFunc LF;
3276 if (TLI.getLibFunc(*Call, LF)) {
3277 return TLI.has(LF);
3278 }
3279
3280 return false;
3281}
3282
3283void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3284                               LoadInst &NewLI) {
3285 auto *NewTy = NewLI.getType();
3286
3287 // This only directly applies if the new type is also a pointer.
3288 if (NewTy->isPointerTy()) {
3289 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3290 return;
3291 }
3292
3293 // The only other translation we can do is to integral loads with !range
3294 // metadata.
3295 if (!NewTy->isIntegerTy())
3296 return;
3297
3298 MDBuilder MDB(NewLI.getContext());
3299 const Value *Ptr = OldLI.getPointerOperand();
3300 auto *ITy = cast<IntegerType>(NewTy);
3301 auto *NullInt = ConstantExpr::getPtrToInt(
3302 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3303 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3304 NewLI.setMetadata(LLVMContext::MD_range,
3305 MDB.createRange(NonNullInt, NullInt));
3306}
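Usage sketch (not part of Local.cpp): how a caller that rewrites a pointer load as a same-width integer load might carry the old !nonnull over; Builder, Slot and the i64 width are hypothetical.

// Illustrative only -- hypothetical caller, not from this file.
static LoadInst *rewriteAsIntLoad(IRBuilder<> &Builder, LoadInst &OldLI,
                                  Value *Slot) {
  LoadInst *NewLI = Builder.CreateLoad(Builder.getInt64Ty(), Slot, "as.int");
  if (MDNode *N = OldLI.getMetadata(LLVMContext::MD_nonnull))
    copyNonnullMetadata(OldLI, N, *NewLI); // becomes !range [1, 0) on NewLI
  return NewLI;
}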
3307
3308void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3309 MDNode *N, LoadInst &NewLI) {
3310 auto *NewTy = NewLI.getType();
3311 // Simply copy the metadata if the type did not change.
3312 if (NewTy == OldLI.getType()) {
3313 NewLI.setMetadata(LLVMContext::MD_range, N);
3314 return;
3315 }
3316
3317 // Give up unless it is converted to a pointer where there is a single very
3318 // valuable mapping we can do reliably.
3319 // FIXME: It would be nice to propagate this in more ways, but the type
3320 // conversions make it hard.
3321 if (!NewTy->isPointerTy())
3322 return;
3323
3324 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3325 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3326 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3327 MDNode *NN = MDNode::get(OldLI.getContext(), {});
3328 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3329 }
3330}
3331
3332void llvm::dropDebugUsers(Instruction &I) {
3333 SmallVector<DbgVariableRecord *, 1> DPUsers;
3334 findDbgUsers(&I, DPUsers);
3335 for (auto *DVR : DPUsers)
3336 DVR->eraseFromParent();
3337}
3338
3339void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3340 BasicBlock *BB) {
3341 // Since we are moving the instructions out of their basic block, we do not
3342 // retain their original debug locations (DILocations) and debug intrinsic
3343 // instructions.
3344 //
3345 // Doing so would degrade the debugging experience and adversely affect the
3346 // accuracy of profiling information.
3347 //
3348 // Currently, when hoisting the instructions, we take the following actions:
3349 // - Remove their debug intrinsic instructions.
3350 // - Set their debug locations to the values from the insertion point.
3351 //
3352 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3353 // need to be deleted is that there will not be any instructions with a
3354 // DILocation in either branch left after performing the transformation. We
3355 // can only insert a dbg.value after the two branches are joined again.
3356 //
3357 // See PR38762, PR39243 for more details.
3358 //
3359 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3360 // encode predicated DIExpressions that yield different results on different
3361 // code paths.
3362
3363 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3364 Instruction *I = &*II;
3365 I->dropUBImplyingAttrsAndMetadata();
3366 if (I->isUsedByMetadata())
3367 dropDebugUsers(*I);
3368 // RemoveDIs: drop debug-info too as the following code does.
3369 I->dropDbgRecords();
3370 if (I->isDebugOrPseudoInst()) {
3371 // Remove DbgInfo and pseudo probe Intrinsics.
3372 II = I->eraseFromParent();
3373 continue;
3374 }
3375 I->setDebugLoc(InsertPt->getDebugLoc());
3376 ++II;
3377 }
3378 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3379 BB->getTerminator()->getIterator());
3380}
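Usage sketch (not part of Local.cpp): the typical speculation-style call, hoisting a conditionally executed block up to just before the branch that dominates it; hoistThenBlock and its arguments are hypothetical.

// Illustrative only -- hypothetical caller, not from this file.
static void hoistThenBlock(BasicBlock *DomBB, BasicBlock *ThenBB) {
  // Moves every non-terminator of ThenBB before DomBB's terminator,
  // dropping debug records and resetting debug locations as described above.
  hoistAllInstructionsInto(DomBB, DomBB->getTerminator(), ThenBB);
}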
3381
3382DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3383 Type &Ty) {
3384 // Create integer constant expression.
3385 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3386 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3387 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3388 return InitIntOpt ? DIB.createConstantValueExpression(
3389 static_cast<uint64_t>(*InitIntOpt))
3390 : nullptr;
3391 };
3392
3393 if (isa<ConstantInt>(C))
3394 return createIntegerExpression(C);
3395
3396 auto *FP = dyn_cast<ConstantFP>(&C);
3397 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3398 const APFloat &APF = FP->getValueAPF();
3399 APInt const &API = APF.bitcastToAPInt();
3400 if (auto Temp = API.getZExtValue())
3401 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3402 return DIB.createConstantValueExpression(*API.getRawData());
3403 }
3404
3405 if (!Ty.isPointerTy())
3406 return nullptr;
3407
3408 if (isa<ConstantPointerNull>(C))
3409 return DIB.createConstantValueExpression(0);
3410
3411 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3412 if (CE->getOpcode() == Instruction::IntToPtr) {
3413 const Value *V = CE->getOperand(0);
3414 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3415 return createIntegerExpression(*CI);
3416 }
3417 return nullptr;
3418}
3419
3420void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
3421 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3422 for (auto *Op : Set) {
3423 auto I = Mapping.find(Op);
3424 if (I != Mapping.end())
3425 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3426 }
3427 };
3428 auto RemapAssignAddress = [&Mapping](auto *DA) {
3429 auto I = Mapping.find(DA->getAddress());
3430 if (I != Mapping.end())
3431 DA->setAddress(I->second);
3432 };
3433 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3434 RemapDebugOperands(&DVR, DVR.location_ops());
3435 if (DVR.isDbgAssign())
3436 RemapAssignAddress(&DVR);
3437 }
3438}
3439
3440namespace {
3441
3442/// A potential constituent of a bitreverse or bswap expression. See
3443/// collectBitParts for a fuller explanation.
3444struct BitPart {
3445 BitPart(Value *P, unsigned BW) : Provider(P) {
3446 Provenance.resize(BW);
3447 }
3448
3449 /// The Value that this is a bitreverse/bswap of.
3450 Value *Provider;
3451
3452 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3453 /// in Provider becomes bit B in the result of this expression.
3454 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3455
3456 enum { Unset = -1 };
3457};
3458
3459} // end anonymous namespace
3460
3461/// Analyze the specified subexpression and see if it is capable of providing
3462/// pieces of a bswap or bitreverse. The subexpression provides a potential
3463/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3464/// the output of the expression came from a corresponding bit in some other
3465/// value. This function is recursive, and the end result is a mapping of
3466/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3467/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3468///
3469 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3470/// that the expression deposits the low byte of %X into the high byte of the
3471/// result and that all other bits are zero. This expression is accepted and a
3472/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3473/// [0-7].
3474///
3475/// For vector types, all analysis is performed at the per-element level. No
3476/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3477/// constant masks must be splatted across all elements.
3478///
3479/// To avoid revisiting values, the BitPart results are memoized into the
3480/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3481/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3482/// store BitParts objects, not pointers. As we need the concept of a nullptr
3483 /// BitParts (Value has been analyzed and the analysis failed), we use an Optional
3484/// type instead to provide the same functionality.
3485///
3486/// Because we pass around references into \c BPS, we must use a container that
3487/// does not invalidate internal references (std::map instead of DenseMap).
3488static const std::optional<BitPart> &
3489collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3490 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3491 bool &FoundRoot) {
3492 auto [I, Inserted] = BPS.try_emplace(V);
3493 if (!Inserted)
3494 return I->second;
3495
3496 auto &Result = I->second;
3497 auto BitWidth = V->getType()->getScalarSizeInBits();
3498
3499 // Can't do integer/elements > 128 bits.
3500 if (BitWidth > 128)
3501 return Result;
3502
3503 // Prevent stack overflow by limiting the recursion depth
3504 if (Depth == BitPartRecursionMaxDepth) {
3505 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3506 return Result;
3507 }
3508
3509 if (auto *I = dyn_cast<Instruction>(V)) {
3510 Value *X, *Y;
3511 const APInt *C;
3512
3513 // If this is an or instruction, it may be an inner node of the bswap.
3514 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3515 // Check we have both sources and they are from the same provider.
3516 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3517 Depth + 1, FoundRoot);
3518 if (!A || !A->Provider)
3519 return Result;
3520
3521 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3522 Depth + 1, FoundRoot);
3523 if (!B || A->Provider != B->Provider)
3524 return Result;
3525
3526 // Try and merge the two together.
3527 Result = BitPart(A->Provider, BitWidth);
3528 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3529 if (A->Provenance[BitIdx] != BitPart::Unset &&
3530 B->Provenance[BitIdx] != BitPart::Unset &&
3531 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3532 return Result = std::nullopt;
3533
3534 if (A->Provenance[BitIdx] == BitPart::Unset)
3535 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3536 else
3537 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3538 }
3539
3540 return Result;
3541 }
3542
3543 // If this is a logical shift by a constant, recurse then shift the result.
3544 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3545 const APInt &BitShift = *C;
3546
3547 // Ensure the shift amount is defined.
3548 if (BitShift.uge(BitWidth))
3549 return Result;
3550
3551 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3552 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3553 return Result;
3554
3555 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3556 Depth + 1, FoundRoot);
3557 if (!Res)
3558 return Result;
3559 Result = Res;
3560
3561 // Perform the "shift" on BitProvenance.
3562 auto &P = Result->Provenance;
3563 if (I->getOpcode() == Instruction::Shl) {
3564 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3565 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3566 } else {
3567 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3568 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3569 }
3570
3571 return Result;
3572 }
3573
3574 // If this is a logical 'and' with a mask that clears bits, recurse then
3575 // unset the appropriate bits.
3576 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3577 const APInt &AndMask = *C;
3578
3579 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3580 // early exit.
3581 unsigned NumMaskedBits = AndMask.popcount();
3582 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3583 return Result;
3584
3585 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3586 Depth + 1, FoundRoot);
3587 if (!Res)
3588 return Result;
3589 Result = Res;
3590
3591 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3592 // If the AndMask is zero for this bit, clear the bit.
3593 if (AndMask[BitIdx] == 0)
3594 Result->Provenance[BitIdx] = BitPart::Unset;
3595 return Result;
3596 }
3597
3598 // If this is a zext instruction zero extend the result.
3599 if (match(V, m_ZExt(m_Value(X)))) {
3600 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3601 Depth + 1, FoundRoot);
3602 if (!Res)
3603 return Result;
3604
3605 Result = BitPart(Res->Provider, BitWidth);
3606 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3607 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3608 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3609 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3610 Result->Provenance[BitIdx] = BitPart::Unset;
3611 return Result;
3612 }
3613
3614 // If this is a truncate instruction, extract the lower bits.
3615 if (match(V, m_Trunc(m_Value(X)))) {
3616 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3617 Depth + 1, FoundRoot);
3618 if (!Res)
3619 return Result;
3620
3621 Result = BitPart(Res->Provider, BitWidth);
3622 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3623 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3624 return Result;
3625 }
3626
3627 // BITREVERSE - most likely due to us previously matching a partial
3628 // bitreverse.
3629 if (match(V, m_BitReverse(m_Value(X)))) {
3630 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3631 Depth + 1, FoundRoot);
3632 if (!Res)
3633 return Result;
3634
3635 Result = BitPart(Res->Provider, BitWidth);
3636 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3637 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3638 return Result;
3639 }
3640
3641 // BSWAP - most likely due to us previously matching a partial bswap.
3642 if (match(V, m_BSwap(m_Value(X)))) {
3643 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3644 Depth + 1, FoundRoot);
3645 if (!Res)
3646 return Result;
3647
3648 unsigned ByteWidth = BitWidth / 8;
3649 Result = BitPart(Res->Provider, BitWidth);
3650 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3651 unsigned ByteBitOfs = ByteIdx * 8;
3652 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3653 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3654 Res->Provenance[ByteBitOfs + BitIdx];
3655 }
3656 return Result;
3657 }
3658
3659 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3660 // amount (modulo).
3661 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3662 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3663 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3664 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3665 // We can treat fshr as a fshl by flipping the modulo amount.
3666 unsigned ModAmt = C->urem(BitWidth);
3667 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3668 ModAmt = BitWidth - ModAmt;
3669
3670 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3671 if (!MatchBitReversals && (ModAmt % 8) != 0)
3672 return Result;
3673
3674 // Check we have both sources and they are from the same provider.
3675 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3676 Depth + 1, FoundRoot);
3677 if (!LHS || !LHS->Provider)
3678 return Result;
3679
3680 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3681 Depth + 1, FoundRoot);
3682 if (!RHS || LHS->Provider != RHS->Provider)
3683 return Result;
3684
3685 unsigned StartBitRHS = BitWidth - ModAmt;
3686 Result = BitPart(LHS->Provider, BitWidth);
3687 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3688 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3689 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3690 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3691 return Result;
3692 }
3693 }
3694
3695 // If we've already found a root input value then we're never going to merge
3696 // these back together.
3697 if (FoundRoot)
3698 return Result;
3699
3700 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3701 // be the root input value to the bswap/bitreverse.
3702 FoundRoot = true;
3703 Result = BitPart(V, BitWidth);
3704 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3705 Result->Provenance[BitIdx] = BitIdx;
3706 return Result;
3707}
3708
3709static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3710 unsigned BitWidth) {
3711 if (From % 8 != To % 8)
3712 return false;
3713 // Convert from bit indices to byte indices and check for a byte reversal.
3714 From >>= 3;
3715 To >>= 3;
3716 BitWidth >>= 3;
3717 return From == BitWidth - To - 1;
3718}
3719
3720static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3721 unsigned BitWidth) {
3722 return From == BitWidth - To - 1;
3723}
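A worked example (added note, not in the original source) of the two predicates above, written as comments:

// Added note, not from this file: with BitWidth = 32,
//   bitTransformIsCorrectForBSwap(/*From=*/5, /*To=*/29, 32)      == true
//   bitTransformIsCorrectForBitReverse(/*From=*/5, /*To=*/26, 32) == true
// A byte swap keeps a bit's position within its byte while moving byte 0 to
// byte 3, whereas a bit reversal maps bit B to bit 31 - B.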
3724
3725bool llvm::recognizeBSwapOrBitReverseIdiom(
3726 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3727 SmallVectorImpl<Instruction *> &InsertedInsts) {
3728 if (!match(I, m_Or(m_Value(), m_Value())) &&
3729 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3730 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3731 !match(I, m_BSwap(m_Value())))
3732 return false;
3733 if (!MatchBSwaps && !MatchBitReversals)
3734 return false;
3735 Type *ITy = I->getType();
3736 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
3737 ITy->getScalarSizeInBits() > 128)
3738 return false; // Can't do integer/elements > 128 bits.
3739
3740 // Try to find all the pieces corresponding to the bswap.
3741 bool FoundRoot = false;
3742 std::map<Value *, std::optional<BitPart>> BPS;
3743 const auto &Res =
3744 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3745 if (!Res)
3746 return false;
3747 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3748 assert(all_of(BitProvenance,
3749 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3750 "Illegal bit provenance index");
3751
3752 // If the upper bits are zero, then attempt to perform as a truncated op.
3753 Type *DemandedTy = ITy;
3754 if (BitProvenance.back() == BitPart::Unset) {
3755 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3756 BitProvenance = BitProvenance.drop_back();
3757 if (BitProvenance.empty())
3758 return false; // TODO - handle null value?
3759 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3760 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3761 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3762 }
3763
3764 // Check BitProvenance hasn't found a source larger than the result type.
3765 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3766 if (DemandedBW > ITy->getScalarSizeInBits())
3767 return false;
3768
3769 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3770 // only byteswap values with an even number of bytes.
3771 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3772 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3773 bool OKForBitReverse = MatchBitReversals;
3774 for (unsigned BitIdx = 0;
3775 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3776 if (BitProvenance[BitIdx] == BitPart::Unset) {
3777 DemandedMask.clearBit(BitIdx);
3778 continue;
3779 }
3780 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3781 DemandedBW);
3782 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3783 BitIdx, DemandedBW);
3784 }
3785
3786 Intrinsic::ID Intrin;
3787 if (OKForBSwap)
3788 Intrin = Intrinsic::bswap;
3789 else if (OKForBitReverse)
3790 Intrin = Intrinsic::bitreverse;
3791 else
3792 return false;
3793
3794 Function *F =
3795 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy);
3796 Value *Provider = Res->Provider;
3797
3798 // We may need to truncate the provider.
3799 if (DemandedTy != Provider->getType()) {
3800 auto *Trunc =
3801 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
3802 InsertedInsts.push_back(Trunc);
3803 Provider = Trunc;
3804 }
3805
3806 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
3807 InsertedInsts.push_back(Result);
3808
3809 if (!DemandedMask.isAllOnes()) {
3810 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3811 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
3812 InsertedInsts.push_back(Result);
3813 }
3814
3815 // We may need to zeroextend back to the result type.
3816 if (ITy != Result->getType()) {
3817 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
3818 InsertedInsts.push_back(ExtInst);
3819 }
3820
3821 return true;
3822}
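Usage sketch (not part of Local.cpp): how an InstCombine-style caller might use the recognizer; foldToByteSwap and its policy flags are hypothetical.

// Illustrative only -- hypothetical caller, not from this file.
// Tries to collapse a shift/or/mask tree rooted at I into a single
// llvm.bswap/llvm.bitreverse call; on success the last inserted
// instruction computes the replacement value.
static bool foldToByteSwap(Instruction *I) {
  SmallVector<Instruction *, 4> InsertedInsts;
  if (!recognizeBSwapOrBitReverseIdiom(I, /*MatchBSwaps=*/true,
                                       /*MatchBitReversals=*/true,
                                       InsertedInsts))
    return false;
  I->replaceAllUsesWith(InsertedInsts.back());
  return true;
}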
3823
3824// CodeGen has special handling for some string functions that may replace
3825// them with target-specific intrinsics. Since that'd skip our interceptors
3826// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3827// we mark affected calls as NoBuiltin, which will disable optimization
3828// in CodeGen.
3829void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3830 CallInst *CI, const TargetLibraryInfo *TLI) {
3831 Function *F = CI->getCalledFunction();
3832 LibFunc Func;
3833 if (F && !F->hasLocalLinkage() && F->hasName() &&
3834 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3835 !F->doesNotAccessMemory())
3836 CI->addFnAttr(Attribute::NoBuiltin);
3837}
3838
3839bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3840 const auto *Op = I->getOperand(OpIdx);
3841 // We can't have a PHI with a metadata or token type.
3842 if (Op->getType()->isMetadataTy() || Op->getType()->isTokenLikeTy())
3843 return false;
3844
3845 // swifterror pointers can only be used by a load, store, or as a swifterror
3846 // argument; swifterror pointers are not allowed to be used in select or phi
3847 // instructions.
3848 if (Op->isSwiftError())
3849 return false;
3850
3851 // Cannot replace alloca argument with phi/select.
3852 if (I->isLifetimeStartOrEnd())
3853 return false;
3854
3855 // Early exit.
3856 if (!isa<Constant, InlineAsm>(Op))
3857 return true;
3858
3859 switch (I->getOpcode()) {
3860 default:
3861 return true;
3862 case Instruction::Call:
3863 case Instruction::Invoke: {
3864 const auto &CB = cast<CallBase>(*I);
3865
3866 // Can't handle inline asm. Skip it.
3867 if (CB.isInlineAsm())
3868 return false;
3869
3870 // Constant bundle operands may need to retain their constant-ness for
3871 // correctness.
3872 if (CB.isBundleOperand(OpIdx))
3873 return false;
3874
3875 if (OpIdx < CB.arg_size()) {
3876 // Some variadic intrinsics require constants in the variadic arguments,
3877 // which currently aren't markable as immarg.
3878 if (isa<IntrinsicInst>(CB) &&
3879 OpIdx >= CB.getFunctionType()->getNumParams()) {
3880 // This is known to be OK for stackmap.
3881 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3882 }
3883
3884 // gcroot is a special case, since it requires a constant argument which
3885 // isn't also required to be a simple ConstantInt.
3886 if (CB.getIntrinsicID() == Intrinsic::gcroot)
3887 return false;
3888
3889 // Some intrinsic operands are required to be immediates.
3890 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3891 }
3892
3893 // It is never allowed to replace the call argument to an intrinsic, but it
3894 // may be possible for a call.
3895 return !isa<IntrinsicInst>(CB);
3896 }
3897 case Instruction::ShuffleVector:
3898 // Shufflevector masks are constant.
3899 return OpIdx != 2;
3900 case Instruction::Switch:
3901 case Instruction::ExtractValue:
3902 // All operands apart from the first are constant.
3903 return OpIdx == 0;
3904 case Instruction::InsertValue:
3905 // All operands apart from the first and the second are constant.
3906 return OpIdx < 2;
3907 case Instruction::Alloca:
3908 // Static allocas (constant size in the entry block) are handled by
3909 // prologue/epilogue insertion so they're free anyway. We definitely don't
3910 // want to make them non-constant.
3911 return !cast<AllocaInst>(I)->isStaticAlloca();
3912 case Instruction::GetElementPtr:
3913 if (OpIdx == 0)
3914 return true;
3915 gep_type_iterator It = gep_type_begin(I);
3916 for (auto E = std::next(It, OpIdx); It != E; ++It)
3917 if (It.isStruct())
3918 return false;
3919 return true;
3920 }
3921}
3922
3923Value *llvm::invertCondition(Value *Condition) {
3924 // First: Check if it's a constant
3925 if (Constant *C = dyn_cast<Constant>(Condition))
3926 return ConstantExpr::getNot(C);
3927
3928 // Second: If the condition is already inverted, return the original value
3929 Value *NotCondition;
3930 if (match(Condition, m_Not(m_Value(NotCondition))))
3931 return NotCondition;
3932
3933 BasicBlock *Parent = nullptr;
3934 Instruction *Inst = dyn_cast<Instruction>(Condition);
3935 if (Inst)
3936 Parent = Inst->getParent();
3937 else if (Argument *Arg = dyn_cast<Argument>(Condition))
3938 Parent = &Arg->getParent()->getEntryBlock();
3939 assert(Parent && "Unsupported condition to invert");
3940
3941 // Third: Check all the users for an invert
3942 for (User *U : Condition->users())
3943 if (Instruction *I = dyn_cast<Instruction>(U))
3944 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3945 return I;
3946
3947 // Last option: Create a new instruction
3948 auto *Inverted =
3949 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3950 if (Inst && !isa<PHINode>(Inst))
3951 Inverted->insertAfter(Inst->getIterator());
3952 else
3953 Inverted->insertBefore(Parent->getFirstInsertionPt());
3954 return Inverted;
3955}
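Usage sketch (not part of Local.cpp): inverting the condition of a conditional branch and swapping its successors; invertBranch is a hypothetical helper.

// Illustrative only -- hypothetical caller, not from this file.
// Flips a conditional branch, reusing an existing 'not' of the condition
// when invertCondition can find one instead of creating a new instruction.
static void invertBranch(BranchInst *Br) {
  assert(Br->isConditional() && "expected a conditional branch");
  Br->setCondition(invertCondition(Br->getCondition()));
  Br->swapSuccessors();
}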
3956
3957bool llvm::inferAttributesFromOthers(Function &F) {
3958 // Note: We explicitly check for attributes rather than using cover functions
3959 // because some of the cover functions include the logic being implemented.
3960
3961 bool Changed = false;
3962 // readnone + not convergent implies nosync
3963 if (!F.hasFnAttribute(Attribute::NoSync) &&
3964 F.doesNotAccessMemory() && !F.isConvergent()) {
3965 F.setNoSync();
3966 Changed = true;
3967 }
3968
3969 // readonly implies nofree
3970 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3971 F.setDoesNotFreeMemory();
3972 Changed = true;
3973 }
3974
3975 // willreturn implies mustprogress
3976 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3977 F.setMustProgress();
3978 Changed = true;
3979 }
3980
3981 // TODO: There are a bunch of cases of restrictive memory effects we
3982 // can infer by inspecting arguments of argmemonly-ish functions.
3983
3984 return Changed;
3985}
3986
3987void OverflowTracking::mergeFlags(Instruction &I) {
3988#ifndef NDEBUG
3989 if (Opcode)
3990 assert(Opcode == I.getOpcode() &&
3991 "can only use mergeFlags on instructions with matching opcodes");
3992 else
3993 Opcode = I.getOpcode();
3994#endif
3995 if (isa<OverflowingBinaryOperator>(&I)) {
3996 HasNUW &= I.hasNoUnsignedWrap();
3997 HasNSW &= I.hasNoSignedWrap();
3998 }
3999 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4000 IsDisjoint &= DisjointOp->isDisjoint();
4001}
4002
4003void OverflowTracking::applyFlags(Instruction &I) {
4004 I.clearSubclassOptionalData();
4005 if (I.getOpcode() == Instruction::Add ||
4006 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4007 if (HasNUW)
4008 I.setHasNoUnsignedWrap();
4009 if (HasNSW && (AllKnownNonNegative || HasNUW))
4010 I.setHasNoSignedWrap();
4011 }
4012 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4013 DisjointOp->setIsDisjoint(IsDisjoint);
4014}
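Usage sketch (not part of Local.cpp, and assuming the OverflowTracking helper these two definitions belong to): intersect the wrap flags of two instructions being merged, then re-apply whatever survived; mergeAdds and its arguments are hypothetical.

// Illustrative only -- hypothetical caller, not from this file.
static Instruction *mergeAdds(BinaryOperator *Add0, BinaryOperator *Add1,
                              Value *A, Value *B,
                              BasicBlock::iterator InsertPt) {
  OverflowTracking Flags;
  Flags.mergeFlags(*Add0); // intersects nuw/nsw (and disjoint, if applicable)
  Flags.mergeFlags(*Add1);
  Instruction *NewAdd = BinaryOperator::CreateAdd(A, B, "sum", InsertPt);
  Flags.applyFlags(*NewAdd); // re-applies flags that survived, per the checks above
  return NewAdd;
}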
#define Success
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
bool End
Definition: ELF_riscv.cpp:480
static unsigned getHashValueImpl(SimpleValue Val)
Definition: EarlyCSE.cpp:233
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition: EarlyCSE.cpp:354
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition: Local.cpp:1620
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition: Local.cpp:2646
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition: Local.cpp:2396
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition: Local.cpp:1758
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition: Local.cpp:2144
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition: Local.cpp:1596
static void gatherIncomingValuesToPhi(PHINode *PN, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition: Local.cpp:951
static void combineMetadata(Instruction *K, const Instruction *J, bool DoesKMove, bool AAOnly=false)
If AAOnly is set, only intersect alias analysis metadata and preserve other known metadata.
Definition: Local.cpp:2922
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition: Local.cpp:2174
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition: Local.cpp:2323
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition: Local.cpp:2328
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2186
static DIExpression * dropInitialDeref(const DIExpression *DIExpr)
Definition: Local.cpp:1656
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition: Local.cpp:967
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2118
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, BasicBlock *&CommonPred)
Definition: Local.cpp:1008
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2245
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition: Local.cpp:846
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition: Local.cpp:663
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition: Local.cpp:1752
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition: Local.cpp:622
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition: Local.cpp:2220
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3709
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition: Local.cpp:3489
static unsigned replaceDominatedUsesWith(Value *From, Value *To, const ShouldReplaceFn &ShouldReplace)
Definition: Local.cpp:3187
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1377
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition: Local.cpp:855
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition: Local.cpp:1960
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1413
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1645
static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ)
Check whether removing BB will make the phis in its Succ have too many incoming entries.
Definition: Local.cpp:1041
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition: Local.cpp:926
static const unsigned BitPartRecursionMaxDepth
Definition: Local.cpp:121
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition: Local.cpp:1073
static cl::opt< unsigned > MaxPhiEntriesIncreaseAfterRemovingEmptyBlock("max-phi-entries-increase-after-removing-empty-block", cl::init(1000), cl::Hidden, cl::desc("Stop removing an empty block if removing it will introduce more " "than this number of phi entries in its successor"))
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3720
static void salvageDbgAssignAddress(T *Assign)
Definition: Local.cpp:2002
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition: APFloat.h:1353
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1406
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1540
unsigned popcount() const
Count the number of bits set.
Definition: APInt.h:1670
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:371
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition: APInt.h:569
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition: APInt.h:1574
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1562
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1221
an instruction to allocate memory on the stack
Definition: Instructions.h:64
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:156
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:200
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:206
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
Value handle that asserts if the Value is deleted.
Definition: ValueHandle.h:265
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator end()
Definition: BasicBlock.h:472
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:690
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
LLVM_ABI void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
const Instruction & front() const
Definition: BasicBlock.h:482
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:549
LLVM_ABI void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
Definition: BasicBlock.cpp:243
LLVM_ABI bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
Definition: BasicBlock.cpp:459
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:437
LLVM_ABI void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
Definition: BasicBlock.cpp:699
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:252
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:235
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:131
size_t size() const
Definition: BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
LLVM_ABI bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
Definition: BasicBlock.cpp:463
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition: BasicBlock.h:662
const Instruction & back() const
Definition: BasicBlock.h:484
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:494
BinaryOps getOpcode() const
Definition: InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition: Constants.h:899
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1911
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1410
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1481
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1348
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1427
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1205
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1283
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:708
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:705
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
@ ICMP_NE
not equal
Definition: InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:706
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:704
bool isSigned() const
Definition: InstrTypes.h:932
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:767
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1120
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2314
static LLVM_ABI Constant * getNot(Constant *C)
Definition: Constants.cpp:2641
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2300
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2647
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1833
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
This is an important base class in LLVM.
Definition: Constant.h:43
LLVM_ABI void destroyConstant()
Called if some element of this constant is no longer valid.
Definition: Constants.cpp:489
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition: DIBuilder.h:934
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
ArrayRef< uint64_t > getElements() const
LLVM_ABI std::optional< uint64_t > getActiveBits(DIVariable *Var)
Return the number of bits that have an active value, i.e.
uint64_t getElement(unsigned I) const
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
LLVM_ABI void removeFromParent()
LLVM_ABI Module * getModule()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
bool isAddressOfVariable() const
Does this describe the address of a local variable.
LLVM_ABI DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
LLVM_ABI unsigned getNumVariableLocationOps() const
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition: DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:50
static DebugLoc getTemporary()
Definition: DebugLoc.h:161
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
unsigned size() const
Definition: DenseMap.h:120
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:173
iterator end()
Definition: DenseMap.h:87
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:230
Implements a dense probed hash-table based set.
Definition: DenseSet.h:263
LLVM_ABI void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
const BasicBlock & getEntryBlock() const
Definition: Function.h:807
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
bool hasDomTree() const
Returns true if it holds a DomTreeT.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
bool isBBPendingDeletion(BasicBlockT *DelBB) const
Returns true if DelBB is awaiting deletion.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2329
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1197
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1191
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
Definition: Instruction.h:105
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
Definition: Metadata.cpp:1829
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:879
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:428
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1718
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:510
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
An instruction for reading from memory.
Definition: Instructions.h:180
Value * getPointerOperand()
Definition: Instructions.h:259
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:38
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition: MDBuilder.cpp:96
Metadata node.
Definition: Metadata.h:1077
static LLVM_ABI MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1142
static LLVM_ABI MDNode * getMergedCallsiteMetadata(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedCalleeTypeMetadata(const MDNode *A, const MDNode *B)
Definition: Metadata.cpp:1306
static LLVM_ABI MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoaliasAddrspace(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1397
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1565
static LLVM_ABI MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
Definition: Metadata.cpp:1224
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1174
static LLVM_ABI MDNode * getMostGenericRange(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1324
static LLVM_ABI MDNode * getMergedMemProfMetadata(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * intersect(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1129
LLVMContext & getContext() const
Definition: Metadata.h:1241
static LLVM_ABI MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1434
static LLVM_ABI MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
iterator end()
Definition: MapVector.h:67
iterator find(const KeyT &Key)
Definition: MapVector.h:141
bool empty() const
Definition: MapVector.h:75
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: MapVector.h:115
LLVM_ABI void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
LLVM_ABI void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition: Metadata.h:63
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1885
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:104
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition: SetVector.h:93
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:279
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:99
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:168
value_type pop_back_val()
Definition: SetVector.h:296
size_type size() const
Definition: SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small set implementations.
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:476
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:541
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:356
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element size.
Definition: SmallVector.h:574
void reserve(size_type N)
Definition: SmallVector.h:664
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:684
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:806
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector, but is general enough to go beyond that when required.
Definition: TinyPtrVector.h:29
bool empty() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:346
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:264
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:267
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:261
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:255
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1866
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
value_op_iterator value_op_end()
Definition: User.h:313
Value * getOperand(unsigned i) const
Definition: User.h:232
value_op_iterator value_op_begin()
Definition: User.h:310
iterator_range< value_op_iterator > operand_values()
Definition: User.h:316
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:457
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:502
iterator find(const KeyT &Val)
Definition: ValueMap.h:160
iterator end()
Definition: ValueMap.h:139
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
iterator_range< user_iterator > users()
Definition: Value.h:426
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldReplace returns true for the given use.
Definition: Value.cpp:554
bool use_empty() const
Definition: Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1098
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition: Value.h:829
user_iterator_impl< User > user_iterator
Definition: Value.h:391
bool hasName() const
Definition: Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:194
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition: DenseSet.h:96
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:751
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:962
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
Definition: PatternMatch.h:876
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
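A small sketch of how these PatternMatch helpers compose (the function and variable names below are assumptions, not part of this file): match a value of the form (X & Y) | Z and bind its operands:
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;
// Sketch: returns true and binds X, Y, Z if V is (X & Y) | Z.
bool matchAndThenOr(Value *V, Value *&X, Value *&Y, Value *&Z) {
  return match(V, m_Or(m_And(m_Value(X), m_Value(Y)), m_Value(Z)));
}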
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:148
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:42
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1770
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1737
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
Definition: Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:533
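A common caller-side pattern, shown as a hedged sketch (replaceAndClean is an assumed helper name): replace all uses of an instruction, then let this utility clean up whatever became dead:
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: rewrite uses of Old to New, then delete Old (and any operands that
// become trivially dead as a result) if Old is now trivially dead itself.
void replaceAndClean(Instruction *Old, Value *New) {
  Old->replaceAllUsesWith(New);
  RecursivelyDeleteTriviallyDeadInstructions(Old);
}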
bool succ_empty(const Instruction *I)
Definition: CFG.h:256
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2603
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branch to the constant destination.
Definition: Local.cpp:134
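A hedged sketch of typical use (the wrapper name is an assumption): walk a function and fold every terminator whose condition is constant, deleting conditions that become dead:
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: fold constant-conditioned terminators throughout F.
bool foldConstantTerminators(Function &F) {
  bool Changed = false;
  for (BasicBlock &BB : F)
    Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true);
  return Changed;
}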
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback ShouldReplace returns true for that use.
Definition: Local.cpp:3236
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
Definition: DebugInfo.cpp:124
LLVM_ABI unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition: Local.cpp:3207
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
Definition: Utils.cpp:1723
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are required to preserve beyond the effect of allocating a new object.
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2579
LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
LLVM_ABI void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source instruction).
Definition: Local.cpp:3090
LLVM_ABI void InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Dbg Intrinsic utilities.
Definition: Local.cpp:1708
constexpr from_range_t from_range
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or fewer items.
Definition: STLExtras.h:2596
LLVM_ABI void remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst)
Remap the operands of the debug records attached to Inst, and the operands of Inst itself if it's a debug intrinsic.
Definition: Local.cpp:3420
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:663
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead instructions.
Definition: Local.cpp:721
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable information.
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
LLVM_ABI void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition: Local.cpp:1879
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
Definition: Local.cpp:2468
LLVM_ABI bool canSimplifyInvokeNoUnwind(const Function *F)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:402
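A hedged sketch of the usual dead-code sweep built on this predicate (sweepDeadInstructions is an assumed name), using make_early_inc_range so instructions can be erased while iterating:
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: erase every instruction in BB that has no uses and no side effects.
bool sweepDeadInstructions(BasicBlock &BB) {
  bool Changed = false;
  for (Instruction &I : make_early_inc_range(BB))
    if (isInstructionTriviallyDead(&I)) {
      I.eraseFromParent();
      Changed = true;
    }
  return Changed;
}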
LLVM_ABI bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes, potential debug intrinsics and the branch; if possible, eliminate BB by rewriting all predecessors to branch to the successor block.
Definition: Local.cpp:1140
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition: Local.cpp:3725
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
LLVM_ABI MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1566
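A hedged usage sketch (ensureLoadAlignment and the 16-byte preference are assumptions): try to prove or enforce an alignment for a load's pointer and record whatever was established:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: prefer 16-byte alignment for LI's pointer; keep whatever was proved.
void ensureLoadAlignment(LoadInst *LI, const DataLayout &DL) {
  Align Known = getOrEnforceKnownAlignment(LI->getPointerOperand(),
                                           MaybeAlign(16), DL, LI);
  if (Known > LI->getAlign())
    LI->setAlignment(Known);
}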
LLVM_ABI bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than where it is used.
Definition: Local.cpp:409
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into an appropriate set of dbg.value records.
Definition: Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:1172
LLVM_ABI DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition: Local.cpp:3382
LLVM_ABI CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information, etc.
Definition: Local.cpp:2553
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
Definition: Local.cpp:1662
LLVM_ABI Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition: Local.cpp:2845
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition: Local.cpp:421
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition: Local.cpp:3143
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users of I.
Definition: Local.cpp:2037
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition: Local.cpp:3222
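A hedged sketch of the common use case (propagateOnEdge and its parameters are assumptions): once a value is known to equal another value along a particular CFG edge, rewrite only the uses that edge dominates:
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: replace uses of X with C where the edge From -> To dominates them.
unsigned propagateOnEdge(Value *X, Value *C, DominatorTree &DT,
                         BasicBlock *From, BasicBlock *To) {
  BasicBlockEdge Edge(From, To);
  return replaceDominatedUsesWith(X, C, DT, Edge);
}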
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.
Definition: Local.cpp:2513
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2414
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2274
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3081
LLVM_ABI void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition: Local.cpp:3332
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!); eliminate the edge between them, moving the instructions in the predecessor into BB.
Definition: Local.cpp:761
LLVM_ABI bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition: Local.cpp:610
LLVM_ABI void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructions to the insertion point InsertPt.
Definition: Local.cpp:3339
LLVM_ABI void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition: Local.cpp:3308
LLVM_ABI DebugLoc getDebugValueLoc(DbgVariableRecord *DVR)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
Definition: DebugInfo.cpp:140
LLVM_ABI void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition: Local.cpp:3283
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition: Local.cpp:3839
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca it describes is replaced with a new value.
Definition: Local.cpp:1982
LLVM_ABI Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to PrefAlign.
Definition: Local.cpp:1517
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not trivially dead; these will be ignored.
Definition: Local.cpp:548
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Definition: STLExtras.h:1980
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
LLVM_ABI void combineAAMetadata(Instruction *K, const Instruction *J)
Combine metadata of two instructions, where instruction J is a memory access that has been merged into K.
Definition: Local.cpp:3086
LLVM_ABI bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use nodes that either forms a cycle or is terminated by a trivially dead instruction, delete it.
Definition: Local.cpp:641
LLVM_ABI bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize the maximal set of attributes.
Definition: Local.cpp:3957
LLVM_ABI Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition: Local.cpp:3923
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:595
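A hedged sketch in the spirit of the PHI CSE hashing in this file (hashPHI is an assumed name, not the production hash): combine a PHI's incoming values and incoming blocks into one hash_code:
#include "llvm/ADT/Hashing.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
// Sketch: hash a PHI node from its incoming values and incoming blocks.
hash_code hashPHI(PHINode *PN) {
  return hash_combine(
      hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
      hash_combine_range(PN->block_begin(), PN->block_end()));
}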
LLVM_ABI void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks BBs, which must have no predecessors.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition: Local.cpp:3829
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition: Local.cpp:2883
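A hedged sketch of typical use with a lazily flushed DomTreeUpdater (pruneUnreachable is an assumed name):
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// Sketch: drop unreachable blocks and keep DT consistent via the updater,
// whose pending updates are flushed when it goes out of scope.
bool pruneUnreachable(Function &F, DominatorTree &DT) {
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  return removeUnreachableBlocks(F, &DTU);
}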
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition: Local.cpp:1509
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
Definition: DebugInfo.cpp:129
LLVM_ABI bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition: Local.cpp:3254
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:469
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition: Local.cpp:1942
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
#define NDEBUG
Definition: regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given value type.
Definition: DenseMapInfo.h:54
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:235
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:44
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
bool AllKnownNonNegative
Definition: Local.h:587
std::optional< unsigned > Opcode
Opcode of merged instructions.
Definition: Local.h:580
LLVM_ABI void mergeFlags(Instruction &I)
Merge in the no-wrap flags from I.
Definition: Local.cpp:3987
LLVM_ABI void applyFlags(Instruction &I)
Apply the no-wrap flags to I if applicable.
Definition: Local.cpp:4003
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:249