//===- InstCombinePHI.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

static cl::opt<unsigned>
MaxNumPhis("instcombine-max-num-phis", cl::init(512),
           cl::desc("Maximum number phis to handle in intptr/ptrint folding"));

STATISTIC(NumPHIsOfInsertValues,
          "Number of phi-of-insertvalue turned into insertvalue-of-phis");
STATISTIC(NumPHIsOfExtractValues,
          "Number of phi-of-extractvalue turned into extractvalue-of-phi");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");

/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  Inst->setDebugLoc(FirstInst->getDebugLoc());
  // We do not expect a CallInst here; otherwise, N-way merging of DebugLoc
  // will be inefficient.
  assert(!isa<CallInst>(Inst));

  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = cast<Instruction>(V);
    Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
  }
}

/// If the phi is within a phi web, which is formed by the def-use chain
/// of phis, and all the phis in the web are only used by the other phis,
/// then these phis are dead and we will remove all of them.
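/// For illustration (hypothetical IR, not from the LLVM sources), %x and %y
/// below form a dead phi web: each is used only by the other, so both are
/// removed:
///   bb1:
///     %x = phi i32 [ 0, %entry ], [ %y, %bb2 ]
///     br label %bb2
///   bb2:
///     %y = phi i32 [ %x, %bb1 ]
///     br label %bb1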
bool InstCombinerImpl::foldDeadPhiWeb(PHINode &PN) {
  SmallVector<PHINode *, 16> Stack;
  SmallPtrSet<PHINode *, 16> Visited;
  Stack.push_back(&PN);
  Visited.insert(&PN);
  while (!Stack.empty()) {
    PHINode *Phi = Stack.pop_back_val();
    for (User *Use : Phi->users()) {
      if (PHINode *PhiUse = dyn_cast<PHINode>(Use)) {
        if (!Visited.insert(PhiUse).second)
          continue;
        // Early stop if the set of PHIs is large.
        if (Visited.size() >= 16)
          return false;
        Stack.push_back(PhiUse);
      } else
        return false;
    }
  }
  for (PHINode *Phi : Visited)
    replaceInstUsesWith(*Phi, PoisonValue::get(Phi->getType()));
  for (PHINode *Phi : Visited)
    eraseInstFromFunction(*Phi);
  return true;
}

// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
// If there is an existing pointer typed PHI that produces the same value as PN,
// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
// PHI node:
//
// Case-1:
// bb1:
//     int_init = PtrToInt(ptr_init)
//     br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
//
// ==>
// bb1:
//     br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
//
// Case-2:
// bb1:
//    int_ptr = BitCast(ptr_ptr)
//    int_init = Load(int_ptr)
//    br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb1:
//    ptr_init = Load(ptr_ptr)
//    br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
//    ...
//
bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
  if (!PN.getType()->isIntegerTy())
    return false;
  if (!PN.hasOneUse())
    return false;

  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
  if (!IntToPtr)
    return false;

  // Check if the pointer is actually used as a pointer:
  auto HasPointerUse = [](Instruction *IIP) {
    for (User *U : IIP->users()) {
      Value *Ptr = nullptr;
      if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
        Ptr = LoadI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        Ptr = SI->getPointerOperand();
      } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
        Ptr = GI->getPointerOperand();
      }

      if (Ptr && Ptr == IIP)
        return true;
    }
    return false;
  };

  if (!HasPointerUse(IntToPtr))
    return false;

  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
      DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
    return false;

  SmallVector<Value *, 4> AvailablePtrVals;
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *Arg = std::get<1>(Incoming);

    // Arg could be a constant, constant expr, etc., which we don't cover here.
    if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
      return false;

    // First look backward:
    if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
      AvailablePtrVals.emplace_back(PI->getOperand(0));
      continue;
    }

    // Next look forward:
    Value *ArgIntToPtr = nullptr;
    for (User *U : Arg->users()) {
      if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
          (DT.dominates(cast<Instruction>(U), BB) ||
           cast<Instruction>(U)->getParent() == BB)) {
        ArgIntToPtr = U;
        break;
      }
    }

    if (ArgIntToPtr) {
      AvailablePtrVals.emplace_back(ArgIntToPtr);
      continue;
    }

    // If Arg is defined by a PHI, allow it. This will also create
    // more opportunities iteratively.
    if (isa<PHINode>(Arg)) {
      AvailablePtrVals.emplace_back(Arg);
      continue;
    }

    // For a single use integer load:
    auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI)
      return false;

    if (!LoadI->hasOneUse())
      return false;

    // Push the integer typed Load instruction into the available
    // value set, and fix it up later when the pointer typed PHI
    // is synthesized.
    AvailablePtrVals.emplace_back(LoadI);
  }

  // Now search for a matching PHI.
  auto *BB = PN.getParent();
  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
         "Not enough available ptr typed incoming values");
  PHINode *MatchingPtrPHI = nullptr;
  unsigned NumPhis = 0;
  for (PHINode &PtrPHI : BB->phis()) {
    // FIXME: consider handling this in AggressiveInstCombine
    if (NumPhis++ > MaxNumPhis)
      return false;
    if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
      continue;
    if (any_of(zip(PN.blocks(), AvailablePtrVals),
               [&](const auto &BlockAndValue) {
                 BasicBlock *BB = std::get<0>(BlockAndValue);
                 Value *V = std::get<1>(BlockAndValue);
                 return PtrPHI.getIncomingValueForBlock(BB) != V;
               }))
      continue;
    MatchingPtrPHI = &PtrPHI;
    break;
  }

  if (MatchingPtrPHI) {
    assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
           "Phi's Type does not match with IntToPtr");
    // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
    // to make sure another transform can't undo it in the meantime.
    replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
    eraseInstFromFunction(*IntToPtr);
    eraseInstFromFunction(PN);
    return true;
  }

  // If it requires a conversion for every PHI operand, do not do it.
  if (all_of(AvailablePtrVals, [&](Value *V) {
        return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
      }))
    return false;

  // If any of the operands that require casting is a terminator
  // instruction, do not do it. Similarly, do not do the transform if the value
  // is a PHI in a block with no insertion point, for example, a catchswitch
  // block, since we will not be able to insert a cast after the PHI.
  if (any_of(AvailablePtrVals, [&](Value *V) {
        if (V->getType() == IntToPtr->getType())
          return false;
        auto *Inst = dyn_cast<Instruction>(V);
        if (!Inst)
          return false;
        if (Inst->isTerminator())
          return true;
        auto *BB = Inst->getParent();
        if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
          return true;
        return false;
      }))
    return false;

  PHINode *NewPtrPHI = PHINode::Create(
      IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");

  InsertNewInstBefore(NewPtrPHI, PN.getIterator());
  SmallDenseMap<Value *, Instruction *> Casts;
  for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
    auto *IncomingBB = std::get<0>(Incoming);
    auto *IncomingVal = std::get<1>(Incoming);

    if (IncomingVal->getType() == IntToPtr->getType()) {
      NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
      continue;
    }

#ifndef NDEBUG
    LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
    assert((isa<PHINode>(IncomingVal) ||
            IncomingVal->getType()->isPointerTy() ||
            (LoadI && LoadI->hasOneUse())) &&
           "Can not replace LoadInst with multiple uses");
#endif
    // Need to insert a BitCast.
    // For an integer Load instruction with a single use, the load + IntToPtr
    // cast will be simplified into a pointer load:
    // %v = load i64, i64* %a.ip, align 8
    // %v.cast = inttoptr i64 %v to float **
    // ==>
    // %v.ptrp = bitcast i64 * %a.ip to float **
    // %v.cast = load float *, float ** %v.ptrp, align 8
    Instruction *&CI = Casts[IncomingVal];
    if (!CI) {
      CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
                                            IncomingVal->getName() + ".ptr");
      if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
        BasicBlock::iterator InsertPos(IncomingI);
        InsertPos++;
        BasicBlock *BB = IncomingI->getParent();
        if (isa<PHINode>(IncomingI))
          InsertPos = BB->getFirstInsertionPt();
        assert(InsertPos != BB->end() && "should have checked above");
        InsertNewInstBefore(CI, InsertPos);
      } else {
        auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
        InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
      }
    }
    NewPtrPHI->addIncoming(CI, IncomingBB);
  }

  // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
  // to make sure another transform can't undo it in the meantime.
  replaceInstUsesWith(*IntToPtr, NewPtrPHI);
  eraseInstFromFunction(*IntToPtr);
  eraseInstFromFunction(PN);
  return true;
}

// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
// fold Phi-operand to bitcast.
Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
  // Convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] ).
  // Make sure all uses of phi are ptr2int.
  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
    return nullptr;

  // Iterate over all operands to check the presence of target pointers for
  // optimization.
  bool OperandWithRoundTripCast = false;
  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
    if (auto *NewOp =
            simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
      replaceOperand(PN, OpNum, NewOp);
      OperandWithRoundTripCast = true;
    }
  }
  if (!OperandWithRoundTripCast)
    return nullptr;
  return &PN;
}

/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
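/// For illustration, a hypothetical IR sketch (names invented) of this fold:
///   %iv1 = insertvalue { i32, i32 } %agg1, i32 %a, 0   ; in %bb1
///   %iv2 = insertvalue { i32, i32 } %agg2, i32 %b, 0   ; in %bb2
///   %phi = phi { i32, i32 } [ %iv1, %bb1 ], [ %iv2, %bb2 ]
/// ==>
///   %agg1.pn = phi { i32, i32 } [ %agg1, %bb1 ], [ %agg2, %bb2 ]
///   %a.pn = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi = insertvalue { i32, i32 } %agg1.pn, i32 %a.pn, 0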
Instruction *
InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `insertvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<InsertValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
      return nullptr;
  }

  // For each operand of an `insertvalue`:
  std::array<PHINode *, 2> NewOperands;
  for (int OpIdx : {0, 1}) {
    auto *&NewOperand = NewOperands[OpIdx];
    // Create a new PHI node to receive the values the operand has in each
    // incoming basic block.
    NewOperand = PHINode::Create(
        FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
        FirstIVI->getOperand(OpIdx)->getName() + ".pn");
    // And populate each operand's PHI with said values.
    for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
      NewOperand->addIncoming(
          cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
          std::get<0>(Incoming));
    InsertNewInstBefore(NewOperand, PN.getIterator());
  }

  // And finally, create `insertvalue` over the newly-formed PHI nodes.
  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
                                         FirstIVI->getIndices(), PN.getName());

  PHIArgMergedDebugLoc(NewIVI, PN);
  ++NumPHIsOfInsertValues;
  return NewIVI;
}

/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
/// turn this into a phi[a,b] and a single extractvalue.
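/// For illustration, a hypothetical IR sketch of this fold:
///   %ev1 = extractvalue { i32, i32 } %agg1, 0   ; in %bb1
///   %ev2 = extractvalue { i32, i32 } %agg2, 0   ; in %bb2
///   %phi = phi i32 [ %ev1, %bb1 ], [ %ev2, %bb2 ]
/// ==>
///   %agg1.pn = phi { i32, i32 } [ %agg1, %bb1 ], [ %agg2, %bb2 ]
///   %phi = extractvalue { i32, i32 } %agg1.pn, 0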
Instruction *
InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `extractvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<ExtractValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
        I->getAggregateOperand()->getType() !=
            FirstEVI->getAggregateOperand()->getType())
      return nullptr;
  }

  // Create a new PHI node to receive the values the aggregate operand has
  // in each incoming basic block.
  auto *NewAggregateOperand = PHINode::Create(
      FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
      FirstEVI->getAggregateOperand()->getName() + ".pn");
  // And populate the PHI with said values.
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
    NewAggregateOperand->addIncoming(
        cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
        std::get<0>(Incoming));
  InsertNewInstBefore(NewAggregateOperand, PN.getIterator());

  // And finally, create `extractvalue` over the newly-formed PHI nodes.
  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
                                          FirstEVI->getIndices(), PN.getName());

  PHIArgMergedDebugLoc(NewEVI, PN);
  ++NumPHIsOfExtractValues;
  return NewEVI;
}

/// If we have something like phi [add(a,b), add(a,c)] and if a/b/c and the
/// adds all have a single user, turn this into a phi and a single binop.
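/// For illustration, a hypothetical IR sketch: %a is common to both incoming
/// adds, so only one new phi (for the varying operand) is needed:
///   %add1 = add i32 %a, %b   ; in %bb1
///   %add2 = add i32 %a, %c   ; in %bb2
///   %phi = phi i32 [ %add1, %bb1 ], [ %add2, %bb2 ]
/// ==>
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %phi = add i32 %a, %b.pn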
Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates.
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN.getIterator());
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN.getIterator());
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      Instruction *InInst = cast<Instruction>(InVal);
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, InBB);
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, InBB);
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
      BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (Value *V : drop_begin(PN.incoming_values()))
    NewBinOp->andIRFlags(V);

  PHIArgMergedDebugLoc(NewBinOp, PN);
  return NewBinOp;
}

Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
  auto *FirstInst = cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value *, 16> FixedOperands(FirstInst->op_begin(),
                                         FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  // Remember flags of the first phi-operand getelementptr.
  GEPNoWrapFlags NW = FirstInst->getNoWrapFlags();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *GEP = dyn_cast<GetElementPtrInst>(V);
    if (!GEP || !GEP->hasOneUser() ||
        GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    NW &= GEP->getNoWrapFlags();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
      if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<Constant>(FirstInst->getOperand(Op)) ||
          isa<Constant>(GEP->getOperand(Op)))
        return nullptr;

      if (FirstInst->getOperand(Op)->getType() !=
          GEP->getOperand(Op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[Op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode *, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
    if (FixedOperands[I])
      continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(I);
    PHINode *NewPN =
        PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
    InsertNewInstBefore(NewPN, PN.getIterator());

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[I] = NewPN;
    FixedOperands[I] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      auto *InGEP = cast<GetElementPtrInst>(InVal);

      for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
        if (PHINode *OpPhi = OperandPhis[Op])
          OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                ArrayRef(FixedOperands).slice(1), NW);
  PHIArgMergedDebugLoc(NewGEP, PN);
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious that the value of the
/// load is not changed from the point of the load to the end of the block it
/// is in.
///
/// Note that it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
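/// For illustration (hypothetical IR), a load we would decline to sink:
///   %a = alloca [4 x i32]
///   %p = getelementptr [4 x i32], ptr %a, i32 0, i32 1
///   %v = load i32, ptr %p   ; constant stack offset; sinking would force each
///                           ; predecessor to materialize %p in a register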
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory()) {
      // Calls that only access inaccessible memory do not block sinking the
      // load.
      if (auto *CB = dyn_cast<CallBase>(BBI))
        if (CB->onlyAccessesInaccessibleMemory())
          continue;
      return false;
    }

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool IsAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      IsAddressTaken = true;
      break;
    }

    if (!IsAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

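// For illustration, a hypothetical sketch of the fold below: loads that feed
// only the phi are replaced by a single load of a phi of their pointers:
//   %v1 = load i32, ptr %p1   ; in %bb1
//   %v2 = load i32, ptr %p2   ; in %bb2
//   %phi = phi i32 [ %v1, %bb1 ], [ %v2, %bb2 ]
// ==>
//   %phi.in = phi ptr [ %p1, %bb1 ], [ %p2, %bb2 ]
//   %phi = load i32, ptr %phi.in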
Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // Can't forward swifterror through a phi.
  if (FirstLI->getOperand(0)->isSwiftError())
    return nullptr;

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is.
  bool IsVolatile = FirstLI->isVolatile();
  Align LoadAlignment = FirstLI->getAlign();
  const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (IsVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *InBB = std::get<0>(Incoming);
    Value *InVal = std::get<1>(Incoming);
    LoadInst *LI = dyn_cast<LoadInst>(InVal);
    if (!LI || !LI->hasOneUser() || LI->isAtomic())
      return nullptr;

    // Make sure all arguments are the same type of operation.
    if (LI->isVolatile() != IsVolatile ||
        LI->getPointerAddressSpace() != LoadAddrSpace)
      return nullptr;

    // Can't forward swifterror through a phi.
    if (LI->getOperand(0)->isSwiftError())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlign());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI =
      new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
  NewLI->copyMetadata(*FirstLI);

  // Add all operands to the new PHI and combine TBAA metadata.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    LoadInst *LI = cast<LoadInst>(V);
    combineMetadataForCSE(NewLI, LI, true);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (IsVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  PHIArgMergedDebugLoc(NewLI, PN);
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
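/// For illustration, a hypothetical sketch of the zext fold performed below
/// (at least two zexts and one constant are required; see the comments in the
/// body):
///   %phi = phi i32 [ %z1, %bb1 ], [ %z2, %bb2 ], [ 42, %bb3 ]
///   ; where %z1 = zext i8 %a to i32 and %z2 = zext i8 %b to i32
/// ==>
///   %phi.shrunk = phi i8 [ %a, %bb1 ], [ %b, %bb2 ], [ 42, %bb3 ]
///   %r = zext i8 %phi.shrunk to i32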
Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one user.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType, DL);
      if (!Trunc)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned I = 0; I != NumIncomingValues; ++I)
    NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));

  InsertNewInstBefore(NewPhi, Phi.getIterator());
  auto *CI = CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());

  // We use a dropped location here because the new ZExt is necessarily a merge
  // of ZExtInsts and at least one constant from incoming branches; the presence
  // of the constant means we have no viable DebugLoc from that branch, and
  // therefore we must use a dropped location.
  CI->setDebugLoc(DebugLoc::getDropped());
  return CI;
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
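/// For illustration, a hypothetical sketch for a cast operand:
///   %t1 = trunc i64 %a to i32   ; in %bb1
///   %t2 = trunc i64 %b to i32   ; in %bb2
///   %phi = phi i32 [ %t1, %bb1 ], [ %t2, %bb2 ]
/// ==>
///   %phi.in = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi = trunc i64 %phi.in to i32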
Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return foldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return foldPHIArgLoadIntoPHI(PN);
  if (isa<InsertValueInst>(FirstInst))
    return foldPHIArgInsertValueInstructionIntoPHI(PN);
  if (isa<ExtractValueInst>(FirstInst))
    return foldPHIArgExtractValueInstructionIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return foldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    Value *NewInVal = cast<Instruction>(V)->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (Value *V : drop_begin(PN.incoming_values()))
      BinOp->andIRFlags(V);

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
                           SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
        if (NonPhiInVal)
          return false;
        NonPhiInVal = OpPN;
      }
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZero())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;    // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;    // The amount shifted.
  Instruction *Inst; // The trunc instruction.

  PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
      : PHIId(Pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;    // The PHI that was lowered.
  unsigned Shift; // The amount shifted.
  unsigned Width; // The width extracted.

  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
      : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
};
} // namespace

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
} // namespace llvm


/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
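/// For illustration, a hypothetical sketch of the slicing (assuming i64 is
/// illegal on the target and i32 is legal):
///   %phi = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %lo = trunc i64 %phi to i32
///   %hi.shifted = lshr i64 %phi, 32
///   %hi = trunc i64 %hi.shifted to i32
/// ==> (with the lshr+trunc extracts rewritten in the predecessors)
///   %phi.off0 = phi i32 [ %a.lo, %bb1 ], [ %b.lo, %bb2 ]
///   %phi.off32 = phi i32 [ %a.hi, %bb1 ], [ %b.hi, %bb2 ]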
Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsToSlice is an ordered list of PHIs
  // that we need to check the uses of (to ensure they are all extracts), and
  // PHIsInspected is a set we use to avoid revisiting PHIs.
  SmallVector<PHINode *, 8> PHIsToSlice;
  SmallPtrSet<PHINode *, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge which is required to insert a truncate. Because of this,
    // we have to bail out.
    for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
      BasicBlock *BB = std::get<0>(Incoming);
      Value *V = std::get<1>(Incoming);
      InvokeInst *II = dyn_cast<InvokeInst>(V);
      if (!II)
        continue;
      if (II->getParent() != BB)
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    // If the incoming value is a PHI node before a catchswitch, we cannot
    // extract the value within that BB because we cannot insert any non-PHI
    // instructions in the BB.
    for (auto *Pred : PN->blocks())
      if (Pred->getFirstInsertionPt() == Pred->end())
        return nullptr;

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      // Bail on out of range shifts.
      unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
      if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must be all self uses; just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
             for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
             << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  SmallDenseMap<BasicBlock *, Value *> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode *> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName() + ".off" + Twine(Offset),
                               PN->getIterator());
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
        BasicBlock *Pred = std::get<0>(Incoming);
        Value *InVal = std::get<1>(Incoming);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(
              Res, ConstantInt::get(InVal->getType(), Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(
                PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      LLVM_DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                        << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with poison.
  Value *Poison = PoisonValue::get(FirstPhi.getType());
  for (PHINode *PHI : drop_begin(PHIsToSlice))
    replaceInstUsesWith(*PHI, Poison);
  return replaceInstUsesWith(FirstPhi, Poison);
}

static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
                                       const DominatorTree &DT) {
  // Simplify the following patterns:
  //       if (cond)
  //       /       \
  //      ...      ...
  //       \       /
  //    phi [true] [false]
  // and
  //        switch (cond)
  //  case v1: /       \ case v2:
  //          ...      ...
  //           \       /
  //        phi [v1] [v2]
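  //
  // For illustration (hypothetical IR), the branch form of this fold:
  //   entry:
  //     br i1 %cond, label %bb1, label %bb2
  //   ...
  //   merge:
  //     %phi = phi i1 [ true, %bb1 ], [ false, %bb2 ]  ; simplifies to %cond
  // With the incoming constants swapped, the phi simplifies to the inverted
  // condition instead (a 'not' is inserted at the first insertion point).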
  // Make sure all inputs are constants.
  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
    return nullptr;

  BasicBlock *BB = PN.getParent();
  // Do not bother with unreachable instructions.
  if (!DT.isReachableFromEntry(BB))
    return nullptr;

  // Determine which value the condition of the idom has for which successor.
  LLVMContext &Context = PN.getContext();
  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
  Value *Cond;
  SmallDenseMap<ConstantInt *, BasicBlock *, 8> SuccForValue;
  SmallDenseMap<BasicBlock *, unsigned> SuccCount;
  auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
    SuccForValue[C] = Succ;
    ++SuccCount[Succ];
  };
  if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
    if (BI->isUnconditional())
      return nullptr;

    Cond = BI->getCondition();
    AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
    AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
  } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
    Cond = SI->getCondition();
    ++SuccCount[SI->getDefaultDest()];
    for (auto Case : SI->cases())
      AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
  } else {
    return nullptr;
  }

  if (Cond->getType() != PN.getType())
    return nullptr;

  // Check that edges outgoing from the idom's terminators dominate respective
  // inputs of the Phi.
  std::optional<bool> Invert;
  for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
    auto *Input = cast<ConstantInt>(std::get<0>(Pair));
    BasicBlock *Pred = std::get<1>(Pair);
    auto IsCorrectInput = [&](ConstantInt *Input) {
      // The input needs to be dominated by the corresponding edge of the idom.
      // This edge cannot be a multi-edge, as that would imply that multiple
      // different condition values follow the same edge.
      auto It = SuccForValue.find(Input);
      return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
             DT.dominates(BasicBlockEdge(IDom, It->second),
                          BasicBlockEdge(Pred, BB));
    };

    // Depending on the constant, the condition may need to be inverted.
    bool NeedsInvert;
    if (IsCorrectInput(Input))
      NeedsInvert = false;
    else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
      NeedsInvert = true;
    else
      return nullptr;

    // Make sure the inversion requirement is always the same.
    if (Invert && *Invert != NeedsInvert)
      return nullptr;

    Invert = NeedsInvert;
  }

  if (!*Invert)
    return Cond;

  // This Phi is actually opposite to the branching condition of IDom. Invert
  // the condition, which may open up opportunities for sinking.
  auto InsertPt = BB->getFirstInsertionPt();
  if (InsertPt != BB->end()) {
    Self.Builder.SetInsertPoint(&*BB, InsertPt);
    return Self.Builder.CreateNot(Cond);
  }

  return nullptr;
}

// Fold iv = phi(start, iv.next = iv2.next op start)
// where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
// and iv2.start op start = start
// to iv = iv2 op start
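//
// For illustration (hypothetical IR), with start = %n and iv2.start = 0 (the
// identity for add, so "iv2.start op start = start" holds):
//   %iv2 = phi i64 [ 0, %entry ], [ %iv2.next, %loop ]
//   %iv = phi i64 [ %n, %entry ], [ %iv.next, %loop ]
//   %iv2.next = add i64 %iv2, 1
//   %iv.next = add i64 %iv2.next, %n
// ==>
//   %iv = add i64 %iv2, %n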
static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
  BasicBlock *BB = PN.getParent();
  if (PN.getNumIncomingValues() != 2)
    return nullptr;

  Value *Start;
  Instruction *IvNext;
  BinaryOperator *Iv2Next;
  auto MatchOuterIV = [&](Value *V1, Value *V2) {
    if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
        match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
      Start = V1;
      IvNext = cast<Instruction>(V2);
      return true;
    }
    return false;
  };

  if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
      !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
    return nullptr;

  PHINode *Iv2;
  Value *Iv2Start, *Iv2Step;
  if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
      Iv2->getParent() != BB)
    return nullptr;

  auto *BO = dyn_cast<BinaryOperator>(IvNext);
  Constant *Identity =
      BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
         : Constant::getNullValue(Iv2Start->getType());
  if (Iv2Start != Identity)
    return nullptr;

  Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
  if (!BO) {
    auto *GEP = cast<GEPOperator>(IvNext);
    return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
                             cast<GEPOperator>(IvNext)->getNoWrapFlags());
  }

  assert(BO->isCommutative() && "Must be commutative");
  Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
  cast<Instruction>(Res)->copyIRFlags(BO);
  return Res;
}

// PHINode simplification
//
Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
  if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
    return Result;

  if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
  auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
  if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
      Inst0->hasOneUser())
    if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
      return Result;

  // If the incoming values are pointer casts of the same original value,
  // replace the phi with a single cast iff we can insert a non-PHI instruction.
  if (PN.getType()->isPointerTy() &&
      PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
    Value *IV0 = PN.getIncomingValue(0);
    Value *IV0Stripped = IV0->stripPointerCasts();
    // Set to keep track of values known to be equal to IV0Stripped after
    // stripping pointer casts.
    SmallPtrSet<Value *, 4> CheckedIVs;
    CheckedIVs.insert(IV0);
    if (IV0 != IV0Stripped &&
        all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
          return !CheckedIVs.insert(IV).second ||
                 IV0Stripped == IV->stripPointerCasts();
        })) {
      return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
    }
  }

  if (foldDeadPhiWeb(PN))
    return nullptr;

  // Optimization when the phi only has one use.
  if (PN.hasOneUse()) {
    if (foldIntegerTypedPHI(PN))
      return nullptr;

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
         isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }
  }

  // When a PHI is used only to be compared with zero, it is safe to replace
  // an incoming value proved as known nonzero with any non-zero constant.
  // For example, in the code below, the incoming value %v can be replaced
  // with any non-zero constant based on the fact that the PHI is only used to
  // be compared with zero and %v is a known non-zero value:
  //   %v = select %cond, 1, 2
  //   %p = phi [%v, BB] ...
  //   icmp eq, %p, 0
  // FIXME: To be simple, handle only integer type for now.
  // This handles a small number of uses to keep the complexity down, and an
  // icmp(or(phi)) can equally be replaced with any non-zero constant as the
  // "or" will only add bits.
  if (!PN.hasNUsesOrMore(3)) {
    SmallVector<Instruction *> DropPoisonFlags;
    bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
      auto *CmpInst = dyn_cast<ICmpInst>(U);
      if (!CmpInst) {
        // This is always correct as OR only adds bits and we are checking
        // against 0.
        if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
          DropPoisonFlags.push_back(cast<Instruction>(U));
          CmpInst = dyn_cast<ICmpInst>(U->user_back());
        }
      }
      if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
          !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
        return false;
      }
      return true;
    });
    // All uses of the PHI end in a compare with zero.
    if (AllUsesOfPhiEndsInCmp) {
      ConstantInt *NonZeroConst = nullptr;
      bool MadeChange = false;
      for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
        Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
        Value *VA = PN.getIncomingValue(I);
        if (isKnownNonZero(VA, getSimplifyQuery().getWithInstruction(CtxI))) {
          if (!NonZeroConst)
            NonZeroConst = getAnyNonZeroConstInt(PN);
          if (NonZeroConst != VA) {
            replaceOperand(PN, I, NonZeroConst);
            // The "disjoint" flag may no longer hold after the transform.
            for (Instruction *I : DropPoisonFlags)
              I->dropPoisonGeneratingFlags();
            MadeChange = true;
          }
        }
      }
      if (MadeChange)
        return &PN;
    }
  }
1554
1555 // We sometimes end up with phi cycles that non-obviously end up being the
1556 // same value, for example:
1557 // z = some value; x = phi (y, z); y = phi (x, z)
1558 // where the phi nodes don't necessarily need to be in the same block. Do a
1559 // quick check to see if the PHI node only contains a single non-phi value, if
1560 // so, scan to see if the phi cycle is actually equal to that value. If the
1561 // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
1562 // one of the phis itself does not have a single input.
1563 {
1564 unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1565 // Scan for the first non-phi operand.
1566 while (InValNo != NumIncomingVals &&
1567 isa<PHINode>(PN.getIncomingValue(InValNo)))
1568 ++InValNo;
1569
1570 Value *NonPhiInVal =
1571 InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;
1572
1573 // Scan the rest of the operands to see if there are any conflicts, if so
1574 // there is no need to recursively scan other phis.
1575 if (NonPhiInVal)
1576 for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1577 Value *OpVal = PN.getIncomingValue(InValNo);
1578 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1579 break;
1580 }
1581
1582 // If we scanned over all operands, then we have one unique value plus
1583 // phi values. Scan PHI nodes to see if they all merge in each other or
1584 // the value.
1585 if (InValNo == NumIncomingVals) {
1586 SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
1587 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1588 return replaceInstUsesWith(PN, NonPhiInVal);
1589 }
1590 }
1591
1592 // If there are multiple PHIs, sort their operands so that they all list
1593 // the blocks in the same order. This helps identical PHIs be eliminated
1594 // by other passes. Other passes shouldn't depend on this for correctness,
1595 // however.
1596 auto Res = PredOrder.try_emplace(PN.getParent());
1597 if (!Res.second) {
1598 const auto &Preds = Res.first->second;
1599 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1600 BasicBlock *BBA = PN.getIncomingBlock(I);
1601 BasicBlock *BBB = Preds[I];
1602 if (BBA != BBB) {
1603 Value *VA = PN.getIncomingValue(I);
1604 unsigned J = PN.getBasicBlockIndex(BBB);
1605 Value *VB = PN.getIncomingValue(J);
1606 PN.setIncomingBlock(I, BBB);
1607 PN.setIncomingValue(I, VB);
1608 PN.setIncomingBlock(J, BBA);
1609 PN.setIncomingValue(J, VA);
1610 // NOTE: InstCombine normally would want us to "return &PN" if we
1611 // modified any of the operands of an instruction. However, since we
1612 // aren't adding or removing uses (just rearranging them), we don't do
1613 // so in this case.
1614 }
1615 }
1616 } else {
1617 // Remember the block order of the first encountered phi node.
1618 append_range(Res.first->second, PN.blocks());
1619 }
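// A rough sketch (hypothetical IR): if the remembered predecessor order for
// this block is [ %a, %b ], then
//   %p = phi i32 [ 0, %b ], [ 1, %a ]
// is rewritten in place to the equivalent
//   %p = phi i32 [ 1, %a ], [ 0, %b ]
// so that identical PHIs also become textually identical.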
1620
1621 // Is there an identical PHI node in this basic block?
1622 for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1623 // Ignore the PHI node itself.
1624 if (&IdenticalPN == &PN)
1625 continue;
1626 // Note that even though we've just canonicalized this PHI, due to the
1627 // worklist visitation order there is no guarantee that *every* PHI
1628 // has been canonicalized, so we can't just compare operand ranges.
1629 if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1630 continue;
1631 // Just use that PHI instead then.
1632 ++NumPHICSEs;
1633 return replaceInstUsesWith(PN, &IdenticalPN);
1634 }
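// A rough sketch (hypothetical IR): given two PHIs in the same block
//   %p1 = phi i32 [ 1, %a ], [ 2, %b ]
//   %p2 = phi i32 [ 1, %a ], [ 2, %b ]
// visiting %p2 replaces all of its uses with %p1, after which %p2 is dead
// and gets cleaned up by the combiner.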
1635
1636 // If this is an integer PHI and we know that it has an illegal type, see if
1637 // it is only used by trunc or trunc(lshr) operations. If so, we split the
1638 // PHI into the various pieces being extracted. This sort of thing is
1639 // introduced when SROA promotes an aggregate to a single large integer type.
1640 if (PN.getType()->isIntegerTy() &&
1641 !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1642 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1643 return Res;
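// A rough sketch (hypothetical IR, assuming a target whose widest legal
// integer type is i32):
//   %p  = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
//   %lo = trunc i64 %p to i32
//   %sh = lshr i64 %p, 32
//   %hi = trunc i64 %sh to i32
// can be sliced into two i32 PHIs over the matching pieces of %a and %b,
// feeding %lo and %hi directly.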
1644
1645 // Ultimately, try to replace this Phi with a dominating condition.
1646 if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1647 return replaceInstUsesWith(PN, V);
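// A rough sketch (hypothetical IR): a PHI that merely re-materializes a
// dominating branch condition, e.g.
//   br i1 %cond, label %t, label %f
// t: br label %m
// f: br label %m
// m: %p = phi i1 [ true, %t ], [ false, %f ]
// simplifies to %cond itself.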
1648
1649 if (Value *Res = foldDependentIVs(PN, Builder))
1650 return replaceInstUsesWith(PN, Res);
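// A rough sketch (hypothetical IR; the exact preconditions are checked in
// foldDependentIVs): an IV whose backedge value is another IV's next value
// combined with its own start, e.g.
//   %iv2      = phi i32 [ 0, %ph ], [ %iv2.next, %loop ]
//   %iv2.next = add i32 %iv2, %step
//   %iv       = phi i32 [ %start, %ph ], [ %iv.next, %loop ]
//   %iv.next  = add i32 %iv2.next, %start
// can be folded so that %iv is computed as add i32 %iv2, %start.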
1651
1652 return nullptr;
1653}