//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
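//
//    (Illustrative instance of canonicalization #1, added for exposition and
//    not an excerpt from any test: "%r = add i32 7, %x" is rewritten to
//    "%r = add i32 %x, 7" so later folds only need constants on the RHS.)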
//
//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
                                       cl::desc("Enable code sinking"),
                                       cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

namespace llvm {
} // end namespace llvm

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
  return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
  if (Inst && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}

Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
                                        GEPNoWrapFlags NW, Type *IdxTy,
                                        bool RewriteGEPs) {
  auto Add = [&](Value *Sum, Value *Offset) -> Value * {
    if (Sum)
      return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
                               NW.isInBounds());
    else
      return Offset;
  };

  Value *Sum = nullptr;
  Value *OneUseSum = nullptr;
  Value *OneUseBase = nullptr;
  GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
  for (GEPOperator *GEP : reverse(GEPs)) {
    Value *Offset;
    {
      // Expand the offset at the point of the previous GEP to enable rewriting.
      // However, use the original insertion point for calculating Sum.
      IRBuilderBase::InsertPointGuard Guard(Builder);
      auto *Inst = dyn_cast<Instruction>(GEP);
      if (RewriteGEPs && Inst)
        Builder.SetInsertPoint(Inst);

      Offset = EmitGEPOffset(GEP);
      if (Offset->getType() != IdxTy)
        Offset = Builder.CreateVectorSplat(
            cast<VectorType>(IdxTy)->getElementCount(), Offset);
      if (GEP->hasOneUse()) {
        // Offsets of one-use GEPs will be merged into the next multi-use GEP.
        OneUseSum = Add(OneUseSum, Offset);
        OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
        if (!OneUseBase)
          OneUseBase = GEP->getPointerOperand();
        continue;
      }

      if (OneUseSum)
        Offset = Add(OneUseSum, Offset);

      // Rewrite the GEP to reuse the computed offset. This also includes
      // offsets from preceding one-use GEPs.
      if (RewriteGEPs && Inst &&
          !(GEP->getSourceElementType()->isIntegerTy(8) &&
            GEP->getOperand(1) == Offset)) {
        replaceInstUsesWith(
            *Inst,
            Builder.CreatePtrAdd(
                OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
                OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
        eraseInstFromFunction(*Inst);
      }
    }

    Sum = Add(Sum, Offset);
    OneUseSum = OneUseBase = nullptr;
    OneUseFlags = GEPNoWrapFlags::all();
  }
  if (OneUseSum)
    Sum = Add(Sum, OneUseSum);
  if (!Sum)
    return Constant::getNullValue(IdxTy);
  return Sum;
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
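///
/// Illustrative examples (added for exposition, assuming a data layout that
/// declares only "n32:64"): shouldChangeType(64, 32) is true because we shrink
/// one legal width to another, while shouldChangeType(32, 37) is false because
/// i32 is desirable and i37 is illegal.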
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to 'To'.
/// We don't want to convert from a legal to an illegal type or from a smaller
/// to a larger illegal type. i1 is always treated as a legal type because it is
/// a fundamental type in IR, and there are many specialized optimizations for
/// i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true, if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add/Sub/Mul opcodes. For
// all other opcodes, the function conservatively returns false.
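// For example (illustrative): for "add nsw i8" with B = 100 and C = 28,
// 100 + 28 = 128 overflows signed i8, so nsw must be dropped; with C = 27,
// 100 + 27 = 127 still fits and nsw can be kept.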
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  // We reason about Add/Sub/Mul Only.
  bool Overflow = false;
  switch (I.getOpcode()) {
  case Instruction::Add:
    (void)BVal->sadd_ov(*CVal, Overflow);
    break;
  case Instruction::Sub:
    (void)BVal->ssub_ov(*CVal, Overflow);
    break;
  case Instruction::Mul:
    (void)BVal->smul_ov(*CVal, Overflow);
    break;
  default:
    // Conservatively return false for other opcodes.
    return false;
  }
  return !Overflow;
}

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after a
/// cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
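///
/// Illustrative instance (not from the original comments): for
///   %i = and i8 %x, 12 ; %z = zext i8 %i to i32 ; %r = and i32 %z, 7
/// the constants fold to "and i32 (zext i8 %x to i32), 4".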
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp2->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
/// Commutative operators:
///
/// 1. Order operands such that they are listed from right (least complex) to
///    left (most complex). This puts constants before unary operators before
///    binary operators.
///
/// Associative operators:
///
/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
/// Associative and commutative operators:
///
/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///    if C1 and C2 are constants.
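///
/// As an illustrative instance of transform 2: given
///   %t = add i32 %a, 10 ; %r = add i32 %t, -10
/// the inner "10 op -10" simplifies to 0, so the pair is rewritten as
/// "%r = add i32 %a, 0", which later visits fold away entirely.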
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
            BinaryOperator::CreateNUW(Opcode, A, B) :
            BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns identity value for given opcode, which can be used to
/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<BinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}

// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x))
//        -> (cmp pred C', (ctpop x))
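// For example (illustrative): with an i8 operand, (add 10, (ctpop (not x)))
// becomes (sub 18, (ctpop x)), because ctpop(not x) == 8 - ctpop(x).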
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
  // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
  // We can fold the BitWidth(x) with add/sub/icmp as long as the other operand
  // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types.
    // As well, since (ctpop x) is known to be in [0, log2(BitWidth(x))],
    // almost all signed comparisons against it are simplified to unsigned.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
        ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
    if (!Cmp || !Cmp->isZeroValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//    IFF
//    1) the logic_shifts match
//    2) either BinOp1 is `and`, or the binop/shift pair distributes
//       completely (anything but `add` + `lshr`) and either BinOp2 is `and`
//       or (logic_shift (inv_logic_shift C1, C), C) == C1
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//    IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//    IFF
//    1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//    2) Binop2 is `not`
//
//    -> (arithmetic_shift Binop1((not X), Y), Amt)
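//
// A concrete instance of the first pattern (illustrative only):
//   (or (and (lshr X, 3), 12), (lshr Y, 3))
//     -> (lshr (or (and X, 96), Y), 3)
// which is valid because the mask round-trips: ((12 << 3) >> 3) == 12.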

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to use
      // add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_BinOp(m_CombineAnd(
                           m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
                           m_Value(ShiftedX)),
                       m_Value(Mask))))
      return nullptr;
    // Make sure we are matching instruction shifts and not ConstantExpr
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need same shift opcode
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure binop is real instruction and not ConstantExpr
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift irrelevant of constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
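//
// For example (illustrative):
//   %r = add i32 (zext i1 %c to i32), (select i1 %c, i32 %t, i32 %f)
//     -> %r = select i1 %c, i32 (add i32 1, %t), i32 (add i32 0, %f)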
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal));
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal));
  }

  return nullptr;
}

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b)
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  SelectInst *SI = LHSIsSelect && RHSIsSelect
                       ? nullptr
                       : cast<SelectInst>(LHSIsSelect ? LHS : RHS);

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;
    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
  NewSI->takeName(&I);
  return NewSI;
}

/// Freely adapt every user of V as-if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br: {
      BranchInst *BI = cast<BranchInst>(U);
      BI->swapSuccessors(); // swaps prof metadata too
      if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
    }
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }

  // Update pre-existing debug value uses.
  SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
  llvm::findDbgValues(I, DbgVariableRecords);

  for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
    SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
    for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
         Idx != End; ++Idx)
      if (DbgVal->getVariableLocationOp(Idx) == I)
        DbgVal->setExpression(
            DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
/// constant zero (which is the 'negate' form).
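/// For example (illustrative), given "%n = sub i32 0, %x" this returns %x, and
/// given the constant i32 -5 it returns i32 5.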
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
//
// Assuming the sign of the cast for x/y is `OpsFromSigned`.
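//
// For example (illustrative): (fadd (sitofp i16 %x to float),
// (sitofp i16 %y to float)) can become (sitofp (add nsw i16 %x, %y) to float)
// once both casts are known exact and the integer add provably cannot
// overflow.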
Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
    Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {

  Type *FPTy = BO.getType();
  Type *IntTy = IntOps[0]->getType();

  unsigned IntSz = IntTy->getScalarSizeInBits();
  // This is the maximum number of in-use bits by the integer where the int ->
  // fp casts are exact.
  unsigned MaxRepresentableBits =
      APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());

  // Preserve known number of leading bits. This can allow us to trivially pass
  // nsw/nuw checks later on.
  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};

  // NB: This only comes up if OpsFromSigned is true, so there is no need to
  // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
  auto IsNonZero = [&](unsigned OpNo) -> bool {
    if (OpsKnown[OpNo].hasKnownBits() &&
        OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
      return true;
    return isKnownNonZero(IntOps[OpNo], SQ);
  };

  auto IsNonNeg = [&](unsigned OpNo) -> bool {
    // NB: This matches the impl in ValueTracking, we just try to use cached
    // knownbits here. If we ever start supporting WithCache for
    // `isKnownNonNegative`, change this to an explicit call.
    return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
  };

  // Check if we know for certain that ({s|u}itofp op) is exact.
  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
    // Can we treat this operand as the desired sign?
    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
        !IsNonNeg(OpNo))
      return false;

    // If fp precision >= bitwidth(op) then it's exact.
    // NB: This is slightly conservative for `sitofp`. For signed conversion, we
    // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
    // handled specially. We can't, however, increase the bound arbitrarily for
    // `sitofp` as for larger sizes, it won't sign extend.
    if (MaxRepresentableBits < IntSz) {
      // Otherwise if it's a signed cast, check that fp precision >=
      // bitwidth(op) - numSignBits(op).
      // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
      // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
      if (OpsFromSigned)
        NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
      // Finally for unsigned check that fp precision >= bitwidth(op) -
      // numLeadingZeros(op).
      else {
        NumUsedLeadingBits[OpNo] =
            IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
      }
    }
    // NB: We could also check if op is known to be a power of 2 or zero (which
    // will always be representable). It's unlikely, however, that if we are
    // unable to bound op in any way, we will be able to pass the overflow
    // checks later on.

    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
      return false;
    // Signed + Mul also requires that op is non-zero to avoid -0 cases.
    return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
           IsNonZero(OpNo);
  };

  // If we have a constant rhs, see if we can losslessly convert it to an int.
  if (Op1FpC != nullptr) {
    // Signed + Mul requires non-zero.
    if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
        !match(Op1FpC, m_NonZeroFP()))
      return nullptr;

    Constant *Op1IntC = ConstantFoldCastOperand(
        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
        IntTy, DL);
    if (Op1IntC == nullptr)
      return nullptr;
    if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
                                              : Instruction::UIToFP,
                                Op1IntC, FPTy, DL) != Op1FpC)
      return nullptr;

    // First try to keep sign of cast the same.
    IntOps[1] = Op1IntC;
  }

  // Ensure lhs/rhs integer types match.
  if (IntTy != IntOps[1]->getType())
    return nullptr;

  if (Op1FpC == nullptr) {
    if (!IsValidPromotion(1))
      return nullptr;
  }
  if (!IsValidPromotion(0))
    return nullptr;

  // Finally we check if the integer version of the binop will not overflow.
  BinaryOperator::BinaryOps IntOpc;
  // Because of the precision check, we can often rule out overflows.
  bool NeedsOverflowCheck = true;
  // Try to conservatively rule out overflow based on the already done precision
  // checks.
  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
  unsigned OverflowMaxCurBits =
      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
  bool OutputSigned = OpsFromSigned;
  switch (BO.getOpcode()) {
  case Instruction::FAdd:
    IntOpc = Instruction::Add;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FSub:
    IntOpc = Instruction::Sub;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FMul:
    IntOpc = Instruction::Mul;
    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
    break;
  default:
    llvm_unreachable("Unsupported binop");
  }
  // The precision check may have already ruled out overflow.
  if (OverflowMaxOutputBits < IntSz) {
    NeedsOverflowCheck = false;
    // We can bound unsigned overflow from sub to in range signed value (this is
    // what allows us to avoid the overflow check for sub).
    if (IntOpc == Instruction::Sub)
      OutputSigned = true;
  }

  // Precision check did not rule out overflow, so need to check.
  // TODO: If we add support for `WithCache` in `willNotOverflow`, change
  // `IntOps[...]` arguments to `KnownOps[...]`.
  if (NeedsOverflowCheck &&
      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
    return nullptr;

  Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
  if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
    IntBO->setHasNoSignedWrap(OutputSigned);
    IntBO->setHasNoUnsignedWrap(!OutputSigned);
  }
  if (OutputSigned)
    return new SIToFPInst(IntBinOp, FPTy);
  return new UIToFPInst(IntBinOp, FPTy);
}

1687// Try to fold:
1688// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1689// -> ({s|u}itofp (int_binop x, y))
1690// 2) (fp_binop ({s|u}itofp x), FpC)
1691// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1692Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1693 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1694 Constant *Op1FpC = nullptr;
1695 // Check for:
1696 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1697 // 2) (binop ({s|u}itofp x), FpC)
1698 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1699 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1700 return nullptr;
1701
1702 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1703 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1704 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1705 return nullptr;
1706
1707 // Cache KnownBits a bit to potentially save some analysis.
1708 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1709
1710 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1711 // different constraints depending on the sign of the cast.
1712 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1713 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1714 IntOps, Op1FpC, OpsKnown))
1715 return R;
1716 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1717 Op1FpC, OpsKnown);
1718}
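// Illustrative example (editorial sketch, not from the source): given i8
// operands, the fold above rewrites
//   %xf = sitofp i8 %x to float
//   %yf = sitofp i8 %y to float
//   %r  = fadd float %xf, %yf
// into
//   %s  = add nsw i8 %x, %y
//   %r  = sitofp i8 %s to float
// provided the precision bounds or willNotOverflow prove the i8 add cannot
// wrap.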
1719
1720/// A binop with a constant operand and a sign-extended boolean operand may be
1721/// converted into a select of constants by applying the binary operation to
1722/// the constant with the two possible values of the extended boolean (0 or -1).
1723Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1724 // TODO: Handle non-commutative binop (constant is operand 0).
1725 // TODO: Handle zext.
1726 // TODO: Peek through 'not' of cast.
1727 Value *BO0 = BO.getOperand(0);
1728 Value *BO1 = BO.getOperand(1);
1729 Value *X;
1730 Constant *C;
1731 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1732 !X->getType()->isIntOrIntVectorTy(1))
1733 return nullptr;
1734
1735 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1738 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1739 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1740 return createSelectInst(X, TVal, FVal);
1741}
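// Illustrative example (not from the source): with BO = add and C = 5,
//   %e = sext i1 %X to i32
//   %r = add i32 %e, 5
// becomes
//   %r = select i1 %X, i32 4, i32 5   ; add(-1, 5) and add(0, 5)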
1742
1743 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1744                                                  bool IsTrueArm) {
1745   SmallVector<Value *> Ops;
1746   for (Value *Op : I.operands()) {
1747 Value *V = nullptr;
1748 if (Op == SI) {
1749 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1750 } else if (match(SI->getCondition(),
1751                      m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1752                                               : ICmpInst::ICMP_NE,
1753                                     m_Specific(Op), m_Value(V))) &&
1754                isGuaranteedNotToBeUndefOrPoison(V)) {
1755       // Pass
1756 } else {
1757 V = Op;
1758 }
1759 Ops.push_back(V);
1760 }
1761
1762 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1763}
1764
1765 static Instruction *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1766                                                    Value *NewOp, InstCombiner &IC) {
1767 Instruction *Clone = I.clone();
1768 Clone->replaceUsesOfWith(SI, NewOp);
1769   Clone->dropUBImplyingAttrsAndMetadata();
1770   IC.InsertNewInstBefore(Clone, I.getIterator());
1771 return Clone;
1772}
1773
1774 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1775                                                 bool FoldWithMultiUse) {
1776   // Don't modify shared select instructions unless FoldWithMultiUse is set.
1777 if (!SI->hasOneUse() && !FoldWithMultiUse)
1778 return nullptr;
1779
1780 Value *TV = SI->getTrueValue();
1781 Value *FV = SI->getFalseValue();
1782
1783 // Bool selects with constant operands can be folded to logical ops.
1784 if (SI->getType()->isIntOrIntVectorTy(1))
1785 return nullptr;
1786
1787 // Avoid breaking min/max reduction pattern,
1788 // which is necessary for vectorization later.
1789   if (isa<MinMaxIntrinsic>(&Op))
1790     for (Value *IntrinOp : Op.operands())
1791 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1792 for (Value *PhiOp : PN->operands())
1793 if (PhiOp == &Op)
1794 return nullptr;
1795
1796 // Test if a FCmpInst instruction is used exclusively by a select as
1797 // part of a minimum or maximum operation. If so, refrain from doing
1798 // any other folding. This helps out other analyses which understand
1799 // non-obfuscated minimum and maximum idioms. And in this case, at
1800 // least one of the comparison operands has at least one user besides
1801 // the compare (the select), which would often largely negate the
1802 // benefit of folding anyway.
1803 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1804 if (CI->hasOneUse()) {
1805 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1806 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1807 !CI->isCommutative())
1808 return nullptr;
1809 }
1810 }
1811
1812 // Make sure that one of the select arms folds successfully.
1813 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1814 Value *NewFV =
1815 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1816 if (!NewTV && !NewFV)
1817 return nullptr;
1818
1819 // Create an instruction for the arm that did not fold.
1820 if (!NewTV)
1821 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1822 if (!NewFV)
1823 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1824 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1825}
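// Illustrative example (not from the source): both arms fold to constants
// here, so
//   %sel = select i1 %c, i32 2, i32 6
//   %r   = mul i32 %sel, 3
// becomes
//   %r = select i1 %c, i32 6, i32 18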
1826
1827 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1828                                          Value *InValue, BasicBlock *InBB,
1829 const DataLayout &DL,
1830 const SimplifyQuery SQ) {
1831 // NB: It is a precondition of this transform that the operands be
1832 // phi translatable!
1833   SmallVector<Value *> Ops;
1834   for (Value *Op : I.operands()) {
1835 if (Op == PN)
1836 Ops.push_back(InValue);
1837 else
1838 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1839 }
1840
1841 // Don't consider the simplification successful if we get back a constant
1842 // expression. That's just an instruction in hiding.
1843 // Also reject the case where we simplify back to the phi node. We wouldn't
1844 // be able to remove it in that case.
1845   Value *NewVal = simplifyInstructionWithOperands(
1846       &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1847 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1848 return NewVal;
1849
1850 // Check if incoming PHI value can be replaced with constant
1851 // based on implied condition.
1852 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1853 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1854 if (TerminatorBI && TerminatorBI->isConditional() &&
1855 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1856 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1857 std::optional<bool> ImpliedCond = isImpliedCondition(
1858 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1859 DL, LHSIsTrue);
1860 if (ImpliedCond)
1861 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1862 }
1863
1864 return nullptr;
1865}
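// Illustrative example (not from the source): if the incoming block's
// terminator is
//   br i1 (icmp ult i32 %x, 10), label %phibb, label %other
// and the phi user is (icmp ult i32 %x, 20) after phi translation, then
// %x u< 10 implies %x u< 20, so that incoming value folds to true.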
1866
1867 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1868                                              bool AllowMultipleUses) {
1869 unsigned NumPHIValues = PN->getNumIncomingValues();
1870 if (NumPHIValues == 0)
1871 return nullptr;
1872
1873 // We normally only transform phis with a single use. However, if a PHI has
1874 // multiple uses and they are all the same operation, we can fold *all* of the
1875 // uses into the PHI.
1876 bool OneUse = PN->hasOneUse();
1877 bool IdenticalUsers = false;
1878 if (!AllowMultipleUses && !OneUse) {
1879 // Walk the use list for the instruction, comparing them to I.
1880 for (User *U : PN->users()) {
1881       auto *UI = cast<Instruction>(U);
1882       if (UI != &I && !I.isIdenticalTo(UI))
1883 return nullptr;
1884 }
1885 // Otherwise, we can replace *all* users with the new PHI we form.
1886 IdenticalUsers = true;
1887 }
1888
1889 // Check that all operands are phi-translatable.
1890 for (Value *Op : I.operands()) {
1891 if (Op == PN)
1892 continue;
1893
1894 // Non-instructions never require phi-translation.
1895 auto *I = dyn_cast<Instruction>(Op);
1896 if (!I)
1897 continue;
1898
1899 // Phi-translate can handle phi nodes in the same block.
1900 if (isa<PHINode>(I))
1901 if (I->getParent() == PN->getParent())
1902 continue;
1903
1904 // Operand dominates the block, no phi-translation necessary.
1905 if (DT.dominates(I, PN->getParent()))
1906 continue;
1907
1908 // Not phi-translatable, bail out.
1909 return nullptr;
1910 }
1911
1912 // Check to see whether the instruction can be folded into each phi operand.
1913 // If there is one operand that does not fold, remember the BB it is in.
1914 SmallVector<Value *> NewPhiValues;
1915 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1916 bool SeenNonSimplifiedInVal = false;
1917 for (unsigned i = 0; i != NumPHIValues; ++i) {
1918 Value *InVal = PN->getIncomingValue(i);
1919 BasicBlock *InBB = PN->getIncomingBlock(i);
1920
1921 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1922 NewPhiValues.push_back(NewVal);
1923 continue;
1924 }
1925
1926 // Handle some cases that can't be fully simplified, but where we know that
1927 // the two instructions will fold into one.
1928 auto WillFold = [&]() {
1929 if (!InVal->hasUseList() || !InVal->hasOneUser())
1930 return false;
1931
1932 // icmp of ucmp/scmp with constant will fold to icmp.
1933 const APInt *Ignored;
1934 if (isa<CmpIntrinsic>(InVal) &&
1935 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1936 return true;
1937
1938 // icmp eq zext(bool), 0 will fold to !bool.
1939 if (isa<ZExtInst>(InVal) &&
1940 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1941 match(&I,
1943 return true;
1944
1945 return false;
1946 };
1947
1948 if (WillFold()) {
1949 OpsToMoveUseToIncomingBB.push_back(i);
1950 NewPhiValues.push_back(nullptr);
1951 continue;
1952 }
1953
1954 if (!OneUse && !IdenticalUsers)
1955 return nullptr;
1956
1957 if (SeenNonSimplifiedInVal)
1958 return nullptr; // More than one non-simplified value.
1959 SeenNonSimplifiedInVal = true;
1960
1961 // If there is exactly one non-simplified value, we can insert a copy of the
1962 // operation in that block. However, if this is a critical edge, we would
1963 // be inserting the computation on some other paths (e.g. inside a loop).
1964 // Only do this if the pred block is unconditionally branching into the phi
1965 // block. Also, make sure that the pred block is not dead code.
1966     auto *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1967     if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1968 return nullptr;
1969
1970 NewPhiValues.push_back(nullptr);
1971 OpsToMoveUseToIncomingBB.push_back(i);
1972
1973 // Do not push the operation across a loop backedge. This could result in
1974 // an infinite combine loop, and is generally non-profitable (especially
1975 // if the operation was originally outside the loop).
1976 if (isBackEdge(InBB, PN->getParent()))
1977 return nullptr;
1978 }
1979
1980 // Clone the instruction that uses the phi node and move it into the incoming
1981 // BB because we know that the next iteration of InstCombine will simplify it.
1982   SmallDenseMap<BasicBlock *, Instruction *> Clones;
1983   for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1984     Value *Op = PN->getIncomingValue(OpIndex);
1985     BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1986
1987 Instruction *Clone = Clones.lookup(OpBB);
1988 if (!Clone) {
1989 Clone = I.clone();
1990 for (Use &U : Clone->operands()) {
1991 if (U == PN)
1992 U = Op;
1993 else
1994 U = U->DoPHITranslation(PN->getParent(), OpBB);
1995 }
1996 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1997 Clones.insert({OpBB, Clone});
1998 // We may have speculated the instruction.
1999       Clone->dropUBImplyingAttrsAndMetadata();
2000     }
2001
2002 NewPhiValues[OpIndex] = Clone;
2003 }
2004
2005 // Okay, we can do the transformation: create the new PHI node.
2006 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2007 InsertNewInstBefore(NewPN, PN->getIterator());
2008 NewPN->takeName(PN);
2009 NewPN->setDebugLoc(PN->getDebugLoc());
2010
2011 for (unsigned i = 0; i != NumPHIValues; ++i)
2012 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2013
2014 if (IdenticalUsers) {
2015 // Collect and deduplicate users up-front to avoid iterator invalidation.
2016     SmallSetVector<Instruction *, 4> ToReplace;
2017     for (User *U : PN->users()) {
2018       auto *User = cast<Instruction>(U);
2019       if (User == &I)
2020 continue;
2021 ToReplace.insert(User);
2022 }
2023 for (Instruction *I : ToReplace) {
2024 replaceInstUsesWith(*I, NewPN);
2025       eraseInstFromFunction(*I);
2026     }
2027 OneUse = true;
2028 }
2029
2030 if (OneUse) {
2031 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2032 }
2033 return replaceInstUsesWith(I, NewPN);
2034}
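// Illustrative example (not from the source): with all incoming values
// simplifying,
//   %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
//   %r = add i32 %p, 4
// becomes
//   %r = phi i32 [ 5, %bb0 ], [ 6, %bb1 ]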
2035
2036 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2037   if (!BO.isAssociative())
2038 return nullptr;
2039
2040 // Find the interleaved binary ops.
2041 auto Opc = BO.getOpcode();
2042 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2043 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2044 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2045 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2046 !BO0->isAssociative() || !BO1->isAssociative() ||
2047 BO0->getParent() != BO1->getParent())
2048 return nullptr;
2049
2050 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2051 "Expected commutative instructions!");
2052
2053 // Find the matching phis, forming the recurrences.
2054 PHINode *PN0, *PN1;
2055 Value *Start0, *Step0, *Start1, *Step1;
2056 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2057 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2058 PN0->getParent() != PN1->getParent())
2059 return nullptr;
2060
2061 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2062 "Expected PHIs with two incoming values!");
2063
2064 // Convert the start and step values to constants.
2065 auto *Init0 = dyn_cast<Constant>(Start0);
2066 auto *Init1 = dyn_cast<Constant>(Start1);
2067 auto *C0 = dyn_cast<Constant>(Step0);
2068 auto *C1 = dyn_cast<Constant>(Step1);
2069 if (!Init0 || !Init1 || !C0 || !C1)
2070 return nullptr;
2071
2072 // Fold the recurrence constants.
2073 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2074 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2075 if (!Init || !C)
2076 return nullptr;
2077
2078 // Create the reduced PHI.
2079 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2080 "reduced.phi");
2081
2082 // Create the new binary op.
2083 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2084 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2085 // Intersect FMF flags for FADD and FMUL.
2086 FastMathFlags Intersect = BO0->getFastMathFlags() &
2087 BO1->getFastMathFlags() & BO.getFastMathFlags();
2088 NewBO->setFastMathFlags(Intersect);
2089 } else {
2090 OverflowTracking Flags;
2091 Flags.AllKnownNonNegative = false;
2092 Flags.AllKnownNonZero = false;
2093 Flags.mergeFlags(*BO0);
2094 Flags.mergeFlags(*BO1);
2095 Flags.mergeFlags(BO);
2096 Flags.applyFlags(*NewBO);
2097 }
2098 NewBO->takeName(&BO);
2099
2100 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2101 auto *V = PN0->getIncomingValue(I);
2102 auto *BB = PN0->getIncomingBlock(I);
2103 if (V == Init0) {
2104 assert(((PN1->getIncomingValue(0) == Init1 &&
2105 PN1->getIncomingBlock(0) == BB) ||
2106 (PN1->getIncomingValue(1) == Init1 &&
2107 PN1->getIncomingBlock(1) == BB)) &&
2108 "Invalid incoming block!");
2109 NewPN->addIncoming(Init, BB);
2110 } else if (V == BO0) {
2111 assert(((PN1->getIncomingValue(0) == BO1 &&
2112 PN1->getIncomingBlock(0) == BB) ||
2113 (PN1->getIncomingValue(1) == BO1 &&
2114 PN1->getIncomingBlock(1) == BB)) &&
2115 "Invalid incoming block!");
2116 NewPN->addIncoming(NewBO, BB);
2117 } else
2118 llvm_unreachable("Unexpected incoming value!");
2119 }
2120
2121 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2122 << "\n with " << *PN1 << "\n " << *BO1
2123 << '\n');
2124
2125 // Insert the new recurrence and remove the old (dead) ones.
2126 InsertNewInstWith(NewPN, PN0->getIterator());
2127 InsertNewInstWith(NewBO, BO0->getIterator());
2128
2129   eraseInstFromFunction(
2130       *replaceInstUsesWith(*BO0, PoisonValue::get(BO0->getType())));
2131   eraseInstFromFunction(
2132       *replaceInstUsesWith(*BO1, PoisonValue::get(BO1->getType())));
2133   eraseInstFromFunction(*PN0);
2134   eraseInstFromFunction(*PN1);
2135
2136 return replaceInstUsesWith(BO, NewBO);
2137}
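// Illustrative example (not from the source): two parallel add recurrences
//   %p0 = phi i32 [ 1, %entry ], [ %a0, %loop ]
//   %p1 = phi i32 [ 2, %entry ], [ %a1, %loop ]
//   %a0 = add i32 %p0, 3
//   %a1 = add i32 %p1, 5
//   %r  = add i32 %a0, %a1
// collapse into one recurrence:
//   %p = phi i32 [ 3, %entry ], [ %r, %loop ]   ; fold(1 + 2)
//   %r = add i32 %p, 8                          ; fold(3 + 5)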
2138
2139 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2140   // Attempt to fold binary operators whose operands are simple recurrences.
2141 if (auto *NewBO = foldBinopWithRecurrence(BO))
2142 return NewBO;
2143
2144 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2145 // we are guarding against replicating the binop in >1 predecessor.
2146 // This could miss matching a phi with 2 constant incoming values.
2147 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2148 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2149 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2150 Phi0->getNumOperands() != Phi1->getNumOperands())
2151 return nullptr;
2152
2153 // TODO: Remove the restriction for binop being in the same block as the phis.
2154 if (BO.getParent() != Phi0->getParent() ||
2155 BO.getParent() != Phi1->getParent())
2156 return nullptr;
2157
2158   // Fold if there is at least one specific constant value in phi0 or phi1's
2159   // incoming values that comes from the same block and this specific constant
2160   // value can be used to optimize the binary operator.
2161 // For example:
2162 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2163 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2164 // %add = add i32 %phi0, %phi1
2165 // ==>
2166 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2167   Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2168                                                /*AllowRHSConstant*/ false);
2169 if (C) {
2170 SmallVector<Value *, 4> NewIncomingValues;
2171 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2172 auto &Phi0Use = std::get<0>(T);
2173 auto &Phi1Use = std::get<1>(T);
2174 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2175 return false;
2176 Value *Phi0UseV = Phi0Use.get();
2177 Value *Phi1UseV = Phi1Use.get();
2178 if (Phi0UseV == C)
2179 NewIncomingValues.push_back(Phi1UseV);
2180 else if (Phi1UseV == C)
2181 NewIncomingValues.push_back(Phi0UseV);
2182 else
2183 return false;
2184 return true;
2185 };
2186
2187 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2188 CanFoldIncomingValuePair)) {
2189 PHINode *NewPhi =
2190 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2191 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2192 "The number of collected incoming values should equal the number "
2193 "of the original PHINode operands!");
2194 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2195 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2196 return NewPhi;
2197 }
2198 }
2199
2200 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2201 return nullptr;
2202
2203 // Match a pair of incoming constants for one of the predecessor blocks.
2204 BasicBlock *ConstBB, *OtherBB;
2205 Constant *C0, *C1;
2206 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2207 ConstBB = Phi0->getIncomingBlock(0);
2208 OtherBB = Phi0->getIncomingBlock(1);
2209 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2210 ConstBB = Phi0->getIncomingBlock(1);
2211 OtherBB = Phi0->getIncomingBlock(0);
2212 } else {
2213 return nullptr;
2214 }
2215 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2216 return nullptr;
2217
2218 // The block that we are hoisting to must reach here unconditionally.
2219 // Otherwise, we could be speculatively executing an expensive or
2220   // non-speculatable op.
2221 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2222 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2223 !DT.isReachableFromEntry(OtherBB))
2224 return nullptr;
2225
2226 // TODO: This check could be tightened to only apply to binops (div/rem) that
2227 // are not safe to speculatively execute. But that could allow hoisting
2228 // potentially expensive instructions (fdiv for example).
2229 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2230     if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2231       return nullptr;
2232
2233 // Fold constants for the predecessor block with constant incoming values.
2234 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2235 if (!NewC)
2236 return nullptr;
2237
2238 // Make a new binop in the predecessor block with the non-constant incoming
2239 // values.
2240 Builder.SetInsertPoint(PredBlockBranch);
2241 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2242 Phi0->getIncomingValueForBlock(OtherBB),
2243 Phi1->getIncomingValueForBlock(OtherBB));
2244 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2245 NotFoldedNewBO->copyIRFlags(&BO);
2246
2247 // Replace the binop with a phi of the new values. The old phis are dead.
2248 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2249 NewPhi->addIncoming(NewBO, OtherBB);
2250 NewPhi->addIncoming(NewC, ConstBB);
2251 return NewPhi;
2252}
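// Illustrative example (not from the source): with %pred ending in an
// unconditional branch into the phi block,
//   %phi0 = phi i32 [ 7, %const.bb ], [ %x, %pred ]
//   %phi1 = phi i32 [ 9, %const.bb ], [ %y, %pred ]
//   %r    = add i32 %phi0, %phi1
// becomes a new add of %x and %y placed in %pred, plus
//   %r = phi i32 [ 16, %const.bb ], [ %nb, %pred ]   ; fold(7 + 9)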
2253
2254 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2255   if (!isa<Constant>(I.getOperand(1)))
2256 return nullptr;
2257
2258 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2259 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2260 return NewSel;
2261 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2262 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2263 return NewPhi;
2264 }
2265 return nullptr;
2266}
2267
2268 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2269   // If this GEP has only 0 indices, it is the same pointer as
2270 // Src. If Src is not a trivial GEP too, don't combine
2271 // the indices.
2272 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2273 !Src.hasOneUse())
2274 return false;
2275 return true;
2276}
2277
2278/// Find a constant NewC that has property:
2279/// shuffle(NewC, ShMask) = C
2280 /// Returns nullptr if such a constant does not exist, e.g. ShMask=<0,0> C=<1,2>.
2281///
2282/// A 1-to-1 mapping is not required. Example:
2283/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2284 static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2285                                    VectorType *NewCTy) {
2286 if (isa<ScalableVectorType>(NewCTy)) {
2287 Constant *Splat = C->getSplatValue();
2288 if (!Splat)
2289 return nullptr;
2290     return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2291   }
2292
2293 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2294 cast<FixedVectorType>(C->getType())->getNumElements())
2295 return nullptr;
2296
2297 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2298 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2299 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2300 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2301 for (unsigned I = 0; I < NumElts; ++I) {
2302 Constant *CElt = C->getAggregateElement(I);
2303 if (ShMask[I] >= 0) {
2304 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2305 Constant *NewCElt = NewVecC[ShMask[I]];
2306 // Bail out if:
2307 // 1. The constant vector contains a constant expression.
2308 // 2. The shuffle needs an element of the constant vector that can't
2309 // be mapped to a new constant vector.
2310 // 3. This is a widening shuffle that copies elements of V1 into the
2311 // extended elements (extending with poison is allowed).
2312 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2313 I >= NewCNumElts)
2314 return nullptr;
2315 NewVecC[ShMask[I]] = CElt;
2316 }
2317 }
2318 return ConstantVector::get(NewVecC);
2319}
2320
2321 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2322   if (!isa<VectorType>(Inst.getType()))
2323 return nullptr;
2324
2325 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2326 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2327 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2328 cast<VectorType>(Inst.getType())->getElementCount());
2329 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2330 cast<VectorType>(Inst.getType())->getElementCount());
2331
2332 // If both operands of the binop are vector concatenations, then perform the
2333 // narrow binop on each pair of the source operands followed by concatenation
2334 // of the results.
2335 Value *L0, *L1, *R0, *R1;
2336 ArrayRef<int> Mask;
2337 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2338 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2339 LHS->hasOneUse() && RHS->hasOneUse() &&
2340 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2341 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2342 // This transform does not have the speculative execution constraint as
2343 // below because the shuffle is a concatenation. The new binops are
2344 // operating on exactly the same elements as the existing binop.
2345 // TODO: We could ease the mask requirement to allow different undef lanes,
2346 // but that requires an analysis of the binop-with-undef output value.
2347 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2348 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2349 BO->copyIRFlags(&Inst);
2350 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2351 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2352 BO->copyIRFlags(&Inst);
2353 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2354 }
2355
2356 auto createBinOpReverse = [&](Value *X, Value *Y) {
2357 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2358 if (auto *BO = dyn_cast<BinaryOperator>(V))
2359 BO->copyIRFlags(&Inst);
2360 Module *M = Inst.getModule();
2361     Function *F = Intrinsic::getOrInsertDeclaration(
2362         M, Intrinsic::vector_reverse, V->getType());
2363 return CallInst::Create(F, V);
2364 };
2365
2366 // NOTE: Reverse shuffles don't require the speculative execution protection
2367 // below because they don't affect which lanes take part in the computation.
2368
2369 Value *V1, *V2;
2370 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2371 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2372 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2373 (LHS->hasOneUse() || RHS->hasOneUse() ||
2374 (LHS == RHS && LHS->hasNUses(2))))
2375 return createBinOpReverse(V1, V2);
2376
2377 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2378 if (LHS->hasOneUse() && isSplatValue(RHS))
2379 return createBinOpReverse(V1, RHS);
2380 }
2381 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2382 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2383 return createBinOpReverse(LHS, V2);
2384
2385 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2386 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2387 if (auto *BO = dyn_cast<BinaryOperator>(V))
2388 BO->copyIRFlags(&Inst);
2389
2390 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2391 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2392 Module *M = Inst.getModule();
2393     Function *F = Intrinsic::getOrInsertDeclaration(
2394         M, Intrinsic::experimental_vp_reverse, V->getType());
2395 return CallInst::Create(F, {V, AllTrueMask, EVL});
2396 };
2397
2398 Value *EVL;
2399   if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2400                      m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2401 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2402     if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2403                        m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2404 (LHS->hasOneUse() || RHS->hasOneUse() ||
2405 (LHS == RHS && LHS->hasNUses(2))))
2406 return createBinOpVPReverse(V1, V2, EVL);
2407
2408 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2409 if (LHS->hasOneUse() && isSplatValue(RHS))
2410 return createBinOpVPReverse(V1, RHS, EVL);
2411 }
2412 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2413 else if (isSplatValue(LHS) &&
2414            match(RHS, m_OneUse(m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2415                           m_Value(V2), m_AllOnes(), m_Value(EVL))))
2416 return createBinOpVPReverse(LHS, V2, EVL);
2417
2418 // It may not be safe to reorder shuffles and things like div, urem, etc.
2419 // because we may trap when executing those ops on unknown vector elements.
2420 // See PR20059.
2421   if (!isSafeToSpeculativelyExecute(&Inst))
2422     return nullptr;
2423
2424 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2425 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2426 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2427 BO->copyIRFlags(&Inst);
2428 return new ShuffleVectorInst(XY, M);
2429 };
2430
2431 // If both arguments of the binary operation are shuffles that use the same
2432 // mask and shuffle within a single vector, move the shuffle after the binop.
2433 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2434 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2435 V1->getType() == V2->getType() &&
2436 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2437 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2438 return createBinOpShuffle(V1, V2, Mask);
2439 }
2440
2441 // If both arguments of a commutative binop are select-shuffles that use the
2442 // same mask with commuted operands, the shuffles are unnecessary.
2443 if (Inst.isCommutative() &&
2444 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2445 match(RHS,
2446 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2447 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2448 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2449 // TODO: Allow shuffles that contain undefs in the mask?
2450 // That is legal, but it reduces undef knowledge.
2451 // TODO: Allow arbitrary shuffles by shuffling after binop?
2452 // That might be legal, but we have to deal with poison.
2453 if (LShuf->isSelect() &&
2454 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2455 RShuf->isSelect() &&
2456 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2457 // Example:
2458 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2459 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2460 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2461 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2462 NewBO->copyIRFlags(&Inst);
2463 return NewBO;
2464 }
2465 }
2466
2467 // If one argument is a shuffle within one vector and the other is a constant,
2468 // try moving the shuffle after the binary operation. This canonicalization
2469 // intends to move shuffles closer to other shuffles and binops closer to
2470 // other binops, so they can be folded. It may also enable demanded elements
2471 // transforms.
2472 Constant *C;
2473   if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2474                                                 m_Mask(Mask))),
2475 m_ImmConstant(C)))) {
2476 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2477 "Shuffle should not change scalar type");
2478
2479 bool ConstOp1 = isa<Constant>(RHS);
2480 if (Constant *NewC =
2481             unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2482       // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2483 // which will cause UB for div/rem. Mask them with a safe constant.
2484 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2485 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2486
2487 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2488 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2489 Value *NewLHS = ConstOp1 ? V1 : NewC;
2490 Value *NewRHS = ConstOp1 ? NewC : V1;
2491 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2492 }
2493 }
2494
2495 // Try to reassociate to sink a splat shuffle after a binary operation.
2496 if (Inst.isAssociative() && Inst.isCommutative()) {
2497 // Canonicalize shuffle operand as LHS.
2498 if (isa<ShuffleVectorInst>(RHS))
2499 std::swap(LHS, RHS);
2500
2501 Value *X;
2502 ArrayRef<int> MaskC;
2503 int SplatIndex;
2504 Value *Y, *OtherOp;
2505 if (!match(LHS,
2506 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2507 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2508 X->getType() != Inst.getType() ||
2509 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2510 return nullptr;
2511
2512 // FIXME: This may not be safe if the analysis allows undef elements. By
2513 // moving 'Y' before the splat shuffle, we are implicitly assuming
2514 // that it is not undef/poison at the splat index.
2515 if (isSplatValue(OtherOp, SplatIndex)) {
2516 std::swap(Y, OtherOp);
2517 } else if (!isSplatValue(Y, SplatIndex)) {
2518 return nullptr;
2519 }
2520
2521 // X and Y are splatted values, so perform the binary operation on those
2522 // values followed by a splat followed by the 2nd binary operation:
2523 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2524 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2525 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2526 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2527 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2528
2529 // Intersect FMF on both new binops. Other (poison-generating) flags are
2530 // dropped to be safe.
2531 if (isa<FPMathOperator>(R)) {
2532 R->copyFastMathFlags(&Inst);
2533 R->andIRFlags(RHS);
2534 }
2535 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2536 NewInstBO->copyIRFlags(R);
2537 return R;
2538 }
2539
2540 return nullptr;
2541}
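// Illustrative example (not from the source), for the shuffle-with-constant
// case:
//   %s = shufflevector <2 x i32> %v, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
//   %r = add <2 x i32> %s, <i32 10, i32 20>
// becomes, after the constant is unshuffled to NewC = <i32 20, i32 10>:
//   %a = add <2 x i32> %v, <i32 20, i32 10>
//   %r = shufflevector <2 x i32> %a, <2 x i32> poison, <2 x i32> <i32 1, i32 0>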
2542
2543 /// Try to narrow the width of a binop if at least 1 operand is an extend
2544 /// of a value. This requires a potentially expensive known bits check to make
2545/// sure the narrow op does not overflow.
2546Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2547 // We need at least one extended operand.
2548 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2549
2550 // If this is a sub, we swap the operands since we always want an extension
2551 // on the RHS. The LHS can be an extension or a constant.
2552 if (BO.getOpcode() == Instruction::Sub)
2553 std::swap(Op0, Op1);
2554
2555 Value *X;
2556 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2557 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2558 return nullptr;
2559
2560 // If both operands are the same extension from the same source type and we
2561 // can eliminate at least one (hasOneUse), this might work.
2562 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2563 Value *Y;
2564 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2565 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2566 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2567 // If that did not match, see if we have a suitable constant operand.
2568 // Truncating and extending must produce the same constant.
2569 Constant *WideC;
2570 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2571 return nullptr;
2572 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2573 if (!NarrowC)
2574 return nullptr;
2575 Y = NarrowC;
2576 }
2577
2578 // Swap back now that we found our operands.
2579 if (BO.getOpcode() == Instruction::Sub)
2580 std::swap(X, Y);
2581
2582 // Both operands have narrow versions. Last step: the math must not overflow
2583 // in the narrow width.
2584 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2585 return nullptr;
2586
2587 // bo (ext X), (ext Y) --> ext (bo X, Y)
2588 // bo (ext X), C --> ext (bo X, C')
2589 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2590 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2591 if (IsSext)
2592 NewBinOp->setHasNoSignedWrap();
2593 else
2594 NewBinOp->setHasNoUnsignedWrap();
2595 }
2596 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2597}
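// Illustrative example (not from the source): when known bits prove the
// narrow add cannot wrap (say both i16 values have their top bit clear),
//   %xw = zext i16 %x to i32
//   %yw = zext i16 %y to i32
//   %r  = add i32 %xw, %yw
// becomes
//   %n = add nuw i16 %x, %y
//   %r = zext i16 %n to i32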
2598
2599/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2600/// transform.
2601 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2602                                               GEPOperator &GEP2) {
2603   return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2604 }
2605
2606/// Thread a GEP operation with constant indices through the constant true/false
2607/// arms of a select.
2608 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2609                                   InstCombiner::BuilderTy &Builder) {
2610 if (!GEP.hasAllConstantIndices())
2611 return nullptr;
2612
2613 Instruction *Sel;
2614 Value *Cond;
2615 Constant *TrueC, *FalseC;
2616 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2617 !match(Sel,
2618 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2619 return nullptr;
2620
2621 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2622 // Propagate 'inbounds' and metadata from existing instructions.
2623 // Note: using IRBuilder to create the constants for efficiency.
2624 SmallVector<Value *, 4> IndexC(GEP.indices());
2625 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2626 Type *Ty = GEP.getSourceElementType();
2627 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2628 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2629 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2630}
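// Illustrative example (not from the source):
//   %sel = select i1 %c, ptr @a, ptr @b
//   %gep = getelementptr inbounds i32, ptr %sel, i64 1
// becomes
//   %gep = select i1 %c, ptr getelementptr inbounds (i32, ptr @a, i64 1),
//                        ptr getelementptr inbounds (i32, ptr @b, i64 1)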
2631
2632// Canonicalization:
2633// gep T, (gep i8, base, C1), (Index + C2) into
2634// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
2635 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2636                                                 GEPOperator *Src,
2637 InstCombinerImpl &IC) {
2638 if (GEP.getNumIndices() != 1)
2639 return nullptr;
2640 auto &DL = IC.getDataLayout();
2641 Value *Base;
2642 const APInt *C1;
2643 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2644 return nullptr;
2645 Value *VarIndex;
2646 const APInt *C2;
2647 Type *PtrTy = Src->getType()->getScalarType();
2648 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2649 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2650 return nullptr;
2651 if (C1->getBitWidth() != IndexSizeInBits ||
2652 C2->getBitWidth() != IndexSizeInBits)
2653 return nullptr;
2654 Type *BaseType = GEP.getSourceElementType();
2655   if (BaseType->isScalableTy())
2656     return nullptr;
2657 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2658 APInt NewOffset = TypeSize * *C2 + *C1;
2659 if (NewOffset.isZero() ||
2660 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2661     GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2662     if (GEP.hasNoUnsignedWrap() &&
2663 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2664 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2665       Flags |= GEPNoWrapFlags::noUnsignedWrap();
2666       if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2667 Flags |= GEPNoWrapFlags::inBounds();
2668 }
2669
2670 Value *GEPConst =
2671 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2672 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2673 }
2674
2675 return nullptr;
2676}
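// Illustrative example (not from the source), with sizeof(i32) == 4:
//   %idx = add i64 %i, 1
//   %p1  = getelementptr i8, ptr %base, i64 4
//   %gep = getelementptr i32, ptr %p1, i64 %idx
// becomes (NewOffset = 4 + 1 * 4 = 8):
//   %p2  = getelementptr i8, ptr %base, i64 8
//   %gep = getelementptr i32, ptr %p2, i64 %i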
2677
2678/// Combine constant offsets separated by variable offsets.
2679/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2680 static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2681                                            InstCombinerImpl &IC) {
2682 if (!GEP.hasAllConstantIndices())
2683 return nullptr;
2684
2685   GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2686   SmallVector<GetElementPtrInst *> Skipped;
2687   auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2688 while (true) {
2689 if (!InnerGEP)
2690 return nullptr;
2691
2692 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2693 if (InnerGEP->hasAllConstantIndices())
2694 break;
2695
2696 if (!InnerGEP->hasOneUse())
2697 return nullptr;
2698
2699 Skipped.push_back(InnerGEP);
2700 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2701 }
2702
2703 // The two constant offset GEPs are directly adjacent: Let normal offset
2704 // merging handle it.
2705 if (Skipped.empty())
2706 return nullptr;
2707
2708 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2709 // if profitable.
2710 if (!InnerGEP->hasOneUse())
2711 return nullptr;
2712
2713 // Don't bother with vector splats.
2714 Type *Ty = GEP.getType();
2715 if (InnerGEP->getType() != Ty)
2716 return nullptr;
2717
2718 const DataLayout &DL = IC.getDataLayout();
2719 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2720 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2721 !InnerGEP->accumulateConstantOffset(DL, Offset))
2722 return nullptr;
2723
2724 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2725 for (GetElementPtrInst *SkippedGEP : Skipped)
2726 SkippedGEP->setNoWrapFlags(NW);
2727
2728 return IC.replaceInstUsesWith(
2729 GEP,
2730 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2731 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2732}
2733
2734 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2735                                              GEPOperator *Src) {
2736 // Combine Indices - If the source pointer to this getelementptr instruction
2737 // is a getelementptr instruction with matching element type, combine the
2738 // indices of the two getelementptr instructions into a single instruction.
2739 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2740 return nullptr;
2741
2742 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2743 return I;
2744
2745 if (auto *I = combineConstantOffsets(GEP, *this))
2746 return I;
2747
2748 if (Src->getResultElementType() != GEP.getSourceElementType())
2749 return nullptr;
2750
2751 // Find out whether the last index in the source GEP is a sequential idx.
2752 bool EndsWithSequential = false;
2753 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2754 I != E; ++I)
2755 EndsWithSequential = I.isSequential();
2756 if (!EndsWithSequential)
2757 return nullptr;
2758
2759 // Replace: gep (gep %P, long B), long A, ...
2760 // With: T = long A+B; gep %P, T, ...
2761 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2762 Value *GO1 = GEP.getOperand(1);
2763
2764 // If they aren't the same type, then the input hasn't been processed
2765 // by the loop above yet (which canonicalizes sequential index types to
2766 // intptr_t). Just avoid transforming this until the input has been
2767 // normalized.
2768 if (SO1->getType() != GO1->getType())
2769 return nullptr;
2770
2771 Value *Sum =
2772 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2773 // Only do the combine when we are sure the cost after the
2774 // merge is never more than that before the merge.
2775 if (Sum == nullptr)
2776 return nullptr;
2777
2778   SmallVector<Value *, 8> Indices;
2779   Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2780 Indices.push_back(Sum);
2781 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2782
2783 // Don't create GEPs with more than one non-zero index.
2784 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2785 auto *C = dyn_cast<Constant>(Idx);
2786 return !C || !C->isNullValue();
2787 });
2788 if (NumNonZeroIndices > 1)
2789 return nullptr;
2790
2791 return replaceInstUsesWith(
2792 GEP, Builder.CreateGEP(
2793 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2794                getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2795}
2796
2797 Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2798                                                BuilderTy *Builder,
2799                                                bool &DoesConsume, unsigned Depth) {
2800 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2801 // ~(~(X)) -> X.
2802 Value *A, *B;
2803 if (match(V, m_Not(m_Value(A)))) {
2804 DoesConsume = true;
2805 return A;
2806 }
2807
2808 Constant *C;
2809 // Constants can be considered to be not'ed values.
2810 if (match(V, m_ImmConstant(C)))
2811     return ConstantExpr::getNot(C);
2812
2813   if (Depth++ >= MaxAnalysisRecursionDepth)
2814     return nullptr;
2815
2815
2816 // The rest of the cases require that we invert all uses so don't bother
2817 // doing the analysis if we know we can't use the result.
2818 if (!WillInvertAllUses)
2819 return nullptr;
2820
2821 // Compares can be inverted if all of their uses are being modified to use
2822 // the ~V.
2823 if (auto *I = dyn_cast<CmpInst>(V)) {
2824 if (Builder != nullptr)
2825 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2826 I->getOperand(1));
2827 return NonNull;
2828 }
2829
2830 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2831 // `(-1 - B) - A` if we are willing to invert all of the uses.
2832 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2833 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2834 DoesConsume, Depth))
2835 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2836 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2837 DoesConsume, Depth))
2838 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2839 return nullptr;
2840 }
2841
2842 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2843 // into `A ^ B` if we are willing to invert all of the uses.
2844 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2845 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2846 DoesConsume, Depth))
2847 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2848 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2849 DoesConsume, Depth))
2850 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2851 return nullptr;
2852 }
2853
2854 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2855 // `A + (-1 - B)` if we are willing to invert all of the uses.
2856 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2857 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2858 DoesConsume, Depth))
2859 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2860 return nullptr;
2861 }
2862
2863 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2864 // into `A s>> B` if we are willing to invert all of the uses.
2865 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2866 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2867 DoesConsume, Depth))
2868 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2869 return nullptr;
2870 }
2871
2872 Value *Cond;
2873 // LogicOps are special in that we canonicalize them at the cost of an
2874 // instruction.
2875 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2876                   !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2877   // Selects/min/max with invertible operands are freely invertible
2878 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2879 bool LocalDoesConsume = DoesConsume;
2880 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2881 LocalDoesConsume, Depth))
2882 return nullptr;
2883 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2884 LocalDoesConsume, Depth)) {
2885 DoesConsume = LocalDoesConsume;
2886 if (Builder != nullptr) {
2887 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2888 DoesConsume, Depth);
2889 assert(NotB != nullptr &&
2890 "Unable to build inverted value for known freely invertable op");
2891 if (auto *II = dyn_cast<IntrinsicInst>(V))
2892 return Builder->CreateBinaryIntrinsic(
2893 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2894 return Builder->CreateSelect(Cond, NotA, NotB);
2895 }
2896 return NonNull;
2897 }
2898 }
2899
2900 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2901 bool LocalDoesConsume = DoesConsume;
2902     SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2903     for (Use &U : PN->operands()) {
2904 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2905 Value *NewIncomingVal = getFreelyInvertedImpl(
2906 U.get(), /*WillInvertAllUses=*/false,
2907 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2908 if (NewIncomingVal == nullptr)
2909 return nullptr;
2910 // Make sure that we can safely erase the original PHI node.
2911 if (NewIncomingVal == V)
2912 return nullptr;
2913 if (Builder != nullptr)
2914 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2915 }
2916
2917 DoesConsume = LocalDoesConsume;
2918 if (Builder != nullptr) {
2919       IRBuilderBase::InsertPointGuard Guard(*Builder);
2920       Builder->SetInsertPoint(PN);
2921 PHINode *NewPN =
2922 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2923 for (auto [Val, Pred] : IncomingValues)
2924 NewPN->addIncoming(Val, Pred);
2925 return NewPN;
2926 }
2927 return NonNull;
2928 }
2929
2930 if (match(V, m_SExtLike(m_Value(A)))) {
2931 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2932 DoesConsume, Depth))
2933 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2934 return nullptr;
2935 }
2936
2937 if (match(V, m_Trunc(m_Value(A)))) {
2938 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2939 DoesConsume, Depth))
2940 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2941 return nullptr;
2942 }
2943
2944 // De Morgan's Laws:
2945 // (~(A | B)) -> (~A & ~B)
2946 // (~(A & B)) -> (~A | ~B)
2947 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2948 bool IsLogical, Value *A,
2949 Value *B) -> Value * {
2950 bool LocalDoesConsume = DoesConsume;
2951 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2952 LocalDoesConsume, Depth))
2953 return nullptr;
2954 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2955 LocalDoesConsume, Depth)) {
2956 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2957 LocalDoesConsume, Depth);
2958 DoesConsume = LocalDoesConsume;
2959 if (IsLogical)
2960 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2961 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2962 }
2963
2964 return nullptr;
2965 };
2966
2967 if (match(V, m_Or(m_Value(A), m_Value(B))))
2968 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2969 B);
2970
2971 if (match(V, m_And(m_Value(A), m_Value(B))))
2972 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2973 B);
2974
2975 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2976 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2977 B);
2978
2979 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2980 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2981 B);
2982
2983 return nullptr;
2984}
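// Illustrative example (not from the source), the add case with a constant
// operand:
//   %s = add i32 %a, 5
//   %r = xor i32 %s, -1        ; ~(%a + 5)
// is rewritten as
//   %r = sub i32 -6, %a        ; (-1 - 5) - %a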
2985
2986/// Return true if we should canonicalize the gep to an i8 ptradd.
2987 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
2988   Value *PtrOp = GEP.getOperand(0);
2989 Type *GEPEltType = GEP.getSourceElementType();
2990 if (GEPEltType->isIntegerTy(8))
2991 return false;
2992
2993 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2994 // intrinsic. This has better support in BasicAA.
2995 if (GEPEltType->isScalableTy())
2996 return true;
2997
2998 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
2999 // together.
3000 if (GEP.getNumIndices() == 1 &&
3001 match(GEP.getOperand(1),
3002             m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3003                                  m_Shl(m_Value(), m_ConstantInt())))))
3004 return true;
3005
3006 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3007 // possibly be merged together.
3008 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3009 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3010 any_of(GEP.indices(), [](Value *V) {
3011 const APInt *C;
3012 return match(V, m_APInt(C)) && !C->isZero();
3013 });
3014}
3015
3016 static Value *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3017                            IRBuilderBase &Builder) {
3018 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3019 if (!Op1)
3020 return nullptr;
3021
3022 // Don't fold a GEP into itself through a PHI node. This can only happen
3023 // through the back-edge of a loop. Folding a GEP into itself means that
3024 // the value of the previous iteration needs to be stored in the meantime,
3025 // thus requiring an additional register variable to be live, but not
3026 // actually achieving anything (the GEP still needs to be executed once per
3027 // loop iteration).
3028 if (Op1 == &GEP)
3029 return nullptr;
3030 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3031
3032 int DI = -1;
3033
3034 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3035 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3036 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3037 Op1->getSourceElementType() != Op2->getSourceElementType())
3038 return nullptr;
3039
3040 // As for Op1 above, don't try to fold a GEP into itself.
3041 if (Op2 == &GEP)
3042 return nullptr;
3043
3044 // Keep track of the type as we walk the GEP.
3045 Type *CurTy = nullptr;
3046
3047 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3048 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3049 return nullptr;
3050
3051 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3052 if (DI == -1) {
3053 // We have not seen any differences yet in the GEPs feeding the
3054 // PHI yet, so we record this one if it is allowed to be a
3055 // variable.
3056
3057 // The first two arguments can vary for any GEP, the rest have to be
3058 // static for struct slots
3059 if (J > 1) {
3060 assert(CurTy && "No current type?");
3061 if (CurTy->isStructTy())
3062 return nullptr;
3063 }
3064
3065 DI = J;
3066 } else {
3067 // The GEP is different by more than one input. While this could be
3068 // extended to support GEPs that vary by more than one variable it
3069 // doesn't make sense since it greatly increases the complexity and
3070 // would result in an R+R+R addressing mode which no backend
3071 // directly supports and would need to be broken into several
3072 // simpler instructions anyway.
3073 return nullptr;
3074 }
3075 }
3076
3077 // Sink down a layer of the type for the next iteration.
3078 if (J > 0) {
3079 if (J == 1) {
3080 CurTy = Op1->getSourceElementType();
3081 } else {
3082 CurTy =
3083 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3084 }
3085 }
3086 }
3087
3088 NW &= Op2->getNoWrapFlags();
3089 }
3090
3091 // If not all GEPs are identical we'll have to create a new PHI node.
3092 // Check that the old PHI node has only one use so that it will get
3093 // removed.
3094 if (DI != -1 && !PN->hasOneUse())
3095 return nullptr;
3096
3097 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3098 NewGEP->setNoWrapFlags(NW);
3099
3100 if (DI == -1) {
3101 // All the GEPs feeding the PHI are identical. Clone one down into our
3102 // BB so that it can be merged with the current GEP.
3103 } else {
3104 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3105 // into the current block so it can be merged, and create a new PHI to
3106 // set that index.
3107 PHINode *NewPN;
3108 {
3109 IRBuilderBase::InsertPointGuard Guard(Builder);
3110 Builder.SetInsertPoint(PN);
3111 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3112 PN->getNumOperands());
3113 }
3114
3115 for (auto &I : PN->operands())
3116 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3117 PN->getIncomingBlock(I));
3118
3119 NewGEP->setOperand(DI, NewPN);
3120 }
3121
3122 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3123 return NewGEP;
3124}
3125
3126 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3127   Value *PtrOp = GEP.getOperand(0);
3128 SmallVector<Value *, 8> Indices(GEP.indices());
3129 Type *GEPType = GEP.getType();
3130 Type *GEPEltType = GEP.getSourceElementType();
3131 if (Value *V =
3132 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3133 SQ.getWithInstruction(&GEP)))
3134 return replaceInstUsesWith(GEP, V);
3135
3136 // For vector geps, use the generic demanded vector support.
3137 // Skip if GEP return type is scalable. The number of elements is unknown at
3138 // compile-time.
3139 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3140 auto VWidth = GEPFVTy->getNumElements();
3141 APInt PoisonElts(VWidth, 0);
3142 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3143 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3144 PoisonElts)) {
3145 if (V != &GEP)
3146 return replaceInstUsesWith(GEP, V);
3147 return &GEP;
3148 }
3149 }
3150
3151 // Eliminate unneeded casts for indices, and replace indices which displace
3152 // by multiples of a zero size type with zero.
3153 bool MadeChange = false;
3154
3155 // Index width may not be the same width as pointer width.
3156 // Data layout chooses the right type based on supported integer types.
3157 Type *NewScalarIndexTy =
3158 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3159
3160   gep_type_iterator GTI = gep_type_begin(GEP);
3161   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3162 ++I, ++GTI) {
3163 // Skip indices into struct types.
3164 if (GTI.isStruct())
3165 continue;
3166
3167 Type *IndexTy = (*I)->getType();
3168 Type *NewIndexType =
3169 IndexTy->isVectorTy()
3170 ? VectorType::get(NewScalarIndexTy,
3171 cast<VectorType>(IndexTy)->getElementCount())
3172 : NewScalarIndexTy;
3173
3174 // If the element type has zero size then any index over it is equivalent
3175 // to an index of zero, so replace it with zero if it is not zero already.
3176 Type *EltTy = GTI.getIndexedType();
3177 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3178 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3179 *I = Constant::getNullValue(NewIndexType);
3180 MadeChange = true;
3181 }
3182
3183 if (IndexTy != NewIndexType) {
3184 // If we are using a wider index than needed for this platform, shrink
3185 // it to what we need. If narrower, sign-extend it to what we need.
3186 // This explicit cast can make subsequent optimizations more obvious.
3187 if (IndexTy->getScalarSizeInBits() <
3188 NewIndexType->getScalarSizeInBits()) {
3189 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3190 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3191 else
3192 *I = Builder.CreateSExt(*I, NewIndexType);
3193 } else {
3194 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3195 GEP.hasNoUnsignedSignedWrap());
3196 }
3197 MadeChange = true;
3198 }
3199 }
3200 if (MadeChange)
3201 return &GEP;
3202
3203 // Canonicalize constant GEPs to i8 type.
3204 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3205 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3206 if (GEP.accumulateConstantOffset(DL, Offset))
3207 return replaceInstUsesWith(
3208 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3209 GEP.getNoWrapFlags()));
3210 }
3211
3212   if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3213     Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3214 Value *NewGEP =
3215 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3216 return replaceInstUsesWith(GEP, NewGEP);
3217 }
3218
3219 // Strip trailing zero indices.
3220 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3221 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3222 return replaceInstUsesWith(
3223 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3224 drop_end(Indices), "", GEP.getNoWrapFlags()));
3225 }
3226
3227 // Strip leading zero indices.
3228 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3229 if (FirstIdx && FirstIdx->isNullValue() &&
3230 !FirstIdx->getType()->isVectorTy()) {
3231     gep_type_iterator GTI = gep_type_begin(GEP);
3232     ++GTI;
3233 if (!GTI.isStruct())
3234 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3235 GEP.getPointerOperand(),
3236 drop_begin(Indices), "",
3237 GEP.getNoWrapFlags()));
3238 }
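// E.g. a leading zero index is redundant when the next index is not into
// a struct:
//   %g = getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// becomes
//   %g = getelementptr i32, ptr %p, i64 %i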
3239
3240 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3241 // Note that this loses information about undef lanes; we run it after
3242 // demanded bits to partially mitigate that loss.
3243 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3244 return Op->getType()->isVectorTy() && getSplatValue(Op);
3245 })) {
3246 SmallVector<Value *> NewOps;
3247 for (auto &Op : GEP.operands()) {
3248 if (Op->getType()->isVectorTy())
3249 if (Value *Scalar = getSplatValue(Op)) {
3250 NewOps.push_back(Scalar);
3251 continue;
3252 }
3253 NewOps.push_back(Op);
3254 }
3255
3256 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3257 ArrayRef(NewOps).drop_front(), GEP.getName(),
3258 GEP.getNoWrapFlags());
3259 if (!Res->getType()->isVectorTy()) {
3260 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3261 Res = Builder.CreateVectorSplat(EC, Res);
3262 }
3263 return replaceInstUsesWith(GEP, Res);
3264 }
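// E.g. (sketch) if %ps is a splat of ptr %p, then
//   %g = getelementptr i32, <2 x ptr> %ps, <2 x i64> splat (i64 1)
// is computed as a scalar GEP whose result is splatted back to <2 x ptr>:
//   %s = getelementptr i32, ptr %p, i64 1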
3265
3266 bool SeenNonZeroIndex = false;
3267 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3268 auto *C = dyn_cast<Constant>(Idx);
3269 if (C && C->isNullValue())
3270 continue;
3271
3272 if (!SeenNonZeroIndex) {
3273 SeenNonZeroIndex = true;
3274 continue;
3275 }
3276
3277 // GEP has multiple non-zero indices: Split it.
3278 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3279 Value *FrontGEP =
3280 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3281 GEP.getName() + ".split", GEP.getNoWrapFlags());
3282
3283 SmallVector<Value *> BackIndices;
3284 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3285 append_range(BackIndices, drop_begin(Indices, IdxNum));
3286 return GetElementPtrInst::Create(
3287 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3288 BackIndices, GEP.getNoWrapFlags());
3289 }
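// E.g. a GEP with two non-zero indices is split so that each GEP carries
// one of them:
//   %g = getelementptr [4 x i32], ptr %p, i64 %a, i64 %b
// becomes
//   %g.split = getelementptr [4 x i32], ptr %p, i64 %a
//   %g = getelementptr [4 x i32], ptr %g.split, i64 0, i64 %b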
3290
3291 // Check to see if the inputs to the PHI node are getelementptr instructions.
3292 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3293 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3294 return replaceOperand(GEP, 0, NewPtrOp);
3295 }
3296
3297 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3298 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3299 return I;
3300
3301 if (GEP.getNumIndices() == 1) {
3302 unsigned AS = GEP.getPointerAddressSpace();
3303 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3304 DL.getIndexSizeInBits(AS)) {
3305 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3306
3307 if (TyAllocSize == 1) {
3308 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3309 // but only if the result pointer is only used as if it were an integer,
3310 // or both point to the same underlying object (otherwise provenance is
3311 // not necessarily retained).
3312 Value *X = GEP.getPointerOperand();
3313 Value *Y;
3314 if (match(GEP.getOperand(1),
3315 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3316 GEPType == Y->getType()) {
3317 bool HasSameUnderlyingObject =
3318 getUnderlyingObject(X) == getUnderlyingObject(Y);
3319 bool Changed = false;
3320 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3321 bool ShouldReplace = HasSameUnderlyingObject ||
3322 isa<ICmpInst>(U.getUser()) ||
3323 isa<PtrToIntInst>(U.getUser());
3324 Changed |= ShouldReplace;
3325 return ShouldReplace;
3326 });
3327 return Changed ? &GEP : nullptr;
3328 }
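// E.g. with %d = sub i64 (ptrtoint ptr %y), (ptrtoint ptr %x), the GEP
//   %g = getelementptr i8, ptr %x, i64 %d
// computes the address of %y, so provenance-insensitive uses of %g
// (icmp, ptrtoint) are replaced by %y directly.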
3329 } else if (auto *ExactIns =
3330 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3331 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3332 Value *V;
3333 if (ExactIns->isExact()) {
3334 if ((has_single_bit(TyAllocSize) &&
3335 match(GEP.getOperand(1),
3336 m_Shr(m_Value(V),
3337 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3338 match(GEP.getOperand(1),
3339 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3340 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3341 GEP.getPointerOperand(), V,
3342 GEP.getNoWrapFlags());
3343 }
3344 }
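// E.g. for an i32 element type (alloc size 4):
//   %i = lshr exact i64 %v, 2
//   %g = getelementptr i32, ptr %p, i64 %i
// becomes
//   %g = getelementptr i8, ptr %p, i64 %v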
3345 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3346 // Try to canonicalize a non-i8 element type to i8 if the index is an
3347 // exact instruction (div/shr) with a constant RHS: we can fold the
3348 // non-i8 element scale into the div/shr (similar to the mul case,
3349 // just inverted).
3350 const APInt *C;
3351 std::optional<APInt> NewC;
3352 if (has_single_bit(TyAllocSize) &&
3353 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3354 C->uge(countr_zero(TyAllocSize)))
3355 NewC = *C - countr_zero(TyAllocSize);
3356 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3357 APInt Quot;
3358 uint64_t Rem;
3359 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3360 if (Rem == 0)
3361 NewC = Quot;
3362 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3363 APInt Quot;
3364 int64_t Rem;
3365 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3366 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3367 if (!Quot.isAllOnes() && Rem == 0)
3368 NewC = Quot;
3369 }
3370
3371 if (NewC.has_value()) {
3372 Value *NewOp = Builder.CreateBinOp(
3373 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3374 ConstantInt::get(V->getType(), *NewC));
3375 cast<BinaryOperator>(NewOp)->setIsExact();
3376 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3377 GEP.getPointerOperand(), NewOp,
3378 GEP.getNoWrapFlags());
3379 }
3380 }
3381 }
3382 }
3383 }
3384 // We do not handle pointer-vector geps here.
3385 if (GEPType->isVectorTy())
3386 return nullptr;
3387
3388 if (!GEP.isInBounds()) {
3389 unsigned IdxWidth =
3390 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3391 APInt BasePtrOffset(IdxWidth, 0);
3392 Value *UnderlyingPtrOp =
3393 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3394 bool CanBeNull, CanBeFreed;
3395 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3396 DL, CanBeNull, CanBeFreed);
3397 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3398 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3399 BasePtrOffset.isNonNegative()) {
3400 APInt AllocSize(IdxWidth, DerefBytes);
3401 if (BasePtrOffset.ule(AllocSize)) {
3402 return GetElementPtrInst::CreateInBounds(
3403 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3404 }
3405 }
3406 }
3407 }
3408
3409 // nusw + nneg -> nuw
3410 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3411 all_of(GEP.indices(), [&](Value *Idx) {
3412 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3413 })) {
3414 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3415 return &GEP;
3416 }
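// E.g. "getelementptr nusw i8, ptr %p, i64 %i" with %i known non-negative
// becomes "getelementptr nusw nuw i8, ptr %p, i64 %i".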
3417
3418 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3419 // to do this after having tried to derive "nuw" above.
3420 if (GEP.getNumIndices() == 1) {
3421 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3422 // geps if transforming into (gep (gep p, x), y).
3423 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3424 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3425 // that x + y does not have unsigned wrap.
3426 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3427 return GEP.getNoWrapFlags();
3428 return GEPNoWrapFlags::none();
3429 };
3430
3431 // Try to replace ADD + GEP with GEP + GEP.
3432 Value *Idx1, *Idx2;
3433 if (match(GEP.getOperand(1),
3434 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3435 // %idx = add i64 %idx1, %idx2
3436 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3437 // as:
3438 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3439 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3440 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3441 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3442 auto *NewPtr =
3443 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3444 Idx1, "", NWFlags);
3445 return replaceInstUsesWith(GEP,
3446 Builder.CreateGEP(GEP.getSourceElementType(),
3447 NewPtr, Idx2, "", NWFlags));
3448 }
3449 ConstantInt *C;
3450 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3451 m_Value(Idx1), m_ConstantInt(C))))))) {
3452 // %add = add nsw i32 %idx1, idx2
3453 // %sidx = sext i32 %add to i64
3454 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3455 // as:
3456 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3457 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3458 bool NUW = match(GEP.getOperand(1),
3459 m_NNegZExt(m_NUWAddLike(m_Value(), m_Value())));
3460 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3461 auto *NewPtr = Builder.CreateGEP(
3462 GEP.getSourceElementType(), GEP.getPointerOperand(),
3463 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3464 return replaceInstUsesWith(
3465 GEP,
3466 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3467 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3468 "", NWFlags));
3469 }
3470 }
3471
3472 if (Instruction *R = foldSelectGEP(GEP, Builder))
3473 return R;
3474
3475 return nullptr;
3476}
3477
3478 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3479 Instruction *AI) {
3480 if (isa<ConstantPointerNull>(V))
3481 return true;
3482 if (auto *LI = dyn_cast<LoadInst>(V))
3483 return isa<GlobalVariable>(LI->getPointerOperand());
3484 // Two distinct allocations will never be equal.
3485 return isAllocLikeFn(V, &TLI) && V != AI;
3486}
3487
3488/// Given a call CB which uses an address UsedV, return true if we can prove the
3489 /// call's only possible effect is storing to UsedV.
3490static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3491 const TargetLibraryInfo &TLI) {
3492 if (!CB.use_empty())
3493 // TODO: add recursion if returned attribute is present
3494 return false;
3495
3496 if (CB.isTerminator())
3497 // TODO: remove implementation restriction
3498 return false;
3499
3500 if (!CB.willReturn() || !CB.doesNotThrow())
3501 return false;
3502
3503 // If the only possible side effect of the call is writing to the alloca,
3504 // and the result isn't used, we can safely remove any reads implied by the
3505 // call including those which might read the alloca itself.
3506 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3507 return Dest && Dest->Ptr == UsedV;
3508}
3509
3510static std::optional<ModRefInfo>
3511 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3512 const TargetLibraryInfo &TLI, bool KnowInit) {
3513 SmallVector<Instruction*, 4> Worklist;
3514 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3515 Worklist.push_back(AI);
3516 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
3517
3518 do {
3519 Instruction *PI = Worklist.pop_back_val();
3520 for (User *U : PI->users()) {
3521 Instruction *I = cast<Instruction>(U);
3522 switch (I->getOpcode()) {
3523 default:
3524 // Give up the moment we see something we can't handle.
3525 return std::nullopt;
3526
3527 case Instruction::AddrSpaceCast:
3528 case Instruction::BitCast:
3529 case Instruction::GetElementPtr:
3530 Users.emplace_back(I);
3531 Worklist.push_back(I);
3532 continue;
3533
3534 case Instruction::ICmp: {
3535 ICmpInst *ICI = cast<ICmpInst>(I);
3536 // We can fold eq/ne comparisons with null to false/true, respectively.
3537 // We also fold comparisons in some conditions provided the alloc has
3538 // not escaped (see isNeverEqualToUnescapedAlloc).
3539 if (!ICI->isEquality())
3540 return std::nullopt;
3541 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3542 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3543 return std::nullopt;
3544
3545 // Do not fold compares to aligned_alloc calls, as they may have to
3546 // return null in case the required alignment cannot be satisfied,
3547 // unless we can prove that both alignment and size are valid.
3548 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3549 // Check if alignment and size of a call to aligned_alloc is valid,
3550 // that is alignment is a power-of-2 and the size is a multiple of the
3551 // alignment.
3552 const APInt *Alignment;
3553 const APInt *Size;
3554 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3555 match(CB->getArgOperand(1), m_APInt(Size)) &&
3556 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3557 };
3558 auto *CB = dyn_cast<CallBase>(AI);
3559 LibFunc TheLibFunc;
3560 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3561 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3562 !AlignmentAndSizeKnownValid(CB))
3563 return std::nullopt;
3564 Users.emplace_back(I);
3565 continue;
3566 }
3567
3568 case Instruction::Call:
3569 // Ignore no-op and store intrinsics.
3570 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3571 switch (II->getIntrinsicID()) {
3572 default:
3573 return std::nullopt;
3574
3575 case Intrinsic::memmove:
3576 case Intrinsic::memcpy:
3577 case Intrinsic::memset: {
3578 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3579 if (MI->isVolatile())
3580 return std::nullopt;
3581 // Note: this could also be ModRef, but we can still interpret that
3582 // as just Mod in that case.
3583 ModRefInfo NewAccess =
3584 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3585 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3586 return std::nullopt;
3587 Access |= NewAccess;
3588 [[fallthrough]];
3589 }
3590 case Intrinsic::assume:
3591 case Intrinsic::invariant_start:
3592 case Intrinsic::invariant_end:
3593 case Intrinsic::lifetime_start:
3594 case Intrinsic::lifetime_end:
3595 case Intrinsic::objectsize:
3596 Users.emplace_back(I);
3597 continue;
3598 case Intrinsic::launder_invariant_group:
3599 case Intrinsic::strip_invariant_group:
3600 Users.emplace_back(I);
3601 Worklist.push_back(I);
3602 continue;
3603 }
3604 }
3605
3606 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3607 getAllocationFamily(I, &TLI) == Family) {
3608 Users.emplace_back(I);
3609 continue;
3610 }
3611
3612 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3613 getAllocationFamily(I, &TLI) == Family) {
3614 Users.emplace_back(I);
3615 Worklist.push_back(I);
3616 continue;
3617 }
3618
3619 if (!isRefSet(Access) &&
3620 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3621 Access |= ModRefInfo::Mod;
3622 Users.emplace_back(I);
3623 continue;
3624 }
3625
3626 return std::nullopt;
3627
3628 case Instruction::Store: {
3629 StoreInst *SI = cast<StoreInst>(I);
3630 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3631 return std::nullopt;
3632 if (isRefSet(Access))
3633 return std::nullopt;
3634 Access |= ModRefInfo::Mod;
3635 Users.emplace_back(I);
3636 continue;
3637 }
3638
3639 case Instruction::Load: {
3640 LoadInst *LI = cast<LoadInst>(I);
3641 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3642 return std::nullopt;
3643 if (isModSet(Access))
3644 return std::nullopt;
3645 Access |= ModRefInfo::Ref;
3646 Users.emplace_back(I);
3647 continue;
3648 }
3649 }
3650 llvm_unreachable("missing a return?");
3651 }
3652 } while (!Worklist.empty());
3653
3655 return Access;
3656}
3657
3658 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3660
3661 // If we have a malloc call which is only used in any number of comparisons to
3662 // null and free calls, delete the calls and replace the comparisons with true
3663 // or false as appropriate.
3664
3665 // This is based on the principle that we can substitute our own allocation
3666 // function (which will never return null) rather than knowledge of the
3667 // specific function being called. In some sense this can change the permitted
3668 // outputs of a program (when we convert a malloc to an alloca, the fact that
3669 // the allocation is now on the stack is potentially visible, for example),
3670 // but we believe it does so in a permissible manner.
3671 SmallVector<WeakTrackingVH, 64> Users;
3672
3673 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3674 // before each store.
3675 SmallVector<DbgVariableRecord *, 8> DVRs;
3676 std::unique_ptr<DIBuilder> DIB;
3677 if (isa<AllocaInst>(MI)) {
3678 findDbgUsers(&MI, DVRs);
3679 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3680 }
3681
3682 // Determine what getInitialValueOfAllocation would return without actually
3683 // allocating the result.
3684 bool KnowInitUndef = false;
3685 bool KnowInitZero = false;
3686 Constant *Init =
3687 getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3688 if (Init) {
3689 if (isa<UndefValue>(Init))
3690 KnowInitUndef = true;
3691 else if (Init->isNullValue())
3692 KnowInitZero = true;
3693 }
3694 // The various sanitizers don't actually return undef memory, but rather
3695 // memory initialized with special forms of runtime poison.
3696 auto &F = *MI.getFunction();
3697 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3698 F.hasFnAttribute(Attribute::SanitizeAddress))
3699 KnowInitUndef = false;
3700
3701 auto Removable =
3702 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3703 if (Removable) {
3704 for (WeakTrackingVH &User : Users) {
3705 // Lower all @llvm.objectsize and MTI calls first because they may use
3706 // a bitcast/GEP of the alloca we are removing.
3707 if (!User)
3708 continue;
3709
3711
3712 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3713 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3714 SmallVector<Instruction *> InsertedInstructions;
3715 Value *Result = lowerObjectSizeCall(
3716 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3717 for (Instruction *Inserted : InsertedInstructions)
3718 Worklist.add(Inserted);
3719 replaceInstUsesWith(*I, Result);
3720 eraseInstFromFunction(*I);
3721 User = nullptr; // Skip examining in the next loop.
3722 continue;
3723 }
3724 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3725 if (KnowInitZero && isRefSet(*Removable)) {
3726 IRBuilderBase::InsertPointGuard Guard(Builder);
3727 Builder.SetInsertPoint(MTI);
3728 auto *M = Builder.CreateMemSet(
3729 MTI->getRawDest(),
3730 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3731 MTI->getLength(), MTI->getDestAlign());
3732 M->copyMetadata(*MTI);
3733 }
3734 }
3735 }
3736 }
3737 for (WeakTrackingVH &User : Users) {
3738 if (!User)
3739 continue;
3740
3741 Instruction *I = cast<Instruction>(&*User);
3742
3743 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3744 replaceInstUsesWith(*C,
3745 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3746 C->isFalseWhenEqual()));
3747 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3748 for (auto *DVR : DVRs)
3749 if (DVR->isAddressOfVariable())
3750 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3751 } else {
3752 // Casts, GEP, or anything else: we're about to delete this instruction,
3753 // so it can not have any valid uses.
3754 Constant *Replace;
3755 if (isa<LoadInst>(I)) {
3756 assert(KnowInitZero || KnowInitUndef);
3757 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3758 : Constant::getNullValue(I->getType());
3759 } else
3760 Replace = PoisonValue::get(I->getType());
3761 replaceInstUsesWith(*I, Replace);
3762 }
3763 eraseInstFromFunction(*I);
3764 }
3765
3766 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3767 // Replace invoke with a NOP intrinsic to maintain the original CFG
3768 Module *M = II->getModule();
3769 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3770 auto *NewII = InvokeInst::Create(
3771 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3772 NewII->setDebugLoc(II->getDebugLoc());
3773 }
3774
3775 // Remove debug intrinsics which describe the value contained within the
3776 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3777 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3778 //
3779 // ```
3780 // define void @foo(i32 %0) {
3781 // %a = alloca i32 ; Deleted.
3782 // store i32 %0, i32* %a
3783 // dbg.value(i32 %0, "arg0") ; Not deleted.
3784 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3785 // call void @trivially_inlinable_no_op(i32* %a)
3786 // ret void
3787 // }
3788 // ```
3789 //
3790 // This may not be required if we stop describing the contents of allocas
3791 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3792 // the LowerDbgDeclare utility.
3793 //
3794 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3795 // "arg0" dbg.value may be stale after the call. However, failing to remove
3796 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3797 //
3798 // FIXME: the Assignment Tracking project has now likely made this
3799 // redundant (and it's sometimes harmful).
3800 for (auto *DVR : DVRs)
3801 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3802 DVR->eraseFromParent();
3803
3804 return eraseInstFromFunction(MI);
3805 }
3806 return nullptr;
3807}
3808
3809/// Move the call to free before a NULL test.
3810///
3811 /// Check if this free is accessed after its argument has been tested
3812 /// against NULL (property 0).
3813 /// If yes, it is legal to move this call into its predecessor block.
3814///
3815/// The move is performed only if the block containing the call to free
3816/// will be removed, i.e.:
3817/// 1. it has only one predecessor P, and P has two successors
3818/// 2. it contains the call, noops, and an unconditional branch
3819/// 3. its successor is the same as its predecessor's successor
3820///
3821 /// Profitability is not a concern here and this function should
3822/// be called only if the caller knows this transformation would be
3823/// profitable (e.g., for code size).
3824 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3825 const DataLayout &DL) {
3826 Value *Op = FI.getArgOperand(0);
3827 BasicBlock *FreeInstrBB = FI.getParent();
3828 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3829
3830 // Validate part of constraint #1: Only one predecessor
3831 // FIXME: We can extend the number of predecessors, but in that case, we
3832 // would duplicate the call to free in each predecessor and it may
3833 // not be profitable even for code size.
3834 if (!PredBB)
3835 return nullptr;
3836
3837 // Validate constraint #2: Does this block contain only the call to
3838 // free, noops, and an unconditional branch?
3839 BasicBlock *SuccBB;
3840 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3841 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3842 return nullptr;
3843
3844 // If there are only 2 instructions in the block, at this point,
3845 // they are the call to free and the unconditional branch.
3846 // If there are more than 2 instructions, check that they are noops
3847 // i.e., they won't hurt the performance of the generated code.
3848 if (FreeInstrBB->size() != 2) {
3849 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3850 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3851 continue;
3852 auto *Cast = dyn_cast<CastInst>(&Inst);
3853 if (!Cast || !Cast->isNoopCast(DL))
3854 return nullptr;
3855 }
3856 }
3857 // Validate the rest of constraint #1 by matching on the pred branch.
3858 Instruction *TI = PredBB->getTerminator();
3859 BasicBlock *TrueBB, *FalseBB;
3860 CmpPredicate Pred;
3861 if (!match(TI, m_Br(m_ICmp(Pred,
3862 m_CombineOr(m_Specific(Op),
3863 m_Specific(Op->stripPointerCasts())),
3864 m_Zero()),
3865 TrueBB, FalseBB)))
3866 return nullptr;
3867 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3868 return nullptr;
3869
3870 // Validate constraint #3: Ensure the null case just falls through.
3871 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3872 return nullptr;
3873 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3874 "Broken CFG: missing edge from predecessor to successor");
3875
3876 // At this point, we know that everything in FreeInstrBB can be moved
3877 // before TI.
3878 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3879 if (&Instr == FreeInstrBBTerminator)
3880 break;
3881 Instr.moveBeforePreserving(TI->getIterator());
3882 }
3883 assert(FreeInstrBB->size() == 1 &&
3884 "Only the branch instruction should remain");
3885
3886 // Now that we've moved the call to free before the NULL check, we have to
3887 // remove any attributes on its parameter that imply it's non-null, because
3888 // those attributes might have only been valid because of the NULL check, and
3889 // we can get miscompiles if we keep them. This is conservative if non-null is
3890 // also implied by something other than the NULL check, but it's guaranteed to
3891 // be correct, and the conservativeness won't matter in practice, since the
3892 // attributes are irrelevant for the call to free itself and the pointer
3893 // shouldn't be used after the call.
3894 AttributeList Attrs = FI.getAttributes();
3895 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3896 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3897 if (Dereferenceable.isValid()) {
3898 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3899 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3900 Attribute::Dereferenceable);
3901 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3902 }
3903 FI.setAttributes(Attrs);
3904
3905 return &FI;
3906}
3907
3908 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3909 // free undef -> unreachable.
3910 if (isa<UndefValue>(Op)) {
3911 // Leave a marker since we can't modify the CFG here.
3912 CreateNonTerminatorUnreachable(&FI);
3913 return eraseInstFromFunction(FI);
3914 }
3915
3916 // If we have 'free null' delete the instruction. This can happen in stl code
3917 // when lots of inlining happens.
3918 if (isa<ConstantPointerNull>(Op))
3919 return eraseInstFromFunction(FI);
3920
3921 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3922 // realloc() entirely.
3923 CallInst *CI = dyn_cast<CallInst>(Op);
3924 if (CI && CI->hasOneUse())
3925 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3926 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3927
3928 // If we optimize for code size, try to move the call to free before the null
3929 // test so that simplify cfg can remove the empty block and dead code
3930 // elimination can remove the branch. I.e., this helps to turn something like:
3931 // if (foo) free(foo);
3932 // into
3933 // free(foo);
3934 //
3935 // Note that we can only do this for 'free' and not for any flavor of
3936 // 'operator delete'; there is no 'operator delete' symbol for which we are
3937 // permitted to invent a call, even if we're passing in a null pointer.
3938 if (MinimizeSize) {
3939 LibFunc Func;
3940 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3941 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3942 return I;
3943 }
3944
3945 return nullptr;
3946}
3947
3948 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3949 Value *RetVal = RI.getReturnValue();
3950 if (!RetVal)
3951 return nullptr;
3952
3953 Function *F = RI.getFunction();
3954 Type *RetTy = RetVal->getType();
3955 if (RetTy->isPointerTy()) {
3956 bool HasDereferenceable =
3957 F->getAttributes().getRetDereferenceableBytes() > 0;
3958 if (F->hasRetAttribute(Attribute::NonNull) ||
3959 (HasDereferenceable &&
3960 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
3961 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3962 return replaceOperand(RI, 0, V);
3963 }
3964 }
3965
3966 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
3967 return nullptr;
3968
3969 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3970 if (ReturnClass == fcNone)
3971 return nullptr;
3972
3973 KnownFPClass KnownClass;
3974 Value *Simplified =
3975 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
3976 if (!Simplified)
3977 return nullptr;
3978
3979 return ReturnInst::Create(RI.getContext(), Simplified);
3980}
3981
3982// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3983 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3984 // Try to remove the previous instruction if it must lead to unreachable.
3985 // This includes instructions like stores and "llvm.assume" that may not get
3986 // removed by simple dead code elimination.
3987 bool Changed = false;
3988 while (Instruction *Prev = I.getPrevNode()) {
3989 // While we theoretically can erase EH, that would result in a block that
3990 // used to start with an EH no longer starting with EH, which is invalid.
3991 // To make it valid, we'd need to fixup predecessors to no longer refer to
3992 // this block, but that changes CFG, which is not allowed in InstCombine.
3993 if (Prev->isEHPad())
3994 break; // Can not drop any more instructions. We're done here.
3995
3996 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3997 break; // Can not drop any more instructions. We're done here.
3998 // Otherwise, this instruction can be freely erased,
3999 // even if it is not side-effect free.
4000
4001 // A value may still have uses before we process it here (for example, in
4002 // another unreachable block), so convert those to poison.
4003 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4004 eraseInstFromFunction(*Prev);
4005 Changed = true;
4006 }
4007 return Changed;
4008}
4009
4010 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4011 removeInstructionsBeforeUnreachable(I);
4012 return nullptr;
4013 }
4014
4015 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4016 assert(BI.isUnconditional() && "Only for unconditional branches.");
4017
4018 // If this store is the second-to-last instruction in the basic block
4019 // (excluding debug info) and if the block ends with
4020 // an unconditional branch, try to move the store to the successor block.
4021
4022 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4023 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4024 do {
4025 if (BBI != FirstInstr)
4026 --BBI;
4027 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4028
4029 return dyn_cast<StoreInst>(BBI);
4030 };
4031
4032 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4033 if (mergeStoreIntoSuccessor(*SI))
4034 return &BI;
4035
4036 return nullptr;
4037}
4038
4039 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
4040 SmallVectorImpl<BasicBlock *> &Worklist) {
4041 if (!DeadEdges.insert({From, To}).second)
4042 return;
4043
4044 // Replace phi node operands in successor with poison.
4045 for (PHINode &PN : To->phis())
4046 for (Use &U : PN.incoming_values())
4047 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4048 replaceUse(U, PoisonValue::get(PN.getType()));
4049 addToWorklist(&PN);
4050 MadeIRChange = true;
4051 }
4052
4053 Worklist.push_back(To);
4054}
4055
4056// Under the assumption that I is unreachable, remove it and following
4057// instructions. Changes are reported directly to MadeIRChange.
4058 void InstCombinerImpl::handleUnreachableFrom(
4059 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4060 BasicBlock *BB = I->getParent();
4061 for (Instruction &Inst : make_early_inc_range(
4062 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4063 std::next(I->getReverseIterator())))) {
4064 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4065 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4066 MadeIRChange = true;
4067 }
4068 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4069 continue;
4070 // RemoveDIs: erase debug-info on this instruction manually.
4071 Inst.dropDbgRecords();
4072 Inst.eraseFromParent();
4073 MadeIRChange = true;
4074 }
4075
4076 SmallVector<Value *> Changed;
4077 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4078 MadeIRChange = true;
4079 for (Value *V : Changed)
4080 addToWorklist(cast<Instruction>(V));
4081 }
4082
4083 // Handle potentially dead successors.
4084 for (BasicBlock *Succ : successors(BB))
4085 addDeadEdge(BB, Succ, Worklist);
4086}
4087
4088 void InstCombinerImpl::handlePotentiallyDeadBlocks(
4089 SmallVectorImpl<BasicBlock *> &Worklist) {
4090 while (!Worklist.empty()) {
4091 BasicBlock *BB = Worklist.pop_back_val();
4092 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4093 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4094 }))
4095 continue;
4096
4097 handleUnreachableFrom(&BB->front(), Worklist);
4098 }
4099}
4100
4101 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4102 BasicBlock *LiveSucc) {
4103 SmallVector<BasicBlock *> Worklist;
4104 for (BasicBlock *Succ : successors(BB)) {
4105 // The live successor isn't dead.
4106 if (Succ == LiveSucc)
4107 continue;
4108
4109 addDeadEdge(BB, Succ, Worklist);
4110 }
4111
4112 handlePotentiallyDeadBlocks(Worklist);
4113}
4114
4115 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4116 if (BI.isUnconditional())
4117 return visitUnconditionalBranchInst(BI);
4118
4119 // Change br (not X), label True, label False to: br X, label False, True
4120 Value *Cond = BI.getCondition();
4121 Value *X;
4122 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4123 // Swap Destinations and condition...
4124 BI.swapSuccessors();
4125 if (BPI)
4126 BPI->swapSuccEdgesProbabilities(BI.getParent());
4127 return replaceOperand(BI, 0, X);
4128 }
4129
4130 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4131 // This is done by inverting the condition and swapping successors:
4132 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4133 Value *Y;
4134 if (isa<SelectInst>(Cond) &&
4135 match(Cond,
4136 m_LogicalAnd(m_Value(X), m_Not(m_Value(Y))))) {
4137 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4138 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4139 BI.swapSuccessors();
4140 if (BPI)
4141 BPI->swapSuccEdgesProbabilities(BI.getParent());
4142 return replaceOperand(BI, 0, Or);
4143 }
4144
4145 // If the condition is irrelevant, remove the use so that other
4146 // transforms on the condition become more effective.
4147 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4148 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4149
4150 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4151 CmpPredicate Pred;
4152 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4153 !isCanonicalPredicate(Pred)) {
4154 // Swap destinations and condition.
4155 auto *Cmp = cast<CmpInst>(Cond);
4156 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4157 BI.swapSuccessors();
4158 if (BPI)
4159 BPI->swapSuccEdgesProbabilities(BI.getParent());
4160 Worklist.push(Cmp);
4161 return &BI;
4162 }
4163
4164 if (isa<UndefValue>(Cond)) {
4165 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4166 return nullptr;
4167 }
4168 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4169 handlePotentiallyDeadSuccessors(BI.getParent(),
4170 BI.getSuccessor(!CI->getZExtValue()));
4171 return nullptr;
4172 }
4173
4174 // Replace all dominated uses of the condition with true/false
4175 // Ignore constant expressions to avoid iterating over uses on other
4176 // functions.
4177 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4178 for (auto &U : make_early_inc_range(Cond->uses())) {
4179 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4180 if (DT.dominates(Edge0, U)) {
4181 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4182 addToWorklist(cast<Instruction>(U.getUser()));
4183 continue;
4184 }
4185 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4186 if (DT.dominates(Edge1, U)) {
4187 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4188 addToWorklist(cast<Instruction>(U.getUser()));
4189 }
4190 }
4191 }
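// E.g. for "br i1 %c, label %t, label %f", a use of %c that is dominated
// by the edge to %t folds to true, and one dominated by the edge to %f
// folds to false.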
4192
4193 DC.registerBranch(&BI);
4194 return nullptr;
4195}
4196
4197// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4198// we can prove that both (switch C) and (switch X) go to the default when cond
4199// is false/true.
4200 static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
4201 SelectInst *Select,
4202 bool IsTrueArm) {
4203 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4204 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4205 if (!C)
4206 return nullptr;
4207
4208 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4209 if (CstBB != SI.getDefaultDest())
4210 return nullptr;
4211 Value *X = Select->getOperand(3 - CstOpIdx);
4212 CmpPredicate Pred;
4213 const APInt *RHSC;
4214 if (!match(Select->getCondition(),
4215 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4216 return nullptr;
4217 if (IsTrueArm)
4218 Pred = ICmpInst::getInversePredicate(Pred);
4219
4220 // See whether we can replace the select with X
4221 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4222 for (auto Case : SI.cases())
4223 if (!CR.contains(Case.getCaseValue()->getValue()))
4224 return nullptr;
4225
4226 return X;
4227}
4228
4229 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4230 Value *Cond = SI.getCondition();
4231 Value *Op0;
4232 ConstantInt *AddRHS;
4233 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
4234 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
4235 for (auto Case : SI.cases()) {
4236 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
4237 assert(isa<ConstantInt>(NewCase) &&
4238 "Result of expression should be constant");
4239 Case.setValue(cast<ConstantInt>(NewCase));
4240 }
4241 return replaceOperand(SI, 0, Op0);
4242 }
4243
4244 ConstantInt *SubLHS;
4245 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
4246 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
4247 for (auto Case : SI.cases()) {
4248 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
4249 assert(isa<ConstantInt>(NewCase) &&
4250 "Result of expression should be constant");
4251 Case.setValue(cast<ConstantInt>(NewCase));
4252 }
4253 return replaceOperand(SI, 0, Op0);
4254 }
4255
4256 uint64_t ShiftAmt;
4257 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4258 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4259 all_of(SI.cases(), [&](const auto &Case) {
4260 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4261 })) {
4262 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4263 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
4264 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4265 Shl->hasOneUse()) {
4266 Value *NewCond = Op0;
4267 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4268 // If the shift may wrap, we need to mask off the shifted bits.
4269 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4270 NewCond = Builder.CreateAnd(
4271 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4272 }
4273 for (auto Case : SI.cases()) {
4274 const APInt &CaseVal = Case.getCaseValue()->getValue();
4275 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4276 : CaseVal.lshr(ShiftAmt);
4277 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4278 }
4279 return replaceOperand(SI, 0, NewCond);
4280 }
4281 }
4282
4283 // Fold switch(zext/sext(X)) into switch(X) if possible.
4284 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4285 bool IsZExt = isa<ZExtInst>(Cond);
4286 Type *SrcTy = Op0->getType();
4287 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4288
4289 if (all_of(SI.cases(), [&](const auto &Case) {
4290 const APInt &CaseVal = Case.getCaseValue()->getValue();
4291 return IsZExt ? CaseVal.isIntN(NewWidth)
4292 : CaseVal.isSignedIntN(NewWidth);
4293 })) {
4294 for (auto &Case : SI.cases()) {
4295 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4296 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4297 }
4298 return replaceOperand(SI, 0, Op0);
4299 }
4300 }
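// E.g. when every case value fits in the source width, with %w = zext i32 %x:
//   switch i64 %w, label %def [ i64 7, label %bb ]
// becomes
//   switch i32 %x, label %def [ i32 7, label %bb ]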
4301
4302 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4303 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4304 if (Value *V =
4305 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4306 return replaceOperand(SI, 0, V);
4307 if (Value *V =
4308 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4309 return replaceOperand(SI, 0, V);
4310 }
4311
4312 KnownBits Known = computeKnownBits(Cond, &SI);
4313 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4314 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4315
4316 // Compute the number of leading bits we can ignore.
4317 // TODO: A better way to determine this would use ComputeNumSignBits().
4318 for (const auto &C : SI.cases()) {
4319 LeadingKnownZeros =
4320 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4321 LeadingKnownOnes =
4322 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4323 }
4324
4325 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4326
4327 // Shrink the condition operand if the new type is smaller than the old type.
4328 // But do not shrink to a non-standard type, because backend can't generate
4329 // good code for that yet.
4330 // TODO: We can make it aggressive again after fixing PR39569.
4331 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4332 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4333 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4334 Builder.SetInsertPoint(&SI);
4335 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4336
4337 for (auto Case : SI.cases()) {
4338 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4339 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4340 }
4341 return replaceOperand(SI, 0, NewCond);
4342 }
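// E.g. (sketch, assuming i16 is a type the target handles well) if the
// condition and all case values have 16 known leading zero bits:
//   %t = trunc i32 %x to i16
//   switch i16 %t, ... (with the case values truncated to i16)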
4343
4344 if (isa<UndefValue>(Cond)) {
4345 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4346 return nullptr;
4347 }
4348 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4349 handlePotentiallyDeadSuccessors(SI.getParent(),
4350 SI.findCaseValue(CI)->getCaseSuccessor());
4351 return nullptr;
4352 }
4353
4354 return nullptr;
4355}
4356
4357 Instruction *
4358InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4359 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4360 if (!WO)
4361 return nullptr;
4362
4363 Intrinsic::ID OvID = WO->getIntrinsicID();
4364 const APInt *C = nullptr;
4365 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4366 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4367 OvID == Intrinsic::umul_with_overflow)) {
4368 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4369 if (C->isAllOnes())
4370 return BinaryOperator::CreateNeg(WO->getLHS());
4371 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4372 if (C->isPowerOf2()) {
4373 return BinaryOperator::CreateShl(
4374 WO->getLHS(),
4375 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4376 }
4377 }
4378 }
4379
4380 // We're extracting from an overflow intrinsic. See if we're the only user.
4381 // That allows us to simplify multiple result intrinsics to simpler things
4382 // that just get one value.
4383 if (!WO->hasOneUse())
4384 return nullptr;
4385
4386 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4387 // and replace it with a traditional binary instruction.
4388 if (*EV.idx_begin() == 0) {
4389 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4390 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4391 // Replace the old instruction's uses with poison.
4392 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4393 eraseInstFromFunction(*WO);
4394 return BinaryOperator::Create(BinOp, LHS, RHS);
4395 }
4396
4397 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4398
4399 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4400 if (OvID == Intrinsic::usub_with_overflow)
4401 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4402
4403 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4404 // +1 is not possible because we assume signed values.
4405 if (OvID == Intrinsic::smul_with_overflow &&
4406 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4407 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4408
4409 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4410 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4411 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4412 // Only handle even bitwidths for performance reasons.
4413 if (BitWidth % 2 == 0)
4414 return new ICmpInst(
4415 ICmpInst::ICMP_UGT, WO->getLHS(),
4416 ConstantInt::get(WO->getLHS()->getType(),
4417 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4418 }
4419
4420 // If only the overflow result is used, and the right hand side is a
4421 // constant (or constant splat), we can remove the intrinsic by directly
4422 // checking for overflow.
4423 if (C) {
4424 // Compute the no-wrap range for LHS given RHS=C, then construct an
4425 // equivalent icmp, potentially using an offset.
4426 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4427 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4428
4429 CmpInst::Predicate Pred;
4430 APInt NewRHSC, Offset;
4431 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4432 auto *OpTy = WO->getRHS()->getType();
4433 auto *NewLHS = WO->getLHS();
4434 if (Offset != 0)
4435 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4436 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4437 ConstantInt::get(OpTy, NewRHSC));
4438 }
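// E.g. extracting only the overflow bit of uadd.with.overflow(i8 %x, 200)
// yields "icmp uge i8 %x, 56", since %x + 200 wraps exactly when %x >= 56.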
4439
4440 return nullptr;
4441}
4442
4443 static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
4444 SelectInst *SelectInst,
4445 InstCombiner::BuilderTy &Builder) {
4446 // Helper to fold frexp of select to select of frexp.
4447
4448 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4449 return nullptr;
4450 Value *Cond = SelectInst->getCondition();
4451 Value *TrueVal = SelectInst->getTrueValue();
4452 Value *FalseVal = SelectInst->getFalseValue();
4453
4454 const APFloat *ConstVal = nullptr;
4455 Value *VarOp = nullptr;
4456 bool ConstIsTrue = false;
4457
4458 if (match(TrueVal, m_APFloat(ConstVal))) {
4459 VarOp = FalseVal;
4460 ConstIsTrue = true;
4461 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4462 VarOp = TrueVal;
4463 ConstIsTrue = false;
4464 } else {
4465 return nullptr;
4466 }
4467
4468 Builder.SetInsertPoint(&EV);
4469
4470 CallInst *NewFrexp =
4471 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4472 NewFrexp->copyIRFlags(FrexpCall);
4473
4474 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4475
4476 int Exp;
4477 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4478
4479 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4480
4481 Value *NewSel = Builder.CreateSelectFMF(
4482 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4483 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4484 return NewSel;
4485}
4486 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4487 Value *Agg = EV.getAggregateOperand();
4488
4489 if (!EV.hasIndices())
4490 return replaceInstUsesWith(EV, Agg);
4491
4492 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4493 SQ.getWithInstruction(&EV)))
4494 return replaceInstUsesWith(EV, V);
4495
4496 Value *Cond, *TrueVal, *FalseVal;
4497 if (match(Agg, m_OneUse(m_Intrinsic<Intrinsic::frexp>(m_Select(
4498 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4499 auto *SelInst =
4500 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4501 if (Value *Result =
4502 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4503 return replaceInstUsesWith(EV, Result);
4504 }
4505 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4506 // We're extracting from an insertvalue instruction, compare the indices
4507 const unsigned *exti, *exte, *insi, *inse;
4508 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4509 exte = EV.idx_end(), inse = IV->idx_end();
4510 exti != exte && insi != inse;
4511 ++exti, ++insi) {
4512 if (*insi != *exti)
4513 // The insert and extract both reference distinctly different elements.
4514 // This means the extract is not influenced by the insert, and we can
4515 // replace the aggregate operand of the extract with the aggregate
4516 // operand of the insert. i.e., replace
4517 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4518 // %E = extractvalue { i32, { i32 } } %I, 0
4519 // with
4520 // %E = extractvalue { i32, { i32 } } %A, 0
4521 return ExtractValueInst::Create(IV->getAggregateOperand(),
4522 EV.getIndices());
4523 }
4524 if (exti == exte && insi == inse)
4525 // Both iterators are at the end: Index lists are identical. Replace
4526 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4527 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4528 // with "i32 42"
4529 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4530 if (exti == exte) {
4531 // The extract list is a prefix of the insert list. i.e. replace
4532 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4533 // %E = extractvalue { i32, { i32 } } %I, 1
4534 // with
4535 // %X = extractvalue { i32, { i32 } } %A, 1
4536 // %E = insertvalue { i32 } %X, i32 42, 0
4537 // by switching the order of the insert and extract (though the
4538 // insertvalue should be left in, since it may have other uses).
4539 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4540 EV.getIndices());
4541 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4542 ArrayRef(insi, inse));
4543 }
4544 if (insi == inse)
4545 // The insert list is a prefix of the extract list
4546 // We can simply remove the common indices from the extract and make it
4547 // operate on the inserted value instead of the insertvalue result.
4548 // i.e., replace
4549 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4550 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4551 // with
4552 // %E extractvalue { i32 } { i32 42 }, 0
4553 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4554 ArrayRef(exti, exte));
4555 }
4556
4557 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4558 return R;
4559
4560 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4561 // Bail out if the aggregate contains scalable vector type
4562 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4563 STy && STy->isScalableTy())
4564 return nullptr;
4565
4566 // If the (non-volatile) load only has one use, we can rewrite this to a
4567 // load from a GEP. This reduces the size of the load. If a load is used
4568 // only by extractvalue instructions then this either must have been
4569 // optimized before, or it is a struct with padding, in which case we
4570 // don't want to do the transformation as it loses padding knowledge.
4571 if (L->isSimple() && L->hasOneUse()) {
4572 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4573 SmallVector<Value*, 4> Indices;
4574 // Prefix an i32 0 since we need the first element.
4575 Indices.push_back(Builder.getInt32(0));
4576 for (unsigned Idx : EV.indices())
4577 Indices.push_back(Builder.getInt32(Idx));
4578
4579 // We need to insert these at the location of the old load, not at that of
4580 // the extractvalue.
4581 Builder.SetInsertPoint(L);
4582 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4583 L->getPointerOperand(), Indices);
4584 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4585 // Whatever aliasing information we had for the original load must also
4586 // hold for the smaller load, so propagate the annotations.
4587 NL->setAAMetadata(L->getAAMetadata());
4588 // Returning the load directly will cause the main loop to insert it in
4589 // the wrong spot, so use replaceInstUsesWith().
4590 return replaceInstUsesWith(EV, NL);
4591 }
4592 }
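// E.g. a simple single-use load feeding only this extractvalue:
//   %agg = load { i32, i64 }, ptr %p
//   %v = extractvalue { i32, i64 } %agg, 1
// becomes a smaller load through a GEP:
//   %gep = getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1
//   %v = load i64, ptr %gep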
4593
4594 if (auto *PN = dyn_cast<PHINode>(Agg))
4595 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4596 return Res;
4597
4598 // Canonicalize extract (select Cond, TV, FV)
4599 // -> select cond, (extract TV), (extract FV)
4600 if (auto *SI = dyn_cast<SelectInst>(Agg))
4601 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4602 return R;
4603
4604 // We could simplify extracts from other values. Note that nested extracts may
4605 // already be simplified implicitly by the above: extract (extract (insert) )
4606 // will be translated into extract ( insert ( extract ) ) first and then just
4607 // the value inserted, if appropriate. Similarly for extracts from single-use
4608 // loads: extract (extract (load)) will be translated to extract (load (gep))
4609 // and if again single-use then via load (gep (gep)) to load (gep).
4610 // However, double extracts from e.g. function arguments or return values
4611 // aren't handled yet.
4612 return nullptr;
4613}
4614
4615/// Return 'true' if the given typeinfo will match anything.
4616static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4617 switch (Personality) {
4618 case EHPersonality::GNU_C:
4619 case EHPersonality::GNU_C_SjLj:
4620 case EHPersonality::Rust:
4621 // The GCC C EH and Rust personalities only exist to support cleanups, so
4622 // it's not clear what the semantics of catch clauses are.
4623 return false;
4624 case EHPersonality::Unknown:
4625 return false;
4626 case EHPersonality::GNU_Ada:
4627 // While __gnat_all_others_value will match any Ada exception, it doesn't
4628 // match foreign exceptions (or didn't, before gcc-4.7).
4629 return false;
4630 case EHPersonality::GNU_CXX:
4631 case EHPersonality::GNU_CXX_SjLj:
4632 case EHPersonality::GNU_ObjC:
4633 case EHPersonality::MSVC_X86SEH:
4634 case EHPersonality::MSVC_TableSEH:
4635 case EHPersonality::MSVC_CXX:
4636 case EHPersonality::CoreCLR:
4637 case EHPersonality::Wasm_CXX:
4638 case EHPersonality::XL_CXX:
4639 case EHPersonality::ZOS_CXX:
4640 return TypeInfo->isNullValue();
4641 }
4642 llvm_unreachable("invalid enum");
4643}
4644
4645static bool shorter_filter(const Value *LHS, const Value *RHS) {
4646 return
4647 cast<ArrayType>(LHS->getType())->getNumElements()
4648 <
4649 cast<ArrayType>(RHS->getType())->getNumElements();
4650}
4651
4652 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4653 // The logic here should be correct for any real-world personality function.
4654 // However if that turns out not to be true, the offending logic can always
4655 // be conditioned on the personality function, like the catch-all logic is.
4656 EHPersonality Personality =
4657 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4658
4659 // Simplify the list of clauses, eg by removing repeated catch clauses
4660 // (these are often created by inlining).
4661 bool MakeNewInstruction = false; // If true, recreate using the following:
4662 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4663 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4664
4665 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4666 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4667 bool isLastClause = i + 1 == e;
4668 if (LI.isCatch(i)) {
4669 // A catch clause.
4670 Constant *CatchClause = LI.getClause(i);
4671 Constant *TypeInfo = CatchClause->stripPointerCasts();
4672
4673 // If we already saw this clause, there is no point in having a second
4674 // copy of it.
4675 if (AlreadyCaught.insert(TypeInfo).second) {
4676 // This catch clause was not already seen.
4677 NewClauses.push_back(CatchClause);
4678 } else {
4679 // Repeated catch clause - drop the redundant copy.
4680 MakeNewInstruction = true;
4681 }
4682
4683 // If this is a catch-all then there is no point in keeping any following
4684 // clauses or marking the landingpad as having a cleanup.
4685 if (isCatchAll(Personality, TypeInfo)) {
4686 if (!isLastClause)
4687 MakeNewInstruction = true;
4688 CleanupFlag = false;
4689 break;
4690 }
4691 } else {
4692 // A filter clause. If any of the filter elements were already caught
4693 // then they can be dropped from the filter. It is tempting to try to
4694 // exploit the filter further by saying that any typeinfo that does not
4695 // occur in the filter can't be caught later (and thus can be dropped).
4696 // However this would be wrong, since typeinfos can match without being
4697 // equal (for example if one represents a C++ class, and the other some
4698 // class derived from it).
4699 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4700 Constant *FilterClause = LI.getClause(i);
4701 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4702 unsigned NumTypeInfos = FilterType->getNumElements();
4703
4704 // An empty filter catches everything, so there is no point in keeping any
4705 // following clauses or marking the landingpad as having a cleanup. By
4706 // dealing with this case here the following code is made a bit simpler.
4707 if (!NumTypeInfos) {
4708 NewClauses.push_back(FilterClause);
4709 if (!isLastClause)
4710 MakeNewInstruction = true;
4711 CleanupFlag = false;
4712 break;
4713 }
4714
4715 bool MakeNewFilter = false; // If true, make a new filter.
4716 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4717 if (isa<ConstantAggregateZero>(FilterClause)) {
4718 // Not an empty filter - it contains at least one null typeinfo.
4719 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4720 Constant *TypeInfo =
4721 Constant::getNullValue(FilterType->getElementType());
4722 // If this typeinfo is a catch-all then the filter can never match.
4723 if (isCatchAll(Personality, TypeInfo)) {
4724 // Throw the filter away.
4725 MakeNewInstruction = true;
4726 continue;
4727 }
4728
4729 // There is no point in having multiple copies of this typeinfo, so
4730 // discard all but the first copy if there is more than one.
4731 NewFilterElts.push_back(TypeInfo);
4732 if (NumTypeInfos > 1)
4733 MakeNewFilter = true;
4734 } else {
4735 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4736 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4737 NewFilterElts.reserve(NumTypeInfos);
4738
4739 // Remove any filter elements that were already caught or that already
4740 // occurred in the filter. While there, see if any of the elements are
4741 // catch-alls. If so, the filter can be discarded.
4742 bool SawCatchAll = false;
4743 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4744 Constant *Elt = Filter->getOperand(j);
4745 Constant *TypeInfo = Elt->stripPointerCasts();
4746 if (isCatchAll(Personality, TypeInfo)) {
4747 // This element is a catch-all. Bail out, noting this fact.
4748 SawCatchAll = true;
4749 break;
4750 }
4751
4752 // Even if we've seen a type in a catch clause, we don't want to
4753 // remove it from the filter. An unexpected type handler may be
4754 // set up for a call site which throws an exception of the same
4755 // type caught. In order for the exception thrown by the unexpected
4756 // handler to propagate correctly, the filter must be correctly
4757 // described for the call site.
4758 //
4759 // Example:
4760 //
4761 // void unexpected() { throw 1;}
4762 // void foo() throw (int) {
4763 // std::set_unexpected(unexpected);
4764 // try {
4765 // throw 2.0;
4766 // } catch (int i) {}
4767 // }
4768
4769 // There is no point in having multiple copies of the same typeinfo in
4770 // a filter, so only add it if we didn't already.
4771 if (SeenInFilter.insert(TypeInfo).second)
4772 NewFilterElts.push_back(cast<Constant>(Elt));
4773 }
4774 // A filter containing a catch-all cannot match anything by definition.
4775 if (SawCatchAll) {
4776 // Throw the filter away.
4777 MakeNewInstruction = true;
4778 continue;
4779 }
4780
4781 // If we dropped something from the filter, make a new one.
4782 if (NewFilterElts.size() < NumTypeInfos)
4783 MakeNewFilter = true;
4784 }
4785 if (MakeNewFilter) {
4786 FilterType = ArrayType::get(FilterType->getElementType(),
4787 NewFilterElts.size());
4788 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4789 MakeNewInstruction = true;
4790 }
4791
4792 NewClauses.push_back(FilterClause);
4793
4794 // If the new filter is empty then it will catch everything so there is
4795 // no point in keeping any following clauses or marking the landingpad
4796 // as having a cleanup. The case of the original filter being empty was
4797 // already handled above.
4798 if (MakeNewFilter && !NewFilterElts.size()) {
4799 assert(MakeNewInstruction && "New filter but not a new instruction!");
4800 CleanupFlag = false;
4801 break;
4802 }
4803 }
4804 }
4805
4806 // If several filters occur in a row then reorder them so that the shortest
4807 // filters come first (those with the smallest number of elements). This is
4808 // advantageous because shorter filters are more likely to match, speeding up
4809 // unwinding, but mostly because it increases the effectiveness of the other
4810 // filter optimizations below.
4811 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4812 unsigned j;
4813 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4814 for (j = i; j != e; ++j)
4815 if (!isa<ArrayType>(NewClauses[j]->getType()))
4816 break;
4817
4818 // Check whether the filters are already sorted by length. We need to know
4819 // if sorting them is actually going to do anything so that we only make a
4820 // new landingpad instruction if it does.
4821 for (unsigned k = i; k + 1 < j; ++k)
4822 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4823 // Not sorted, so sort the filters now. Doing an unstable sort would be
4824 // correct too but reordering filters pointlessly might confuse users.
4825 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4826 shorter_filter);
4827 MakeNewInstruction = true;
4828 break;
4829 }
4830
4831 // Look for the next batch of filters.
4832 i = j + 1;
4833 }
4834
4835 // If typeinfos matched if and only if equal, then the elements of a filter L
4836 // that occurs later than a filter F could be replaced by the intersection of
4837 // the elements of F and L. In reality two typeinfos can match without being
4838 // equal (for example if one represents a C++ class, and the other some class
4839 // derived from it) so it would be wrong to perform this transform in general.
4840 // However the transform is correct and useful if F is a subset of L. In that
4841 // case L can be replaced by F, and thus removed altogether since repeating a
4842 // filter is pointless. So here we look at all pairs of filters F and L where
4843 // L follows F in the list of clauses, and remove L if every element of F is
4844 // an element of L. This can occur when inlining C++ functions with exception
4845 // specifications.
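// Illustrative example (not from the source; @A and @B hypothetical): if
//   filter [1 x ptr] [ptr @A]
// precedes
//   filter [2 x ptr] [ptr @A, ptr @B]
// then every element of the first filter is an element of the second, so the
// second filter is redundant and is erased by the loop below.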
4846 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4847 // Examine each filter in turn.
4848 Value *Filter = NewClauses[i];
4849 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4850 if (!FTy)
4851 // Not a filter - skip it.
4852 continue;
4853 unsigned FElts = FTy->getNumElements();
4854 // Examine each filter following this one. Doing this backwards means that
4855 // we don't have to worry about filters disappearing under us when removed.
4856 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4857 Value *LFilter = NewClauses[j];
4858 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4859 if (!LTy)
4860 // Not a filter - skip it.
4861 continue;
4862 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4863 // an element of LFilter, then discard LFilter.
4864 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4865 // If Filter is empty then it is a subset of LFilter.
4866 if (!FElts) {
4867 // Discard LFilter.
4868 NewClauses.erase(J);
4869 MakeNewInstruction = true;
4870 // Move on to the next filter.
4871 continue;
4872 }
4873 unsigned LElts = LTy->getNumElements();
4874 // If Filter is longer than LFilter then it cannot be a subset of it.
4875 if (FElts > LElts)
4876 // Move on to the next filter.
4877 continue;
4878 // At this point we know that LFilter has at least one element.
4879 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4880 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4881 // already know that Filter is not longer than LFilter).
4882 if (isa<ConstantAggregateZero>(Filter)) {
4883 assert(FElts <= LElts && "Should have handled this case earlier!");
4884 // Discard LFilter.
4885 NewClauses.erase(J);
4886 MakeNewInstruction = true;
4887 }
4888 // Move on to the next filter.
4889 continue;
4890 }
4891 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4892 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4893 // Since Filter is non-empty and contains only zeros, it is a subset of
4894 // LFilter iff LFilter contains a zero.
4895 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4896 for (unsigned l = 0; l != LElts; ++l)
4897 if (LArray->getOperand(l)->isNullValue()) {
4898 // LFilter contains a zero - discard it.
4899 NewClauses.erase(J);
4900 MakeNewInstruction = true;
4901 break;
4902 }
4903 // Move on to the next filter.
4904 continue;
4905 }
4906 // At this point we know that both filters are ConstantArrays. Loop over
4907 // operands to see whether every element of Filter is also an element of
4908 // LFilter. Since filters tend to be short this is probably faster than
4909 // using a method that scales nicely.
4910 ConstantArray *FArray = cast<ConstantArray>(Filter);
4911 bool AllFound = true;
4912 for (unsigned f = 0; f != FElts; ++f) {
4913 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4914 AllFound = false;
4915 for (unsigned l = 0; l != LElts; ++l) {
4916 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4917 if (LTypeInfo == FTypeInfo) {
4918 AllFound = true;
4919 break;
4920 }
4921 }
4922 if (!AllFound)
4923 break;
4924 }
4925 if (AllFound) {
4926 // Discard LFilter.
4927 NewClauses.erase(J);
4928 MakeNewInstruction = true;
4929 }
4930 // Move on to the next filter.
4931 }
4932 }
4933
4934 // If we changed any of the clauses, replace the old landingpad instruction
4935 // with a new one.
4936 if (MakeNewInstruction) {
4937 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4938 NewClauses.size());
4939 for (Constant *C : NewClauses)
4940 NLI->addClause(C);
4941 // A landing pad with no clauses must have the cleanup flag set. It is
4942 // theoretically possible, though highly unlikely, that we eliminated all
4943 // clauses. If so, force the cleanup flag to true.
4944 if (NewClauses.empty())
4945 CleanupFlag = true;
4946 NLI->setCleanup(CleanupFlag);
4947 return NLI;
4948 }
4949
4950 // Even if none of the clauses changed, we may nonetheless have understood
4951 // that the cleanup flag is pointless. Clear it if so.
4952 if (LI.isCleanup() != CleanupFlag) {
4953 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4954 LI.setCleanup(CleanupFlag);
4955 return &LI;
4956 }
4957
4958 return nullptr;
4959}
4960
4961Value *
4962InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4963 // Try to push freeze through instructions that propagate poison but do not
4964 // produce it themselves, as far as possible. If the freeze's operand does not
4965 // itself produce poison, push the freeze through to those of its operands
4966 // that are not guaranteed non-poison. The actual transform is as follows.
4967 // Op1 = ... ; Op1 can be poison
4968 // Op0 = Inst(Op1, NonPoisonOps...)
4969 // ... = Freeze(Op0)
4970 // =>
4971 // Op1 = ...
4972 // Op1.fr = Freeze(Op1)
4973 // ... = Inst(Op1.fr, NonPoisonOps...)
4974
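// Concrete IR instance of the transform above (illustrative; %x, %a, %f are
// placeholder names):
//   %a = add nsw i32 %x, 1     ; nsw may make %a poison
//   %f = freeze i32 %a
// becomes
//   %x.fr = freeze i32 %x
//   %a = add i32 %x.fr, 1      ; nsw dropped, so %a cannot be poison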
4975 auto CanPushFreeze = [](Value *V) {
4976 if (!isa<Instruction>(V) || isa<PHINode>(V))
4977 return false;
4978
4979 // We can't push the freeze through an instruction which can itself create
4980 // poison. If the only source of new poison is flags, we can simply
4981 // strip them (since we know the only use is the freeze and nothing can
4982 // benefit from them).
4983 return !canCreateUndefOrPoison(cast<Operator>(V),
4984 /*ConsiderFlagsAndMetadata*/ false);
4985 };
4986
4987 // Pushing freezes up long instruction chains can be expensive. Instead,
4988 // we directly push the freeze all the way to the leaves. However, we leave
4989 // deduplication of freezes on the same value for freezeOtherUses().
4990 Use *OrigUse = &OrigFI.getOperandUse(0);
4991 SmallVector<Use *, 8> Worklist;
4992 SmallPtrSet<Instruction *, 8> Visited;
4993 Worklist.push_back(OrigUse);
4994 while (!Worklist.empty()) {
4995 auto *U = Worklist.pop_back_val();
4996 Value *V = U->get();
4997 if (!CanPushFreeze(V)) {
4998 // If we can't push through the original instruction, abort the transform.
4999 if (U == OrigUse)
5000 return nullptr;
5001
5002 auto *UserI = cast<Instruction>(U->getUser());
5003 Builder.SetInsertPoint(UserI);
5004 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5005 U->set(Frozen);
5006 continue;
5007 }
5008
5009 auto *I = cast<Instruction>(V);
5010 if (!Visited.insert(I).second)
5011 continue;
5012
5013 // reverse() to emit freezes in a more natural order.
5014 for (Use &Op : reverse(I->operands())) {
5015 Value *OpV = Op.get();
5016 if (isGuaranteedNotToBeUndefOrPoison(OpV))
5017 continue;
5018 Worklist.push_back(&Op);
5019 }
5020
5021 I->dropPoisonGeneratingAnnotations();
5022 this->Worklist.add(I);
5023 }
5024
5025 return OrigUse->get();
5026}
5027
5028Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5029 PHINode *PN) {
5030 // Detect whether this is a recurrence with a start value and some number of
5031 // backedge values. We'll check whether we can push the freeze through the
5032 // backedge values (possibly dropping poison flags along the way) until we
5033 // reach the phi again. In that case, we can move the freeze to the start
5034 // value.
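// Illustrative example (not from the source; names hypothetical):
//   loop:
//     %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw i64 %iv, 1
//   %fr = freeze i64 %iv
// Here the freeze can be applied to %start instead (dropping nuw from the
// add), so the loop-carried value never needs to be frozen.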
5035 Use *StartU = nullptr;
5036 SmallVector<Value *> Worklist;
5037 for (Use &U : PN->incoming_values()) {
5038 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5039 // Add backedge value to worklist.
5040 Worklist.push_back(U.get());
5041 continue;
5042 }
5043
5044 // Don't bother handling multiple start values.
5045 if (StartU)
5046 return nullptr;
5047 StartU = &U;
5048 }
5049
5050 if (!StartU || Worklist.empty())
5051 return nullptr; // Not a recurrence.
5052
5053 Value *StartV = StartU->get();
5054 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5055 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5056 // We can't insert freeze if the start value is the result of the
5057 // terminator (e.g. an invoke).
5058 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5059 return nullptr;
5060
5061 SmallPtrSet<Value *, 32> Visited;
5062 SmallVector<Instruction *> DropFlags;
5063 while (!Worklist.empty()) {
5064 Value *V = Worklist.pop_back_val();
5065 if (!Visited.insert(V).second)
5066 continue;
5067
5068 if (Visited.size() > 32)
5069 return nullptr; // Limit the total number of values we inspect.
5070
5071 // Assume that PN is non-poison, because it will be after the transform.
5072 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5073 continue;
5074
5075 Instruction *I = dyn_cast<Instruction>(V);
5076 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5077 /*ConsiderFlagsAndMetadata*/ false))
5078 return nullptr;
5079
5080 DropFlags.push_back(I);
5081 append_range(Worklist, I->operands());
5082 }
5083
5084 for (Instruction *I : DropFlags)
5085 I->dropPoisonGeneratingAnnotations();
5086
5087 if (StartNeedsFreeze) {
5088 Builder.SetInsertPoint(StartBB->getTerminator());
5089 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5090 StartV->getName() + ".fr");
5091 replaceUse(*StartU, FrozenStartV);
5092 }
5093 return replaceInstUsesWith(FI, PN);
5094}
5095
5096bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5097 Value *Op = FI.getOperand(0);
5098
5099 if (isa<Constant>(Op) || Op->hasOneUse())
5100 return false;
5101
5102 // Move the freeze directly after the definition of its operand, so that
5103 // it dominates the maximum number of uses. Note that it may not dominate
5104 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5105 // the normal/default destination. This is why the domination check in the
5106 // replacement below is still necessary.
5107 BasicBlock::iterator MoveBefore;
5108 if (isa<Argument>(Op)) {
5109 MoveBefore =
5110 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5111 } else {
5112 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5113 if (!MoveBeforeOpt)
5114 return false;
5115 MoveBefore = *MoveBeforeOpt;
5116 }
5117
5118 // Re-point iterator to come after any debug-info records.
5119 MoveBefore.setHeadBit(false);
5120
5121 bool Changed = false;
5122 if (&FI != &*MoveBefore) {
5123 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5124 Changed = true;
5125 }
5126
5127 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5128 bool Dominates = DT.dominates(&FI, U);
5129 Changed |= Dominates;
5130 return Dominates;
5131 });
5132
5133 return Changed;
5134}
5135
5136// Check if any direct or bitcast user of this value is a shuffle instruction.
5137static bool isUsedWithinShuffleVector(Value *V) {
5138 for (auto *U : V->users()) {
5139 if (isa<ShuffleVectorInst>(U))
5140 return true;
5141 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5142 return true;
5143 }
5144 return false;
5145}
5146
5147Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5148 Value *Op0 = I.getOperand(0);
5149
5150 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5151 return replaceInstUsesWith(I, V);
5152
5153 // freeze (phi const, x) --> phi const, (freeze x)
5154 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5155 if (Instruction *NV = foldOpIntoPhi(I, PN))
5156 return NV;
5157 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5158 return NV;
5159 }
5160
5161 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5162 return replaceInstUsesWith(I, NI);
5163
5164 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5165 // - or: pick -1
5166 // - select's condition: if the true value is constant, choose it by making
5167 // the condition true.
5168 // - phi: pick the common constant across operands
5169 // - default: pick 0
5170 //
5171 // Note that this transform is intentionally done here rather than
5172 // via an analysis in InstSimplify or at individual user sites. That is
5173 // because we must produce the same value for all uses of the freeze -
5174 // it's the reason "freeze" exists!
5175 //
5176 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5177 // duplicating logic for binops at least.
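// Illustrative example (not from the source): if the only user of
// %f = freeze(undef) is "or i32 %v, %f", picking -1 lets the 'or' fold to -1;
// if the only user is the condition of a select with a constant true arm,
// picking 'true' lets the select fold to that constant.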
5178 auto getUndefReplacement = [&](Type *Ty) {
5179 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5180 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5181 // removed.
5182 Constant *BestValue = nullptr;
5183 for (Value *V : PN.incoming_values()) {
5184 if (match(V, m_Freeze(m_Undef())))
5185 continue;
5186
5187 Constant *C = dyn_cast<Constant>(V);
5188 if (!C)
5189 return nullptr;
5190
5191 if (C->containsUndefOrPoisonElement())
5192 return nullptr;
5193
5194 if (BestValue && BestValue != C)
5195 return nullptr;
5196
5197 BestValue = C;
5198 }
5199 return BestValue;
5200 };
5201
5202 Value *NullValue = Constant::getNullValue(Ty);
5203 Value *BestValue = nullptr;
5204 for (auto *U : I.users()) {
5205 Value *V = NullValue;
5206 if (match(U, m_Or(m_Value(), m_Value())))
5207 V = ConstantInt::getAllOnesValue(Ty);
5208 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5209 V = ConstantInt::getTrue(Ty);
5210 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5211 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5212 V = NullValue;
5213 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5214 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5215 V = MaybeV;
5216 }
5217
5218 if (!BestValue)
5219 BestValue = V;
5220 else if (BestValue != V)
5221 BestValue = NullValue;
5222 }
5223 assert(BestValue && "Must have at least one use");
5224 assert(BestValue != &I && "Cannot replace with itself");
5225 return BestValue;
5226 };
5227
5228 if (match(Op0, m_Undef())) {
5229 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5230 // a shuffle. This may improve codegen for shuffles that allow
5231 // unspecified inputs.
5232 if (isUsedWithinShuffleVector(&I))
5233 return nullptr;
5234 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5235 }
5236
5237 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5238 Type *Ty = C->getType();
5239 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5240 if (!VTy)
5241 return nullptr;
5242 unsigned NumElts = VTy->getNumElements();
5243 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5244 for (unsigned i = 0; i != NumElts; ++i) {
5245 Constant *EltC = C->getAggregateElement(i);
5246 if (EltC && !match(EltC, m_Undef())) {
5247 BestValue = EltC;
5248 break;
5249 }
5250 }
5251 return Constant::replaceUndefsWith(C, BestValue);
5252 };
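// For example (illustrative): freezing the vector constant
// <i32 7, i32 undef, i32 7, i32 undef> yields <i32 7, i32 7, i32 7, i32 7>,
// reusing the first defined element for the undef lanes.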
5253
5254 Constant *C;
5255 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5256 !C->containsConstantExpression()) {
5257 if (Constant *Repl = getFreezeVectorReplacement(C))
5258 return replaceInstUsesWith(I, Repl);
5259 }
5260
5261 // Replace uses of Op with freeze(Op).
5262 if (freezeOtherUses(I))
5263 return &I;
5264
5265 return nullptr;
5266}
5267
5268/// Check for the case where the call writes to an otherwise dead alloca. This
5269/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5270/// helper *only* analyzes the write; doesn't check any other legality aspect.
5271static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5272 auto *CB = dyn_cast<CallBase>(I);
5273 if (!CB)
5274 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5275 // to allow reload along used path as described below. Otherwise, this
5276 // is simply a store to a dead allocation which will be removed.
5277 return false;
5278 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5279 if (!Dest)
5280 return false;
5281 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5282 if (!AI)
5283 // TODO: allow malloc?
5284 return false;
5285 // TODO: allow memory access dominated by move point? Note that since AI
5286 // could have a reference to itself captured by the call, we would need to
5287 // account for cycles in doing so.
5288 SmallVector<const User *> AllocaUsers;
5289 SmallPtrSet<const User *, 4> Visited;
5290 auto pushUsers = [&](const Instruction &I) {
5291 for (const User *U : I.users()) {
5292 if (Visited.insert(U).second)
5293 AllocaUsers.push_back(U);
5294 }
5295 };
5296 pushUsers(*AI);
5297 while (!AllocaUsers.empty()) {
5298 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5299 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5300 pushUsers(*UserI);
5301 continue;
5302 }
5303 if (UserI == CB)
5304 continue;
5305 // TODO: support lifetime.start/end here
5306 return false;
5307 }
5308 return true;
5309}
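// A C-level sketch of the idiom the check above targets (names hypothetical):
//   int unused;          // becomes an otherwise-dead alloca
//   getValue(&unused);   // the call's only write is to 'unused'
// The out-param write is never read, so the call qualifies as a sole write to
// a dead local and may be sunk.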
5310
5311/// Try to move the specified instruction from its current block into the
5312/// beginning of DestBlock, which can only happen if it's safe to move the
5313/// instruction past all of the instructions between it and the end of its
5314/// block.
5315bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5316 BasicBlock *DestBlock) {
5317 BasicBlock *SrcBlock = I->getParent();
5318
5319 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
5320 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5321 I->isTerminator())
5322 return false;
5323
5324 // Do not sink static or dynamic alloca instructions. Static allocas must
5325 // remain in the entry block, and dynamic allocas must not be sunk in between
5326 // a stacksave / stackrestore pair, which would incorrectly shorten its
5327 // lifetime.
5328 if (isa<AllocaInst>(I))
5329 return false;
5330
5331 // Do not sink into catchswitch blocks.
5332 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5333 return false;
5334
5335 // Do not sink convergent call instructions.
5336 if (auto *CI = dyn_cast<CallInst>(I)) {
5337 if (CI->isConvergent())
5338 return false;
5339 }
5340
5341 // Unless we can prove that the memory write isn't visible except on the
5342 // path we're sinking to, we must bail.
5343 if (I->mayWriteToMemory()) {
5344 if (!SoleWriteToDeadLocal(I, TLI))
5345 return false;
5346 }
5347
5348 // We can only sink load instructions if there is nothing between the load and
5349 // the end of block that could change the value.
5350 if (I->mayReadFromMemory() &&
5351 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5352 // We don't want to do any sophisticated alias analysis, so we only check
5353 // the instructions after I in I's parent block if we try to sink to its
5354 // successor block.
5355 if (DestBlock->getUniquePredecessor() != I->getParent())
5356 return false;
5357 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5358 E = I->getParent()->end();
5359 Scan != E; ++Scan)
5360 if (Scan->mayWriteToMemory())
5361 return false;
5362 }
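// For instance (illustrative): a load cannot be sunk past an intervening
// "store i32 0, ptr %p" in its block, since the store may clobber the loaded
// location; the scan above conservatively rejects any intervening writer.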
5363
5364 I->dropDroppableUses([&](const Use *U) {
5365 auto *I = dyn_cast<Instruction>(U->getUser());
5366 if (I && I->getParent() != DestBlock) {
5367 Worklist.add(I);
5368 return true;
5369 }
5370 return false;
5371 });
5372 /// FIXME: We could remove droppable uses that are not dominated by
5373 /// the new position.
5374
5375 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5376 I->moveBefore(*DestBlock, InsertPos);
5377 ++NumSunkInst;
5378
5379 // Also sink all related debug uses from the source basic block. Otherwise we
5380 // get a debug use before the def. Attempt to salvage debug uses first, to
5381 // maximise the range over which variables have a location. If we cannot salvage, then
5382 // mark the location undef: we know it was supposed to receive a new location
5383 // here, but that computation has been sunk.
5384 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5385 findDbgUsers(I, DbgVariableRecords);
5386 if (!DbgVariableRecords.empty())
5387 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5388 DbgVariableRecords);
5389
5390 // PS: there are numerous flaws with this behaviour, not least that right now
5391 // assignments can be re-ordered past other assignments to the same variable
5392 // if they use different Values. Creating more undef assignments can never be
5393 // undone. And salvaging all users outside of this block can unnecessarily
5394 // alter the lifetime of the live-value that the variable refers to.
5395 // Some of these things can be resolved by tolerating debug use-before-defs in
5396 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5397 // being used for more architectures.
5398
5399 return true;
5400}
5401
5402void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5403 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5404 BasicBlock *DestBlock,
5405 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5406 // For all debug values in the destination block, the sunk instruction
5407 // will still be available, so they do not need to be dropped.
5408
5409 // Fetch all DbgVariableRecords not already in the destination.
5410 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5411 for (auto &DVR : DbgVariableRecords)
5412 if (DVR->getParent() != DestBlock)
5413 DbgVariableRecordsToSalvage.push_back(DVR);
5414
5415 // Fetch a second collection, of DbgVariableRecords in the source block that
5416 // we're going to sink.
5417 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5418 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5419 if (DVR->getParent() == SrcBlock)
5420 DbgVariableRecordsToSink.push_back(DVR);
5421
5422 // Sort DbgVariableRecords according to their position in the block. This is a
5423 // partial order: DbgVariableRecords attached to different instructions will
5424 // be ordered by the instruction order, but DbgVariableRecords attached to the
5425 // same instruction won't have an order.
5426 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5427 return B->getInstruction()->comesBefore(A->getInstruction());
5428 };
5429 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5430
5431 // If there are two assignments to the same variable attached to the same
5432 // instruction, the ordering between the two assignments is important. Scan
5433 // for this (rare) case and establish which is the last assignment.
5434 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5435 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5436 if (DbgVariableRecordsToSink.size() > 1) {
5437 SmallDenseMap<InstVarPair, unsigned> CountMap;
5438 // Count how many assignments to each variable there is per instruction.
5439 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5440 DebugVariable DbgUserVariable =
5441 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5442 DVR->getDebugLoc()->getInlinedAt());
5443 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5444 }
5445
5446 // If there are any instructions with two assignments, add them to the
5447 // FilterOutMap to record that they need extra filtering.
5448 SmallPtrSet<const Instruction *, 4> DupSet;
5449 for (auto It : CountMap) {
5450 if (It.second > 1) {
5451 FilterOutMap[It.first] = nullptr;
5452 DupSet.insert(It.first.first);
5453 }
5454 }
5455
5456 // For all instruction/variable pairs needing extra filtering, find the
5457 // latest assignment.
5458 for (const Instruction *Inst : DupSet) {
5459 for (DbgVariableRecord &DVR :
5460 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5461 DebugVariable DbgUserVariable =
5462 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5463 DVR.getDebugLoc()->getInlinedAt());
5464 auto FilterIt =
5465 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5466 if (FilterIt == FilterOutMap.end())
5467 continue;
5468 if (FilterIt->second != nullptr)
5469 continue;
5470 FilterIt->second = &DVR;
5471 }
5472 }
5473 }
5474
5475 // Perform cloning of the DbgVariableRecords that we plan on sinking, filtering
5476 // out any duplicate assignments identified above.
5477 SmallVector<DbgVariableRecord *, 2> DVRClones;
5478 SmallSet<DebugVariable, 4> SunkVariables;
5479 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5480 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
5481 continue;
5482
5483 DebugVariable DbgUserVariable =
5484 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5485 DVR->getDebugLoc()->getInlinedAt());
5486
5487 // For any variable where there were multiple assignments in the same place,
5488 // ignore all but the last assignment.
5489 if (!FilterOutMap.empty()) {
5490 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5491 auto It = FilterOutMap.find(IVP);
5492
5493 // Filter out.
5494 if (It != FilterOutMap.end() && It->second != DVR)
5495 continue;
5496 }
5497
5498 if (!SunkVariables.insert(DbgUserVariable).second)
5499 continue;
5500
5501 if (DVR->isDbgAssign())
5502 continue;
5503
5504 DVRClones.emplace_back(DVR->clone());
5505 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5506 }
5507
5508 // Perform salvaging without the clones, then sink the clones.
5509 if (DVRClones.empty())
5510 return;
5511
5512 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5513
5514 // The clones are in reverse order of original appearance. Assert that the
5515 // head bit is set on the iterator as we _should_ have received it via
5516 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5517 // we'll repeatedly insert at the head, such as:
5518 // DVR-3 (third insertion goes here)
5519 // DVR-2 (second insertion goes here)
5520 // DVR-1 (first insertion goes here)
5521 // Any-Prior-DVRs
5522 // InsertPtInst
5523 assert(InsertPos.getHeadBit());
5524 for (DbgVariableRecord *DVRClone : DVRClones) {
5525 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5526 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5527 }
5528}
5529
5531 while (!Worklist.isEmpty()) {
5532 // Walk deferred instructions in reverse order, and push them to the
5533 // worklist, which means they'll end up popped from the worklist in-order.
5534 while (Instruction *I = Worklist.popDeferred()) {
5535 // Check to see if we can DCE the instruction. We do this already here to
5536 // reduce the number of uses and thus allow other folds to trigger.
5537 // Note that eraseInstFromFunction() may push additional instructions on
5538 // the deferred worklist, so this will DCE whole instruction chains.
5539 if (isInstructionTriviallyDead(I, &TLI)) {
5540 eraseInstFromFunction(*I);
5541 ++NumDeadInst;
5542 continue;
5543 }
5544
5545 Worklist.push(I);
5546 }
5547
5548 Instruction *I = Worklist.removeOne();
5549 if (I == nullptr) continue; // skip null values.
5550
5551 // Check to see if we can DCE the instruction.
5552 if (isInstructionTriviallyDead(I, &TLI)) {
5553 eraseInstFromFunction(*I);
5554 ++NumDeadInst;
5555 continue;
5556 }
5557
5558 if (!DebugCounter::shouldExecute(VisitCounter))
5559 continue;
5560
5561 // See if we can trivially sink this instruction to its user if we can
5562 // prove that the successor is not executed more frequently than our block.
5563 // Return the UserBlock if successful.
5564 auto getOptionalSinkBlockForInst =
5565 [this](Instruction *I) -> std::optional<BasicBlock *> {
5566 if (!EnableCodeSinking)
5567 return std::nullopt;
5568
5569 BasicBlock *BB = I->getParent();
5570 BasicBlock *UserParent = nullptr;
5571 unsigned NumUsers = 0;
5572
5573 for (Use &U : I->uses()) {
5574 User *User = U.getUser();
5575 if (User->isDroppable())
5576 continue;
5577 if (NumUsers > MaxSinkNumUsers)
5578 return std::nullopt;
5579
5580 Instruction *UserInst = cast<Instruction>(User);
5581 // Special handling for Phi nodes - get the block the use occurs in.
5582 BasicBlock *UserBB = UserInst->getParent();
5583 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5584 UserBB = PN->getIncomingBlock(U);
5585 // Bail out if we have uses in different blocks. We don't do any
5586 // sophisticated analysis (i.e. finding the NearestCommonDominator of these
5587 // use blocks).
5588 if (UserParent && UserParent != UserBB)
5589 return std::nullopt;
5590 UserParent = UserBB;
5591
5592 // Make sure these checks are done only once; naturally, we do them the
5593 // first time we get the user's parent block, which saves compile time.
5594 if (NumUsers == 0) {
5595 // Try sinking to another block. If that block is unreachable, then do
5596 // not bother. SimplifyCFG should handle it.
5597 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5598 return std::nullopt;
5599
5600 auto *Term = UserParent->getTerminator();
5601 // See if the user is one of our successors that has only one
5602 // predecessor, so that we don't have to split the critical edge.
5603 // Another option where we can sink is a block that ends with a
5604 // terminator that does not pass control to other block (such as
5605 // return or unreachable or resume). In this case:
5606 // - I dominates the User (by SSA form);
5607 // - the User will be executed at most once.
5608 // So sinking I down to User is always profitable or neutral.
5609 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5610 return std::nullopt;
5611
5612 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5613 }
5614
5615 NumUsers++;
5616 }
5617
5618 // No users, or only droppable users.
5619 if (!UserParent)
5620 return std::nullopt;
5621
5622 return UserParent;
5623 };
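// Illustrative example (not from the source; names hypothetical): for
//   %v = mul i32 %a, %b
//   br i1 %c, label %use, label %skip
// where %v's single user lives in %use and %use has this block as its unique
// predecessor, %v is sunk into %use so the multiply does not execute on the
// %skip path.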
5624
5625 auto OptBB = getOptionalSinkBlockForInst(I);
5626 if (OptBB) {
5627 auto *UserParent = *OptBB;
5628 // Okay, the CFG is simple enough, try to sink this instruction.
5629 if (tryToSinkInstruction(I, UserParent)) {
5630 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5631 MadeIRChange = true;
5632 // We'll add uses of the sunk instruction below, but since
5633 // sinking can expose opportunities for its *operands*, add
5634 // them to the worklist as well.
5635 for (Use &U : I->operands())
5636 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5637 Worklist.push(OpI);
5638 }
5639 }
5640
5641 // Now that we have an instruction, try combining it to simplify it.
5642 Builder.SetInsertPoint(I);
5643 Builder.CollectMetadataToCopy(
5644 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5645
5646#ifndef NDEBUG
5647 std::string OrigI;
5648#endif
5649 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5650 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5651
5652 if (Instruction *Result = visit(*I)) {
5653 ++NumCombined;
5654 // Should we replace the old instruction with a new one?
5655 if (Result != I) {
5656 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5657 << " New = " << *Result << '\n');
5658
5659 // We copy the old instruction's DebugLoc to the new instruction, unless
5660 // InstCombine already assigned a DebugLoc to it, in which case we
5661 // should trust the more specifically selected DebugLoc.
5662 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5663 // We also copy annotation metadata to the new instruction.
5664 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5665 // Everything uses the new instruction now.
5666 I->replaceAllUsesWith(Result);
5667
5668 // Move the name to the new instruction first.
5669 Result->takeName(I);
5670
5671 // Insert the new instruction into the basic block...
5672 BasicBlock *InstParent = I->getParent();
5673 BasicBlock::iterator InsertPos = I->getIterator();
5674
5675 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5676 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5677 // We need to fix up the insertion point.
5678 if (isa<PHINode>(I)) // PHI -> Non-PHI
5679 InsertPos = InstParent->getFirstInsertionPt();
5680 else // Non-PHI -> PHI
5681 InsertPos = InstParent->getFirstNonPHIIt();
5682 }
5683
5684 Result->insertInto(InstParent, InsertPos);
5685
5686 // Push the new instruction and any users onto the worklist.
5687 Worklist.pushUsersToWorkList(*Result);
5688 Worklist.push(Result);
5689 eraseInstFromFunction(*I);
5689
5691 } else {
5692 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5693 << " New = " << *I << '\n');
5694
5695 // If the instruction was modified, it's possible that it is now dead.
5696 // If so, remove it.
5697 if (isInstructionTriviallyDead(I, &TLI)) {
5698 eraseInstFromFunction(*I);
5699 } else {
5700 Worklist.pushUsersToWorkList(*I);
5701 Worklist.push(I);
5702 }
5703 }
5704 MadeIRChange = true;
5705 }
5706 }
5707
5708 Worklist.zap();
5709 return MadeIRChange;
5710}
5711
5712// Track the scopes used by !alias.scope and !noalias. In a function, a
5713// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5714// by both sets. If not, the declaration of the scope can be safely omitted.
5715// The MDNode of the scope can be omitted as well for the instructions that are
5716// part of this function. We do not do that at this point, as this might become
5717// too time consuming to do.
5718class AliasScopeTracker {
5719 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5720 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5721
5722public:
5723 void analyse(Instruction *I) {
5724 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5725 if (!I->hasMetadataOtherThanDebugLoc())
5726 return;
5727
5728 auto Track = [](Metadata *ScopeList, auto &Container) {
5729 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5730 if (!MDScopeList || !Container.insert(MDScopeList).second)
5731 return;
5732 for (const auto &MDOperand : MDScopeList->operands())
5733 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5734 Container.insert(MDScope);
5735 };
5736
5737 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5738 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5739 }
5740
5741 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5742 const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5743 if (!Decl)
5744 return false;
5745
5746 assert(Decl->use_empty() &&
5747 "llvm.experimental.noalias.scope.decl in use ?");
5748 const MDNode *MDSL = Decl->getScopeList();
5749 assert(MDSL->getNumOperands() == 1 &&
5750 "llvm.experimental.noalias.scope should refer to a single scope");
5751 auto &MDOperand = MDSL->getOperand(0);
5752 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5753 return !UsedAliasScopesAndLists.contains(MD) ||
5754 !UsedNoAliasScopesAndLists.contains(MD);
5755
5756 // Not an MDNode? Throw it away.
5757 return true;
5758 }
5759};
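// Illustrative example (not from the source): a call to
// @llvm.experimental.noalias.scope.decl(metadata !2) is only useful while some
// instruction carries !alias.scope and some instruction carries !noalias
// naming the scope declared by !2; once either side is gone, the declaration
// is dead and isNoAliasScopeDeclDead() above reports it as removable.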
5760
5761/// Populate the IC worklist from a function, by walking it in reverse
5762/// post-order and adding all reachable code to the worklist.
5763///
5764/// This has a couple of tricks to make the code faster and more powerful. In
5765/// particular, we constant fold and DCE instructions as we go, to avoid adding
5766/// them to the worklist (this significantly speeds up instcombine on code where
5767/// many instructions are dead or constant). Additionally, if we find a branch
5768/// whose condition is a known constant, we only visit the reachable successors.
5769bool InstCombinerImpl::prepareWorklist(Function &F) {
5770 bool MadeIRChange = false;
5771 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5772 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5773 DenseMap<Constant *, Constant *> FoldedConstants;
5774 AliasScopeTracker SeenAliasScopes;
5775
5776 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5777 for (BasicBlock *Succ : successors(BB))
5778 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5779 for (PHINode &PN : Succ->phis())
5780 for (Use &U : PN.incoming_values())
5781 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5782 U.set(PoisonValue::get(PN.getType()));
5783 MadeIRChange = true;
5784 }
5785 };
5786
5787 for (BasicBlock *BB : RPOT) {
5788 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5789 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5790 })) {
5791 HandleOnlyLiveSuccessor(BB, nullptr);
5792 continue;
5793 }
5794 LiveBlocks.insert(BB);
5795
5796 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5797 // ConstantProp instruction if trivially constant.
5798 if (!Inst.use_empty() &&
5799 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5800 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5801 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5802 << '\n');
5803 Inst.replaceAllUsesWith(C);
5804 ++NumConstProp;
5805 if (isInstructionTriviallyDead(&Inst, &TLI))
5806 Inst.eraseFromParent();
5807 MadeIRChange = true;
5808 continue;
5809 }
5810
5811 // See if we can constant fold its operands.
5812 for (Use &U : Inst.operands()) {
5813 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5814 continue;
5815
5816 auto *C = cast<Constant>(U);
5817 Constant *&FoldRes = FoldedConstants[C];
5818 if (!FoldRes)
5819 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5820
5821 if (FoldRes != C) {
5822 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5823 << "\n Old = " << *C
5824 << "\n New = " << *FoldRes << '\n');
5825 U = FoldRes;
5826 MadeIRChange = true;
5827 }
5828 }
5829
5830 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5831 // these call instructions consumes non-trivial amount of time and
5832 // provides no value for the optimization.
5833 if (!Inst.isDebugOrPseudoInst()) {
5834 InstrsForInstructionWorklist.push_back(&Inst);
5835 SeenAliasScopes.analyse(&Inst);
5836 }
5837 }
5838
5839 // If this is a branch or switch on a constant, mark only the single
5840 // live successor. Otherwise assume all successors are live.
5841 Instruction *TI = BB->getTerminator();
5842 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5843 if (isa<UndefValue>(BI->getCondition())) {
5844 // Branch on undef is UB.
5845 HandleOnlyLiveSuccessor(BB, nullptr);
5846 continue;
5847 }
5848 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5849 bool CondVal = Cond->getZExtValue();
5850 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5851 continue;
5852 }
5853 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5854 if (isa<UndefValue>(SI->getCondition())) {
5855 // Switch on undef is UB.
5856 HandleOnlyLiveSuccessor(BB, nullptr);
5857 continue;
5858 }
5859 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5860 HandleOnlyLiveSuccessor(BB,
5861 SI->findCaseValue(Cond)->getCaseSuccessor());
5862 continue;
5863 }
5864 }
5865 }
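// For example (illustrative): for "br i1 true, label %t, label %f", only %t is
// treated as live; the dead edge to %f is recorded and %f's phi inputs coming
// from this block are replaced with poison by HandleOnlyLiveSuccessor above.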
5866
5867 // Remove instructions inside unreachable blocks. This prevents the
5868 // instcombine code from having to deal with some bad special cases, and
5869 // reduces use counts of instructions.
5870 for (BasicBlock &BB : F) {
5871 if (LiveBlocks.count(&BB))
5872 continue;
5873
5874 unsigned NumDeadInstInBB;
5875 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
5876
5877 MadeIRChange |= NumDeadInstInBB != 0;
5878 NumDeadInst += NumDeadInstInBB;
5879 }
5880
5881 // Once we've found all of the instructions to add to instcombine's worklist,
5882 // add them in reverse order. This way instcombine will visit from the top
5883 // of the function down. This jives well with the way that it adds all uses
5884 // of instructions to the worklist after doing a transformation, thus avoiding
5885 // some N^2 behavior in pathological cases.
5886 Worklist.reserve(InstrsForInstructionWorklist.size());
5887 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5888 // DCE instruction if trivially dead. As we iterate in reverse program
5889 // order here, we will clean up whole chains of dead instructions.
5890 if (isInstructionTriviallyDead(Inst, &TLI) ||
5891 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5892 ++NumDeadInst;
5893 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5894 salvageDebugInfo(*Inst);
5895 Inst->eraseFromParent();
5896 MadeIRChange = true;
5897 continue;
5898 }
5899
5900 Worklist.push(Inst);
5901 }
5902
5903 return MadeIRChange;
5904}
5905
5906void InstCombinerImpl::computeBackEdges() {
5907 // Collect backedges.
5908 SmallPtrSet<BasicBlock *, 16> Visited;
5909 for (BasicBlock *BB : RPOT) {
5910 Visited.insert(BB);
5911 for (BasicBlock *Succ : successors(BB))
5912 if (Visited.contains(Succ))
5913 BackEdges.insert({BB, Succ});
5914 }
5915 ComputedBackEdges = true;
5916}
5917
5918static bool combineInstructionsOverFunction(
5919 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5920 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5921 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5922 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5923 const InstCombineOptions &Opts) {
5924 auto &DL = F.getDataLayout();
5925 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5926 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5927
5928 /// Builder - This is an IRBuilder that automatically inserts new
5929 /// instructions into the worklist when they are created.
5930 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5931 F.getContext(), TargetFolder(DL),
5932 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5933 Worklist.add(I);
5934 if (auto *Assume = dyn_cast<AssumeInst>(I))
5935 AC.registerAssumption(Assume);
5936 }));
5937
5938 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5939
5940 // Lower dbg.declare intrinsics otherwise their value may be clobbered
5941 // by instcombiner.
5942 bool MadeIRChange = false;
5943 if (ShouldLowerDbgDeclare)
5944 MadeIRChange = LowerDbgDeclare(F);
5945
5946 // Iterate while there is work to do.
5947 unsigned Iteration = 0;
5948 while (true) {
5949 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
5950 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5951 << " on " << F.getName()
5952 << " reached; stopping without verifying fixpoint\n");
5953 break;
5954 }
5955
5956 ++Iteration;
5957 ++NumWorklistIterations;
5958 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5959 << F.getName() << "\n");
5960
5961 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
5962 BPI, PSI, DL, RPOT);
5963 IC.MaxArraySizeForCombine = MaxArraySize;
5964 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5965 MadeChangeInThisIteration |= IC.run();
5966 if (!MadeChangeInThisIteration)
5967 break;
5968
5969 MadeIRChange = true;
5970 if (Iteration > Opts.MaxIterations) {
5971 report_fatal_error(
5972 "Instruction Combining on " + Twine(F.getName()) +
5973 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5974 " iterations. " +
5975 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5976 "'instcombine-no-verify-fixpoint' to suppress this error.");
5977 }
5978 }
5979
5980 if (Iteration == 1)
5981 ++NumOneIteration;
5982 else if (Iteration == 2)
5983 ++NumTwoIterations;
5984 else if (Iteration == 3)
5985 ++NumThreeIterations;
5986 else
5987 ++NumFourOrMoreIterations;
5988
5989 return MadeIRChange;
5990}
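// Illustrative pipeline usage (the parameter names are the ones printed by
// printPipeline below):
//   opt -passes='instcombine<max-iterations=2;verify-fixpoint>' in.ll
// With verify-fixpoint enabled, failing to reach a fixpoint within
// max-iterations triggers the fatal error above.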
5991
5992InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
5993
5994void InstCombinePass::printPipeline(
5995 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5996 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5997 OS, MapClassName2PassName);
5998 OS << '<';
5999 OS << "max-iterations=" << Options.MaxIterations << ";";
6000 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6001 OS << '>';
6002}
6003
6004char InstCombinePass::ID = 0;
6005
6006PreservedAnalyses InstCombinePass::run(Function &F,
6007 FunctionAnalysisManager &AM) {
6008 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6009 // No changes since last InstCombine pass, exit early.
6010 if (LRT.shouldSkip(&ID))
6011 return PreservedAnalyses::all();
6012
6013 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6014 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6015 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6016 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6017 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6018
6019 auto *AA = &AM.getResult<AAManager>(F);
6020 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6021 ProfileSummaryInfo *PSI =
6022 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6023 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6024 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6025 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6026
6027 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6028 BFI, BPI, PSI, Options)) {
6029 // No changes, all analyses are preserved.
6030 LRT.update(&ID, /*Changed=*/false);
6031 return PreservedAnalyses::all();
6032 }
6033
6034 // Mark all the analyses that instcombine updates as preserved.
6035 PreservedAnalyses PA;
6036 LRT.update(&ID, /*Changed=*/true);
6037 PA.preserveSet<CFGAnalyses>();
6038 PA.preserve<LastRunTrackingAnalysis>();
6039 return PA;
6040}
6041
6042void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
6043 AU.setPreservesCFG();
6044 AU.addRequired<AAResultsWrapperPass>();
6045 AU.addRequired<AssumptionCacheTracker>();
6046 AU.addRequired<TargetLibraryInfoWrapperPass>();
6047 AU.addRequired<TargetTransformInfoWrapperPass>();
6048 AU.addRequired<DominatorTreeWrapperPass>();
6049 AU.addPreserved<DominatorTreeWrapperPass>();
6050 AU.addPreserved<AAResultsWrapperPass>();
6051 AU.addPreserved<BasicAAWrapperPass>();
6052 AU.addPreserved<GlobalsAAWrapperPass>();
6053 AU.addRequired<ProfileSummaryInfoWrapperPass>();
6054 AU.addPreserved<LazyBlockFrequencyInfoPass>();
6055 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
6056}
6057
6058bool InstructionCombiningPass::runOnFunction(Function &F) {
6059 if (skipFunction(F))
6060 return false;
6061
6062 // Required analyses.
6063 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6064 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6065 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6066 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6067 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6068 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6069
6070 // Optional analyses.
6071 ProfileSummaryInfo *PSI =
6072 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6073 BlockFrequencyInfo *BFI =
6074 (PSI && PSI->hasProfileSummary()) ?
6075 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6076 nullptr;
6077 BranchProbabilityInfo *BPI = nullptr;
6078 if (auto *WrapperPass =
6079 getAnalysisIfAvailable<LazyBranchProbabilityInfoPass>())
6080 BPI = &WrapperPass->getBPI();
6081
6082 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6083 BFI, BPI, PSI, InstCombineOptions());
6084}
6085
6086char InstructionCombiningPass::ID = 0;
6087
6088InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
6089 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
6090}
6091
6092INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
6093 "Combine redundant instructions", false, false)
6094INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6095INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
6096INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6097INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6098INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6099INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6100INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6101INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
6102INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6103INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
6104 "Combine redundant instructions", false, false)
6105
6106// Initialization Routines
6107FunctionPass *llvm::createInstructionCombiningPass() {
6108 return new InstructionCombiningPass();
6109}
6110
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define T
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:423
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1890
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:827
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1960
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:851
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
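The distinction between these two matters when canonicalizing compares: swapping the operands calls for the swapped predicate, while negating the result calls for the inverse. A short sketch over the CmpInst API above (canonicalizeExample is a hypothetical name):

  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // icmp sgt A, B   is equivalent to  icmp slt B, A  (operands swapped),
  // !(icmp sgt A, B) is equivalent to icmp sle A, B  (result inverted).
  static void canonicalizeExample(CmpInst &Cmp) {
    CmpInst::Predicate Swapped = Cmp.getSwappedPredicate();
    CmpInst::Predicate Inverse = Cmp.getInversePredicate();
    (void)Swapped;
    (void)Inverse;
  }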
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:776
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
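A sketch of how these pieces compose, assuming only the ConstantRange API listed here: makeExactICmpRegion builds the set of values satisfying a predicate against a constant, and contains answers membership queries.

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // True iff the 8-bit value X satisfies "icmp ult X, 8", i.e. X is in [0, 8).
  static bool isULT8(const APInt &X) {
    ConstantRange R =
        ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 8));
    return R.contains(X); // e.g. true for 5, false for 9.
  }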
Constant Vector Declarations.
Definition Constants.h:517
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
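A minimal sketch of how a reassociating fold would combine nowrap evidence from two nested GEPs, under the interface above (flagsAfterSwap is a hypothetical helper, and the header path is assumed):

  #include "llvm/IR/GEPNoWrapFlags.h"
  using namespace llvm;

  // When turning (gep (gep p, x), y) into (gep (gep p, y), x), only the
  // flags valid under reassociation may be kept on the rewritten GEPs.
  static GEPNoWrapFlags flagsAfterSwap(GEPNoWrapFlags Outer,
                                       GEPNoWrapFlags Inner) {
    return Outer.intersectForReassociate(Inner);
  }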
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2783
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
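These visitXxx hooks share one contract: return a replacement instruction, call a replace* helper and return the visited instruction to signal an in-place change, or return nullptr for no change. A hedged fragment of that shape, written as a member of the combiner; visitExample and its xor fold are illustrative only:

  // Fragment: assumes the InstCombinerImpl context documented above.
  Instruction *visitExample(BinaryOperator &I) {
    // Fold "xor X, 0" --> X, routing the RAUW through the combiner so
    // that X's users are pushed back onto the worklist.
    if (I.getOpcode() == Instruction::Xor &&
        PatternMatch::match(I.getOperand(1), PatternMatch::m_Zero()))
      return replaceInstUsesWith(I, I.getOperand(0));
    return nullptr; // No transform applied; leave I alone.
  }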
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Tracking metadata reference owned by Metadata.
Definition Metadata.h:900
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1468
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
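A short sketch of the usual shapes a function pass returns, using only the PreservedAnalyses API above (reportPreserved is a hypothetical helper):

  #include "llvm/IR/Analysis.h"
  using namespace llvm;

  static PreservedAnalyses reportPreserved(bool Changed) {
    if (!Changed)
      return PreservedAnalyses::all(); // Touched nothing; everything survives.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();     // Rewrote instructions, kept the CFG.
    return PA;
  }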
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:107
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:279
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
op_iterator op_begin()
Definition User.h:284
const Use & getOperandUse(unsigned i) const
Definition User.h:245
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:115
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
bool use_empty() const
Definition Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:881
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr and ashr).
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
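All of these matchers compose into pattern trees consumed by match(); capture matchers like m_Value and m_APInt bind subexpressions on success. A small sketch (matchAddShl is a hypothetical helper; m_One matches the constant 1):

  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Recognize ((X + C) << 1), binding X and the constant C on success.
  static bool matchAddShl(Value *V, Value *&X, const APInt *&C) {
    return match(V, m_Shl(m_Add(m_Value(X), m_APInt(C)), m_One()));
  }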
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
void stable_sort(R &&Range)
Definition STLExtras.h:2060
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:256
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2474
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1725
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2138
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
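This adapter is what makes erase-while-iterating loops safe: it advances the iterator before yielding each element. A minimal sketch combining it with isInstructionTriviallyDead from this page:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/Transforms/Utils/Local.h"
  using namespace llvm;

  static void eraseTriviallyDead(BasicBlock &BB) {
    // Early-increment keeps the loop valid even when I is unlinked below.
    for (Instruction &I : make_early_inc_range(BB))
      if (isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }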
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1555
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2468
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:186
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1734
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
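A sketch of the typical call shape for these folders (foldBinaryConstants is a hypothetical wrapper): each returns the folded constant, or null when no fold applies.

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // Fold "Opcode C1, C2" to a constant, or return null if it cannot fold.
  static Constant *foldBinaryConstants(unsigned Opcode, Constant *C1,
                                       Constant *C2, const DataLayout &DL) {
    return ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL);
  }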
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:325
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:1963
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1899
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2090
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:304
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:324
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:251
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
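A sketch of how a narrowing fold consumes these queries (fitsInLowBits is a hypothetical helper): known leading zeros bound the number of significant bits.

  #include "llvm/Support/KnownBits.h"
  #include <cassert>
  using namespace llvm;

  // True if the known bits prove the value occupies at most NarrowBits
  // low bits; NarrowBits must not exceed the value's bit width.
  static bool fitsInLowBits(const KnownBits &Known, unsigned NarrowBits) {
    assert(NarrowBits <= Known.getBitWidth());
    return Known.countMinLeadingZeros() >= Known.getBitWidth() - NarrowBits;
  }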
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const