LLVM 22.0.0git
InstructionCombining.cpp
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
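//
// For instance (illustrative only), canonicalizations #1 and #6 together turn:
//    %Y = mul i32 8, %X
// into:
//    %Y = shl i32 %X, 3
// (the constant is moved to the RHS, then the power-of-two multiply becomes a
// shift).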
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool>
136EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143 static cl::opt<unsigned>
144 MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
148
149// FIXME: Remove this flag when it is no longer necessary to convert
150// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
151// increases variable availability at the cost of accuracy. Variables that
152// cannot be promoted by mem2reg or SROA will be described as living in memory
153// for their entire lifetime. However, passes like DSE and instcombine can
154// delete stores to the alloca, leading to misleading and inaccurate debug
155// information. This flag can be removed when those passes are fixed.
156static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
157 cl::Hidden, cl::init(true));
158
159std::optional<Instruction *>
160 InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
161 // Handle target specific intrinsics
162 if (II.getCalledFunction()->isTargetIntrinsic()) {
163 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
164 }
165 return std::nullopt;
166}
167
168 std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
169 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
170 bool &KnownBitsComputed) {
171 // Handle target specific intrinsics
172 if (II.getCalledFunction()->isTargetIntrinsic()) {
173 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
174 *this, II, DemandedMask, Known, KnownBitsComputed);
175 }
176 return std::nullopt;
177}
178
179 std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
180 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
181 APInt &PoisonElts2, APInt &PoisonElts3,
182 std::function<void(Instruction *, unsigned, APInt, APInt &)>
183 SimplifyAndSetOp) {
184 // Handle target specific intrinsics
185 if (II.getCalledFunction()->isTargetIntrinsic()) {
186 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
187 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
188 SimplifyAndSetOp);
189 }
190 return std::nullopt;
191}
192
193bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
194 // Approved exception for TTI use: This queries a legality property of the
195 // target, not a profitability heuristic. Ideally this should be part of
196 // DataLayout instead.
197 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
198}
199
200Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
201 if (!RewriteGEP)
202 return llvm::emitGEPOffset(&Builder, DL, GEP);
203
204 IRBuilderBase::InsertPointGuard Guard(Builder);
205 auto *Inst = dyn_cast<Instruction>(GEP);
206 if (Inst)
207 Builder.SetInsertPoint(Inst);
208
209 Value *Offset = EmitGEPOffset(GEP);
210 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
211 if (Inst && !GEP->hasAllConstantIndices() &&
212 !GEP->getSourceElementType()->isIntegerTy(8)) {
213 replaceInstUsesWith(
214 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
215 Offset, "", GEP->getNoWrapFlags()));
216 eraseInstFromFunction(*Inst);
217 }
218 return Offset;
219}
220
221Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
222 GEPNoWrapFlags NW, Type *IdxTy,
223 bool RewriteGEPs) {
224 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
225 if (Sum)
226 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
227 NW.isInBounds());
228 else
229 return Offset;
230 };
231
232 Value *Sum = nullptr;
233 Value *OneUseSum = nullptr;
234 Value *OneUseBase = nullptr;
235 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
236 for (GEPOperator *GEP : reverse(GEPs)) {
237 Value *Offset;
238 {
239 // Expand the offset at the point of the previous GEP to enable rewriting.
240 // However, use the original insertion point for calculating Sum.
241 IRBuilderBase::InsertPointGuard Guard(Builder);
242 auto *Inst = dyn_cast<Instruction>(GEP);
243 if (RewriteGEPs && Inst)
244 Builder.SetInsertPoint(Inst);
245
246 Offset = EmitGEPOffset(GEP);
247 if (Offset->getType() != IdxTy)
248 Offset = Builder.CreateVectorSplat(
249 cast<VectorType>(IdxTy)->getElementCount(), Offset);
250 if (GEP->hasOneUse()) {
251 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
252 OneUseSum = Add(OneUseSum, Offset);
253 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
254 if (!OneUseBase)
255 OneUseBase = GEP->getPointerOperand();
256 continue;
257 }
258
259 if (OneUseSum)
260 Offset = Add(OneUseSum, Offset);
261
262 // Rewrite the GEP to reuse the computed offset. This also includes
263 // offsets from preceding one-use GEPs.
264 if (RewriteGEPs && Inst &&
265 !(GEP->getSourceElementType()->isIntegerTy(8) &&
266 GEP->getOperand(1) == Offset)) {
267 replaceInstUsesWith(
268 *Inst,
269 Builder.CreatePtrAdd(
270 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
271 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
272 eraseInstFromFunction(*Inst);
273 }
274 }
275
276 Sum = Add(Sum, Offset);
277 OneUseSum = OneUseBase = nullptr;
278 OneUseFlags = GEPNoWrapFlags::all();
279 }
280 if (OneUseSum)
281 Sum = Add(Sum, OneUseSum);
282 if (!Sum)
283 return Constant::getNullValue(IdxTy);
284 return Sum;
285}
286
287/// Legal integers and common types are considered desirable. This is used to
288/// avoid creating instructions with types that may not be supported well by
289/// the backend.
290/// NOTE: This treats i8, i16 and i32 specially because they are common
291/// types in frontend languages.
292bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
293 switch (BitWidth) {
294 case 8:
295 case 16:
296 case 32:
297 return true;
298 default:
299 return DL.isLegalInteger(BitWidth);
300 }
301}
302
303/// Return true if it is desirable to convert an integer computation from a
304/// given bit width to a new bit width.
305/// We don't want to convert from a legal or desirable type (like i8) to an
306/// illegal type or from a smaller to a larger illegal type. A width of '1'
307/// is always treated as a desirable type because i1 is a fundamental type in
308/// IR, and there are many specialized optimizations for i1 types.
309/// Common/desirable widths are equally treated as legal to convert to, in
310/// order to open up more combining opportunities.
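///
/// For illustration (assuming a typical 64-bit DataLayout where i8/i16/i32/i64
/// are legal): i33 -> i32 is allowed (shrinking to a desirable width),
/// i64 -> i160 is rejected (legal to illegal), and i160 -> i64 is allowed
/// (shrinking to a legal width).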
311bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
312 unsigned ToWidth) const {
313 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
314 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
315
316 // Convert to desirable widths even if they are not legal types.
317 // Only shrink types, to prevent infinite loops.
318 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
319 return true;
320
321 // If this is a legal or desirable integer from type, and the result would be
322 // an illegal type, don't do the transformation.
323 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
324 return false;
325
326 // Otherwise, if both are illegal, do not increase the size of the result. We
327 // do allow things like i160 -> i64, but not i64 -> i160.
328 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
329 return false;
330
331 return true;
332}
333
334/// Return true if it is desirable to convert a computation from 'From' to 'To'.
335/// We don't want to convert from a legal to an illegal type or from a smaller
336/// to a larger illegal type. i1 is always treated as a legal type because it is
337/// a fundamental type in IR, and there are many specialized optimizations for
338/// i1 types.
339bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
340 // TODO: This could be extended to allow vectors. Datalayout changes might be
341 // needed to properly support that.
342 if (!From->isIntegerTy() || !To->isIntegerTy())
343 return false;
344
345 unsigned FromWidth = From->getPrimitiveSizeInBits();
346 unsigned ToWidth = To->getPrimitiveSizeInBits();
347 return shouldChangeType(FromWidth, ToWidth);
348}
349
350 // Return true if No Signed Wrap should be maintained for I.
351// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
352// where both B and C should be ConstantInts, results in a constant that does
353// not overflow. This function only handles the Add/Sub/Mul opcodes. For
354// all other opcodes, the function conservatively returns false.
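// For example, when reassociating "(X +nsw 100) +nsw 30" over i8, folding the
// constants gives 130, which overflows the signed i8 range, so nsw is not
// preserved; for "(X +nsw 3) +nsw 4" the combined constant 7 fits and the flag
// can be kept. (Illustrative only.)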
355 static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
356 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
357 if (!OBO || !OBO->hasNoSignedWrap())
358 return false;
359
360 const APInt *BVal, *CVal;
361 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
362 return false;
363
364 // We reason about Add/Sub/Mul Only.
365 bool Overflow = false;
366 switch (I.getOpcode()) {
367 case Instruction::Add:
368 (void)BVal->sadd_ov(*CVal, Overflow);
369 break;
370 case Instruction::Sub:
371 (void)BVal->ssub_ov(*CVal, Overflow);
372 break;
373 case Instruction::Mul:
374 (void)BVal->smul_ov(*CVal, Overflow);
375 break;
376 default:
377 // Conservatively return false for other opcodes.
378 return false;
379 }
380 return !Overflow;
381}
382
383 static bool hasNoUnsignedWrap(BinaryOperator &I) {
384 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
385 return OBO && OBO->hasNoUnsignedWrap();
386}
387
388 static bool hasNoSignedWrap(BinaryOperator &I) {
389 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
390 return OBO && OBO->hasNoSignedWrap();
391}
392
393/// Conservatively clears subclassOptionalData after a reassociation or
394/// commutation. Fast-math flags are preserved when applicable, as they can be
395/// safely kept across these transforms.
396 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
397 auto *FPMO = dyn_cast<FPMathOperator>(&I);
398 if (!FPMO) {
399 I.clearSubclassOptionalData();
400 return;
401 }
402
403 FastMathFlags FMF = I.getFastMathFlags();
404 I.clearSubclassOptionalData();
405 I.setFastMathFlags(FMF);
406}
407
408/// Combine constant operands of associative operations either before or after a
409/// cast to eliminate one of the associative operations:
410/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
411/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
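///
/// For illustration, with a zext cast and bitwise 'and':
///    %t = and i8 %x, 15
///    %z = zext i8 %t to i32
///    %r = and i32 %z, 3
/// becomes
///    %z = zext i8 %x to i32
///    %r = and i32 %z, 3
/// because zext(15) & 3 folds to 3 in the destination type.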
412 static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
413 InstCombinerImpl &IC) {
414 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
415 if (!Cast || !Cast->hasOneUse())
416 return false;
417
418 // TODO: Enhance logic for other casts and remove this check.
419 auto CastOpcode = Cast->getOpcode();
420 if (CastOpcode != Instruction::ZExt)
421 return false;
422
423 // TODO: Enhance logic for other BinOps and remove this check.
424 if (!BinOp1->isBitwiseLogicOp())
425 return false;
426
427 auto AssocOpcode = BinOp1->getOpcode();
428 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
429 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
430 return false;
431
432 Constant *C1, *C2;
433 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
434 !match(BinOp2->getOperand(1), m_Constant(C2)))
435 return false;
436
437 // TODO: This assumes a zext cast.
438 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
439 // to the destination type might lose bits.
440
441 // Fold the constants together in the destination type:
442 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
443 const DataLayout &DL = IC.getDataLayout();
444 Type *DestTy = C1->getType();
445 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
446 if (!CastC2)
447 return false;
448 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
449 if (!FoldedC)
450 return false;
451
452 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
453 IC.replaceOperand(*BinOp1, 1, FoldedC);
454 BinOp1->dropPoisonGeneratingFlags();
455 Cast->dropPoisonGeneratingFlags();
456 return true;
457}
458
459// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
460// inttoptr ( ptrtoint (x) ) --> x
461Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
462 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
463 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
464 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
465 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
466 Type *CastTy = IntToPtr->getDestTy();
467 if (PtrToInt &&
468 CastTy->getPointerAddressSpace() ==
469 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
470 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
471 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
472 return PtrToInt->getOperand(0);
473 }
474 return nullptr;
475}
476
477/// This performs a few simplifications for operators that are associative or
478/// commutative:
479///
480/// Commutative operators:
481///
482/// 1. Order operands such that they are listed from right (least complex) to
483/// left (most complex). This puts constants before unary operators before
484/// binary operators.
485///
486/// Associative operators:
487///
488/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
489/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
490///
491/// Associative and commutative operators:
492///
493/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
494/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
495/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
496/// if C1 and C2 are constants.
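///
/// As a concrete instance of rule 6, "(X + 3) + (Y + 5)" can be rewritten as
/// "(X + Y) + 8", folding the two constants into one.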
497 bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
498 Instruction::BinaryOps Opcode = I.getOpcode();
499 bool Changed = false;
500
501 do {
502 // Order operands such that they are listed from right (least complex) to
503 // left (most complex). This puts constants before unary operators before
504 // binary operators.
505 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
506 getComplexity(I.getOperand(1)))
507 Changed = !I.swapOperands();
508
509 if (I.isCommutative()) {
510 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
511 replaceOperand(I, 0, Pair->first);
512 replaceOperand(I, 1, Pair->second);
513 Changed = true;
514 }
515 }
516
517 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
518 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
519
520 if (I.isAssociative()) {
521 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
522 if (Op0 && Op0->getOpcode() == Opcode) {
523 Value *A = Op0->getOperand(0);
524 Value *B = Op0->getOperand(1);
525 Value *C = I.getOperand(1);
526
527 // Does "B op C" simplify?
528 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
529 // It simplifies to V. Form "A op V".
530 replaceOperand(I, 0, A);
531 replaceOperand(I, 1, V);
532 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
533 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
534
535 // Conservatively clear all optional flags since they may not be
536 // preserved by the reassociation. Reset nsw/nuw based on the above
537 // analysis.
538 ClearSubclassDataAfterReassociation(I);
539
540 // Note: this is only valid because SimplifyBinOp doesn't look at
541 // the operands to Op0.
542 if (IsNUW)
543 I.setHasNoUnsignedWrap(true);
544
545 if (IsNSW)
546 I.setHasNoSignedWrap(true);
547
548 Changed = true;
549 ++NumReassoc;
550 continue;
551 }
552 }
553
554 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
555 if (Op1 && Op1->getOpcode() == Opcode) {
556 Value *A = I.getOperand(0);
557 Value *B = Op1->getOperand(0);
558 Value *C = Op1->getOperand(1);
559
560 // Does "A op B" simplify?
561 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
562 // It simplifies to V. Form "V op C".
563 replaceOperand(I, 0, V);
564 replaceOperand(I, 1, C);
565 // Conservatively clear the optional flags, since they may not be
566 // preserved by the reassociation.
567 ClearSubclassDataAfterReassociation(I);
568 Changed = true;
569 ++NumReassoc;
570 continue;
571 }
572 }
573 }
574
575 if (I.isAssociative() && I.isCommutative()) {
576 if (simplifyAssocCastAssoc(&I, *this)) {
577 Changed = true;
578 ++NumReassoc;
579 continue;
580 }
581
582 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
583 if (Op0 && Op0->getOpcode() == Opcode) {
584 Value *A = Op0->getOperand(0);
585 Value *B = Op0->getOperand(1);
586 Value *C = I.getOperand(1);
587
588 // Does "C op A" simplify?
589 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
590 // It simplifies to V. Form "V op B".
591 replaceOperand(I, 0, V);
592 replaceOperand(I, 1, B);
593 // Conservatively clear the optional flags, since they may not be
594 // preserved by the reassociation.
595 ClearSubclassDataAfterReassociation(I);
596 Changed = true;
597 ++NumReassoc;
598 continue;
599 }
600 }
601
602 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
603 if (Op1 && Op1->getOpcode() == Opcode) {
604 Value *A = I.getOperand(0);
605 Value *B = Op1->getOperand(0);
606 Value *C = Op1->getOperand(1);
607
608 // Does "C op A" simplify?
609 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
610 // It simplifies to V. Form "B op V".
611 replaceOperand(I, 0, B);
612 replaceOperand(I, 1, V);
613 // Conservatively clear the optional flags, since they may not be
614 // preserved by the reassociation.
615 ClearSubclassDataAfterReassociation(I);
616 Changed = true;
617 ++NumReassoc;
618 continue;
619 }
620 }
621
622 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
623 // if C1 and C2 are constants.
624 Value *A, *B;
625 Constant *C1, *C2, *CRes;
626 if (Op0 && Op1 &&
627 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
628 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
629 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
630 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
631 bool IsNUW = hasNoUnsignedWrap(I) &&
632 hasNoUnsignedWrap(*Op0) &&
633 hasNoUnsignedWrap(*Op1);
634 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
635 BinaryOperator::CreateNUW(Opcode, A, B) :
636 BinaryOperator::Create(Opcode, A, B);
637
638 if (isa<FPMathOperator>(NewBO)) {
639 FastMathFlags Flags = I.getFastMathFlags() &
640 Op0->getFastMathFlags() &
641 Op1->getFastMathFlags();
642 NewBO->setFastMathFlags(Flags);
643 }
644 InsertNewInstWith(NewBO, I.getIterator());
645 NewBO->takeName(Op1);
646 replaceOperand(I, 0, NewBO);
647 replaceOperand(I, 1, CRes);
648 // Conservatively clear the optional flags, since they may not be
649 // preserved by the reassociation.
650 ClearSubclassDataAfterReassociation(I);
651 if (IsNUW)
652 I.setHasNoUnsignedWrap(true);
653
654 Changed = true;
655 continue;
656 }
657 }
658
659 // No further simplifications.
660 return Changed;
661 } while (true);
662}
663
664/// Return whether "X LOp (Y ROp Z)" is always equal to
665/// "(X LOp Y) ROp (X LOp Z)".
666 static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
667 Instruction::BinaryOps ROp) {
668 // X & (Y | Z) <--> (X & Y) | (X & Z)
669 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
670 if (LOp == Instruction::And)
671 return ROp == Instruction::Or || ROp == Instruction::Xor;
672
673 // X | (Y & Z) <--> (X | Y) & (X | Z)
674 if (LOp == Instruction::Or)
675 return ROp == Instruction::And;
676
677 // X * (Y + Z) <--> (X * Y) + (X * Z)
678 // X * (Y - Z) <--> (X * Y) - (X * Z)
679 if (LOp == Instruction::Mul)
680 return ROp == Instruction::Add || ROp == Instruction::Sub;
681
682 return false;
683}
684
685/// Return whether "(X LOp Y) ROp Z" is always equal to
686/// "(X ROp Z) LOp (Y ROp Z)".
687 static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
688 Instruction::BinaryOps ROp) {
689 if (Instruction::isCommutative(ROp))
690 return leftDistributesOverRight(ROp, LOp);
691
692 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
693 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
694
695 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
696 // but this requires knowing that the addition does not overflow and other
697 // such subtleties.
698}
699
700/// This function returns the identity value for the given opcode, which can be used to
701/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
702 static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
703 if (isa<Constant>(V))
704 return nullptr;
705
706 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
707}
708
709/// This function predicates factorization using distributive laws. By default,
710/// it just returns the 'Op' inputs. But for special-cases like
711/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
712/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
713/// allow more factorization opportunities.
714 static Instruction::BinaryOps
715 getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
716 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
717 assert(Op && "Expected a binary operator");
718 LHS = Op->getOperand(0);
719 RHS = Op->getOperand(1);
720 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
721 Constant *C;
722 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
723 // X << C --> X * (1 << C)
724 RHS = ConstantFoldBinaryInstruction(
725 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
726 assert(RHS && "Constant folding of immediate constants failed");
727 return Instruction::Mul;
728 }
729 // TODO: We can add other conversions e.g. shr => div etc.
730 }
731 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
732 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
733 match(Op, m_LShr(m_NonNegative(), m_Value()))) {
734 // lshr nneg C, X --> ashr nneg C, X
735 return Instruction::AShr;
736 }
737 }
738 return Op->getOpcode();
739}
740
741/// This tries to simplify binary operations by factorizing out common terms
742/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
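///
/// For instance:
///    %ab = mul i32 %a, %b
///    %ac = mul i32 %a, %c
///    %r  = add i32 %ab, %ac
/// can become:
///    %bc = add i32 %b, %c
///    %r  = mul i32 %a, %bc
/// provided the add simplifies or one of the original multiplies becomes dead.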
743 static Value *tryFactorization(BinaryOperator &I, SimplifyQuery SQ,
744 InstCombiner::BuilderTy &Builder,
745 Instruction::BinaryOps InnerOpcode, Value *A,
746 Value *B, Value *C, Value *D) {
747 assert(A && B && C && D && "All values must be provided");
748
749 Value *V = nullptr;
750 Value *RetVal = nullptr;
751 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
752 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
753
754 // Does "X op' Y" always equal "Y op' X"?
755 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
756
757 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
758 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
759 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
760 // commutative case, "(A op' B) op (C op' A)"?
761 if (A == C || (InnerCommutative && A == D)) {
762 if (A != C)
763 std::swap(C, D);
764 // Consider forming "A op' (B op D)".
765 // If "B op D" simplifies then it can be formed with no cost.
766 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
767
768 // If "B op D" doesn't simplify then only go on if one of the existing
769 // operations "A op' B" and "C op' D" will be zapped as no longer used.
770 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
771 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
772 if (V)
773 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
774 }
775 }
776
777 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
778 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
779 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
780 // commutative case, "(A op' B) op (B op' D)"?
781 if (B == D || (InnerCommutative && B == C)) {
782 if (B != D)
783 std::swap(C, D);
784 // Consider forming "(A op C) op' B".
785 // If "A op C" simplifies then it can be formed with no cost.
786 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
787
788 // If "A op C" doesn't simplify then only go on if one of the existing
789 // operations "A op' B" and "C op' D" will be zapped as no longer used.
790 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
791 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
792 if (V)
793 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
794 }
795 }
796
797 if (!RetVal)
798 return nullptr;
799
800 ++NumFactor;
801 RetVal->takeName(&I);
802
803 // Try to add no-overflow flags to the final value.
804 if (isa<BinaryOperator>(RetVal)) {
805 bool HasNSW = false;
806 bool HasNUW = false;
807 if (isa<OverflowingBinaryOperator>(&I)) {
808 HasNSW = I.hasNoSignedWrap();
809 HasNUW = I.hasNoUnsignedWrap();
810 }
811 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
812 HasNSW &= LOBO->hasNoSignedWrap();
813 HasNUW &= LOBO->hasNoUnsignedWrap();
814 }
815
816 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
817 HasNSW &= ROBO->hasNoSignedWrap();
818 HasNUW &= ROBO->hasNoUnsignedWrap();
819 }
820
821 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
822 // We can propagate 'nsw' if we know that
823 // %Y = mul nsw i16 %X, C
824 // %Z = add nsw i16 %Y, %X
825 // =>
826 // %Z = mul nsw i16 %X, C+1
827 //
828 // iff C+1 isn't INT_MIN
829 const APInt *CInt;
830 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
831 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
832
833 // nuw can be propagated with any constant or nuw value.
834 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
835 }
836 }
837 return RetVal;
838}
839
840// If `I` has one Const operand and the other matches `(ctpop (not x))`,
841// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
842// This is only useful if the new subtract can fold, so we only handle the
843// following cases:
844// 1) (add/sub/disjoint_or C, (ctpop (not x))
845//    -> (add/sub/disjoint_or C', (ctpop x))
846// 2) (cmp pred C, (ctpop (not x))
847//    -> (cmp pred C', (ctpop x))
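//
// For example, over i8 (illustrative only):
//   %n = xor i8 %x, -1
//   %p = call i8 @llvm.ctpop.i8(i8 %n)
//   %r = add i8 %p, 5
// can be rewritten, using ctpop(~x) == 8 - ctpop(x), as:
//   %p2 = call i8 @llvm.ctpop.i8(i8 %x)
//   %r  = sub i8 13, %p2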
848 Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
849 unsigned Opc = I->getOpcode();
850 unsigned ConstIdx = 1;
851 switch (Opc) {
852 default:
853 return nullptr;
854 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
855 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
856 // is constant.
857 case Instruction::Sub:
858 ConstIdx = 0;
859 break;
860 case Instruction::ICmp:
861 // Signed predicates aren't correct in some edge cases, e.g. for i2 types.
862 // Also, since (ctpop x) is known to be in [0, log2(BitWidth(x))], almost
863 // all signed comparisons against it are simplified to unsigned ones.
864 if (cast<ICmpInst>(I)->isSigned())
865 return nullptr;
866 break;
867 case Instruction::Or:
868 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
869 return nullptr;
870 [[fallthrough]];
871 case Instruction::Add:
872 break;
873 }
874
875 Value *Op;
876 // Find ctpop.
877 if (!match(I->getOperand(1 - ConstIdx),
878 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
879 return nullptr;
880
881 Constant *C;
882 // Check other operand is ImmConstant.
883 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
884 return nullptr;
885
886 Type *Ty = Op->getType();
887 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
888 // Need extra check for icmp. Note if this check is true, it generally means
889 // the icmp will simplify to true/false.
890 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
891 Constant *Cmp =
892 ConstantFoldCompareInstOperands(ICmpInst::ICMP_ULE, C, BitWidthC, DL);
893 if (!Cmp || !Cmp->isZeroValue())
894 return nullptr;
895 }
896
897 // Check we can invert `(not x)` for free.
898 bool Consumes = false;
899 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
900 return nullptr;
901 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
902 assert(NotOp != nullptr &&
903 "Desync between isFreeToInvert and getFreelyInverted");
904
905 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
906
907 Value *R = nullptr;
908
909 // Do the transformation here to avoid potentially introducing an infinite
910 // loop.
911 switch (Opc) {
912 case Instruction::Sub:
913 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
914 break;
915 case Instruction::Or:
916 case Instruction::Add:
917 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
918 break;
919 case Instruction::ICmp:
920 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
921 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
922 break;
923 default:
924 llvm_unreachable("Unhandled Opcode");
925 }
926 assert(R != nullptr);
927 return replaceInstUsesWith(*I, R);
928}
929
930// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
931// IFF
932// 1) the logic_shifts match
933//    2) either BinOp1 is `and`, or the binops fully distribute over the
934//       shift (anything but `add` with `lshr`) and either BinOp2 is `and` or
935//       (logic_shift (inv_logic_shift C1, C), C) == C1
936//
937// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
938//
939// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
940// IFF
941// 1) the logic_shifts match
942// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
943//
944// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
945//
946// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
947// IFF
948// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
949// 2) Binop2 is `not`
950//
951// -> (arithmetic_shift Binop1((not X), Y), Amt)
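//
// As an illustration of the first pattern with matching lshr amounts:
//   %xs = lshr i32 %x, 4
//   %m  = and i32 %xs, 255
//   %ys = lshr i32 %y, 4
//   %r  = and i32 %m, %ys
// can be rewritten as:
//   %t  = and i32 %x, 4080        ; 255 shifted back up by 4
//   %t2 = and i32 %t, %y
//   %r  = lshr i32 %t2, 4
// so only a single shift remains.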
952
953 Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
954 const DataLayout &DL = I.getDataLayout();
955 auto IsValidBinOpc = [](unsigned Opc) {
956 switch (Opc) {
957 default:
958 return false;
959 case Instruction::And:
960 case Instruction::Or:
961 case Instruction::Xor:
962 case Instruction::Add:
963 // Skip Sub as we only match constant masks which will canonicalize to use
964 // add.
965 return true;
966 }
967 };
968
969 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
970 // constraints.
971 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
972 unsigned ShOpc) {
973 assert(ShOpc != Instruction::AShr);
974 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
975 ShOpc == Instruction::Shl;
976 };
977
978 auto GetInvShift = [](unsigned ShOpc) {
979 assert(ShOpc != Instruction::AShr);
980 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
981 };
982
983 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
984 unsigned ShOpc, Constant *CMask,
985 Constant *CShift) {
986 // If the BinOp1 is `and` we don't need to check the mask.
987 if (BinOpc1 == Instruction::And)
988 return true;
989
990 // For all other possible transfers we need complete distributable
991 // binop/shift (anything but `add` + `lshr`).
992 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
993 return false;
994
995 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
996 // vecs, otherwise the mask will be simplified and the following check will
997 // handle it).
998 if (BinOpc2 == Instruction::And)
999 return true;
1000
1001 // Otherwise, need mask that meets the below requirement.
1002 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1003 Constant *MaskInvShift =
1004 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1005 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1006 CMask;
1007 };
1008
1009 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1010 Constant *CMask, *CShift;
1011 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1012 if (!match(I.getOperand(ShOpnum),
1013 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1014 return nullptr;
1015 if (!match(I.getOperand(1 - ShOpnum),
1017 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1018 m_Value(ShiftedX)),
1019 m_Value(Mask))))
1020 return nullptr;
1021 // Make sure we are matching instruction shifts and not ConstantExpr
1022 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1023 auto *IX = dyn_cast<Instruction>(ShiftedX);
1024 if (!IY || !IX)
1025 return nullptr;
1026
1027 // LHS and RHS need same shift opcode
1028 unsigned ShOpc = IY->getOpcode();
1029 if (ShOpc != IX->getOpcode())
1030 return nullptr;
1031
1032 // Make sure binop is real instruction and not ConstantExpr
1033 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1034 if (!BO2)
1035 return nullptr;
1036
1037 unsigned BinOpc = BO2->getOpcode();
1038 // Make sure we have valid binops.
1039 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1040 return nullptr;
1041
1042 if (ShOpc == Instruction::AShr) {
1043 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1044 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1045 Value *NotX = Builder.CreateNot(X);
1046 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1047 return BinaryOperator::Create(
1048 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1049 }
1050
1051 return nullptr;
1052 }
1053
1054 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1055 // distribute to drop the shift irrelevant of constants.
1056 if (BinOpc == I.getOpcode() &&
1057 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1058 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1059 Value *NewBinOp1 = Builder.CreateBinOp(
1060 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1061 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1062 }
1063
1064 // Otherwise we can only distribute by constant shifting the mask, so
1065 // ensure we have constants.
1066 if (!match(Shift, m_ImmConstant(CShift)))
1067 return nullptr;
1068 if (!match(Mask, m_ImmConstant(CMask)))
1069 return nullptr;
1070
1071 // Check if we can distribute the binops.
1072 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1073 return nullptr;
1074
1075 Constant *NewCMask =
1076 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1077 Value *NewBinOp2 = Builder.CreateBinOp(
1078 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1079 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1080 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1081 NewBinOp1, CShift);
1082 };
1083
1084 if (Instruction *R = MatchBinOp(0))
1085 return R;
1086 return MatchBinOp(1);
1087}
1088
1089// (Binop (zext C), (select C, T, F))
1090// -> (select C, (binop 1, T), (binop 0, F))
1091//
1092// (Binop (sext C), (select C, T, F))
1093// -> (select C, (binop -1, T), (binop 0, F))
1094//
1095// Attempt to simplify binary operations into a select with folded args, when
1096// one operand of the binop is a select instruction and the other operand is a
1097// zext/sext extension, whose value is the select condition.
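//
// For instance:
//   %e = zext i1 %c to i32
//   %s = select i1 %c, i32 %t, i32 %f
//   %r = add i32 %e, %s
// becomes:
//   %t1 = add i32 1, %t
//   %f0 = add i32 0, %f
//   %r  = select i1 %c, i32 %t1, i32 %f0
// where the "add i32 0, %f" arm then folds away.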
1098 Instruction *InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(
1099 BinaryOperator &I) {
1100 // TODO: this simplification may be extended to any speculatable instruction,
1101 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1102 Instruction::BinaryOps Opc = I.getOpcode();
1103 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1104 Value *A, *CondVal, *TrueVal, *FalseVal;
1105 Value *CastOp;
1106
1107 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1108 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1109 A->getType()->getScalarSizeInBits() == 1 &&
1110 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1111 m_Value(FalseVal)));
1112 };
1113
1114 // Make sure one side of the binop is a select instruction, and the other is a
1115 // zero/sign extension operating on an i1.
1116 if (MatchSelectAndCast(LHS, RHS))
1117 CastOp = LHS;
1118 else if (MatchSelectAndCast(RHS, LHS))
1119 CastOp = RHS;
1120 else
1121 return nullptr;
1122
1123 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1124 bool IsCastOpRHS = (CastOp == RHS);
1125 bool IsZExt = isa<ZExtInst>(CastOp);
1126 Constant *C;
1127
1128 if (IsTrueArm) {
1129 C = Constant::getNullValue(V->getType());
1130 } else if (IsZExt) {
1131 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1132 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1133 } else {
1134 C = Constant::getAllOnesValue(V->getType());
1135 }
1136
1137 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1138 : Builder.CreateBinOp(Opc, C, V);
1139 };
1140
1141 // If the value used in the zext/sext is the select condition, or the negated
1142 // of the select condition, the binop can be simplified.
1143 if (CondVal == A) {
1144 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1145 return SelectInst::Create(CondVal, NewTrueVal,
1146 NewFoldedConst(true, FalseVal));
1147 }
1148
1149 if (match(A, m_Not(m_Specific(CondVal)))) {
1150 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1151 return SelectInst::Create(CondVal, NewTrueVal,
1152 NewFoldedConst(false, FalseVal));
1153 }
1154
1155 return nullptr;
1156}
1157
1158 Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1159 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1160 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1161 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1162 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1163 Value *A, *B, *C, *D;
1164 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1165
1166 if (Op0)
1167 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1168 if (Op1)
1169 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1170
1171 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1172 // a common term.
1173 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1174 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1175 return V;
1176
1177 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1178 // term.
1179 if (Op0)
1180 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1181 if (Value *V =
1182 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1183 return V;
1184
1185 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1186 // term.
1187 if (Op1)
1188 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1189 if (Value *V =
1190 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1191 return V;
1192
1193 return nullptr;
1194}
1195
1196/// This tries to simplify binary operations which some other binary operation
1197/// distributes over either by factorizing out common terms
1198/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1199/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1200/// Returns the simplified value, or null if it didn't simplify.
1201 Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
1202 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1203 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1204 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1205 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1206
1207 // Factorization.
1208 if (Value *R = tryFactorizationFolds(I))
1209 return R;
1210
1211 // Expansion.
1212 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1213 // The instruction has the form "(A op' B) op C". See if expanding it out
1214 // to "(A op C) op' (B op C)" results in simplifications.
1215 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1216 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1217
1218 // Disable the use of undef because it's not safe to distribute undef.
1219 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1220 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1221 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1222
1223 // Do "A op C" and "B op C" both simplify?
1224 if (L && R) {
1225 // They do! Return "L op' R".
1226 ++NumExpand;
1227 C = Builder.CreateBinOp(InnerOpcode, L, R);
1228 C->takeName(&I);
1229 return C;
1230 }
1231
1232 // Does "A op C" simplify to the identity value for the inner opcode?
1233 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1234 // They do! Return "B op C".
1235 ++NumExpand;
1236 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1237 C->takeName(&I);
1238 return C;
1239 }
1240
1241 // Does "B op C" simplify to the identity value for the inner opcode?
1242 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1243 // They do! Return "A op C".
1244 ++NumExpand;
1245 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1246 C->takeName(&I);
1247 return C;
1248 }
1249 }
1250
1251 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1252 // The instruction has the form "A op (B op' C)". See if expanding it out
1253 // to "(A op B) op' (A op C)" results in simplifications.
1254 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1255 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1256
1257 // Disable the use of undef because it's not safe to distribute undef.
1258 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1259 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1260 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1261
1262 // Do "A op B" and "A op C" both simplify?
1263 if (L && R) {
1264 // They do! Return "L op' R".
1265 ++NumExpand;
1266 A = Builder.CreateBinOp(InnerOpcode, L, R);
1267 A->takeName(&I);
1268 return A;
1269 }
1270
1271 // Does "A op B" simplify to the identity value for the inner opcode?
1272 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1273 // They do! Return "A op C".
1274 ++NumExpand;
1275 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1276 A->takeName(&I);
1277 return A;
1278 }
1279
1280 // Does "A op C" simplify to the identity value for the inner opcode?
1281 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1282 // They do! Return "A op B".
1283 ++NumExpand;
1284 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1285 A->takeName(&I);
1286 return A;
1287 }
1288 }
1289
1290 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1291}
1292
1293static std::optional<std::pair<Value *, Value *>>
1294 matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1295 if (LHS->getParent() != RHS->getParent())
1296 return std::nullopt;
1297
1298 if (LHS->getNumIncomingValues() < 2)
1299 return std::nullopt;
1300
1301 if (!equal(LHS->blocks(), RHS->blocks()))
1302 return std::nullopt;
1303
1304 Value *L0 = LHS->getIncomingValue(0);
1305 Value *R0 = RHS->getIncomingValue(0);
1306
1307 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1308 Value *L1 = LHS->getIncomingValue(I);
1309 Value *R1 = RHS->getIncomingValue(I);
1310
1311 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1312 continue;
1313
1314 return std::nullopt;
1315 }
1316
1317 return std::optional(std::pair(L0, R0));
1318}
1319
1320std::optional<std::pair<Value *, Value *>>
1321InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1322 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1323 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1324 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1325 return std::nullopt;
1326 switch (LHSInst->getOpcode()) {
1327 case Instruction::PHI:
1328 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1329 case Instruction::Select: {
1330 Value *Cond = LHSInst->getOperand(0);
1331 Value *TrueVal = LHSInst->getOperand(1);
1332 Value *FalseVal = LHSInst->getOperand(2);
1333 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1334 FalseVal == RHSInst->getOperand(1))
1335 return std::pair(TrueVal, FalseVal);
1336 return std::nullopt;
1337 }
1338 case Instruction::Call: {
1339 // Match min(a, b) and max(a, b)
1340 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1341 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1342 if (LHSMinMax && RHSMinMax &&
1343 LHSMinMax->getPredicate() ==
1344 ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
1345 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1346 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1347 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1348 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1349 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1350 return std::nullopt;
1351 }
1352 default:
1353 return std::nullopt;
1354 }
1355}
1356
1357 Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1358 Value *LHS,
1359 Value *RHS) {
1360 Value *A, *B, *C, *D, *E, *F;
1361 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1362 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1363 if (!LHSIsSelect && !RHSIsSelect)
1364 return nullptr;
1365
1366 SelectInst *SI = (LHSIsSelect && RHSIsSelect)
1367 ? nullptr
1368 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1369
1370 FastMathFlags FMF;
1371 BuilderTy::FastMathFlagGuard Guard(Builder);
1372 if (isa<FPMathOperator>(&I)) {
1373 FMF = I.getFastMathFlags();
1374 Builder.setFastMathFlags(FMF);
1375 }
1376
1377 Instruction::BinaryOps Opcode = I.getOpcode();
1378 SimplifyQuery Q = SQ.getWithInstruction(&I);
1379
1380 Value *Cond, *True = nullptr, *False = nullptr;
1381
1382 // Special-case for add/negate combination. Replace the zero in the negation
1383 // with the trailing add operand:
1384 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1385 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1386 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1387 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1388 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1389 return nullptr;
1390 Value *N;
1391 if (True && match(FVal, m_Neg(m_Value(N)))) {
1392 Value *Sub = Builder.CreateSub(Z, N);
1393 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1394 }
1395 if (False && match(TVal, m_Neg(m_Value(N)))) {
1396 Value *Sub = Builder.CreateSub(Z, N);
1397 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1398 }
1399 return nullptr;
1400 };
1401
1402 if (LHSIsSelect && RHSIsSelect && A == D) {
1403 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1404 Cond = A;
1405 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1406 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1407
1408 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1409 if (False && !True)
1410 True = Builder.CreateBinOp(Opcode, B, E);
1411 else if (True && !False)
1412 False = Builder.CreateBinOp(Opcode, C, F);
1413 }
1414 } else if (LHSIsSelect && LHS->hasOneUse()) {
1415 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1416 Cond = A;
1417 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1418 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1419 if (Value *NewSel = foldAddNegate(B, C, RHS))
1420 return NewSel;
1421 } else if (RHSIsSelect && RHS->hasOneUse()) {
1422 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1423 Cond = D;
1424 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1425 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1426 if (Value *NewSel = foldAddNegate(E, F, LHS))
1427 return NewSel;
1428 }
1429
1430 if (!True || !False)
1431 return nullptr;
1432
1433 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1434 NewSI->takeName(&I);
1435 return NewSI;
1436}
1437
1438/// Freely adapt every user of V as-if V was changed to !V.
1439/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1440 void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
1441 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1442 for (User *U : make_early_inc_range(I->users())) {
1443 if (U == IgnoredUser)
1444 continue; // Don't consider this user.
1445 switch (cast<Instruction>(U)->getOpcode()) {
1446 case Instruction::Select: {
1447 auto *SI = cast<SelectInst>(U);
1448 SI->swapValues();
1449 SI->swapProfMetadata();
1450 break;
1451 }
1452 case Instruction::Br: {
1453 auto *BI = cast<BranchInst>(U);
1454 BI->swapSuccessors(); // swaps prof metadata too
1455 if (BPI)
1456 BPI->swapSuccEdgesProbabilities(BI->getParent());
1457 break;
1458 }
1459 case Instruction::Xor:
1460 replaceInstUsesWith(cast<Instruction>(*U), I);
1461 // Add to worklist for DCE.
1462 addToWorklist(cast<Instruction>(U));
1463 break;
1464 default:
1465 llvm_unreachable("Got unexpected user - out of sync with "
1466 "canFreelyInvertAllUsersOf() ?");
1467 }
1468 }
1469
1470 // Update pre-existing debug value uses.
1471 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1472 llvm::findDbgValues(I, DbgVariableRecords);
1473
1474 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1475 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1476 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1477 Idx != End; ++Idx)
1478 if (DbgVal->getVariableLocationOp(Idx) == I)
1479 DbgVal->setExpression(
1480 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1481 }
1482}
1483
1484/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1485/// constant zero (which is the 'negate' form).
1486Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1487 Value *NegV;
1488 if (match(V, m_Neg(m_Value(NegV))))
1489 return NegV;
1490
1491 // Constants can be considered to be negated values if they can be folded.
1492 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1493 return ConstantExpr::getNeg(C);
1494
1495 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1496 if (C->getType()->getElementType()->isIntegerTy())
1497 return ConstantExpr::getNeg(C);
1498
1499 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1500 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1501 Constant *Elt = CV->getAggregateElement(i);
1502 if (!Elt)
1503 return nullptr;
1504
1505 if (isa<UndefValue>(Elt))
1506 continue;
1507
1508 if (!isa<ConstantInt>(Elt))
1509 return nullptr;
1510 }
1511 return ConstantExpr::getNeg(CV);
1512 }
1513
1514 // Negate integer vector splats.
1515 if (auto *CV = dyn_cast<Constant>(V))
1516 if (CV->getType()->isVectorTy() &&
1517 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1518 return ConstantExpr::getNeg(CV);
1519
1520 return nullptr;
1521}
1522
1523// Try to fold:
1524// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1525// -> ({s|u}itofp (int_binop x, y))
1526// 2) (fp_binop ({s|u}itofp x), FpC)
1527// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1528//
1529// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1530Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1531 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1532 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1533
1534 Type *FPTy = BO.getType();
1535 Type *IntTy = IntOps[0]->getType();
1536
1537 unsigned IntSz = IntTy->getScalarSizeInBits();
1538 // This is the maximum number of in-use bits by the integer where the int -> fp
1539 // casts are exact.
1540 unsigned MaxRepresentableBits =
1541 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1542
1543 // Preserve known number of leading bits. This can allow us to perform
1544 // trivial nsw/nuw checks later on.
1545 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1546
1547 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1548 // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1549 auto IsNonZero = [&](unsigned OpNo) -> bool {
1550 if (OpsKnown[OpNo].hasKnownBits() &&
1551 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1552 return true;
1553 return isKnownNonZero(IntOps[OpNo], SQ);
1554 };
1555
1556 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1557 // NB: This matches the impl in ValueTracking, we just try to use cached
1558 // knownbits here. If we ever start supporting WithCache for
1559 // `isKnownNonNegative`, change this to an explicit call.
1560 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1561 };
1562
1563 // Check if we know for certain that ({s|u}itofp op) is exact.
1564 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1565 // Can we treat this operand as the desired sign?
1566 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1567 !IsNonNeg(OpNo))
1568 return false;
1569
1570 // If fp precision >= bitwidth(op) then it's exact.
1571 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1572 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1573 // handled specially. We can't, however, increase the bound arbitrarily for
1574 // `sitofp` as for larger sizes, it won't sign extend.
1575 if (MaxRepresentableBits < IntSz) {
1576 // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1577 // numSignBits(op).
1578 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1579 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1580 if (OpsFromSigned)
1581 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1582 // Finally for unsigned check that fp precision >= bitwidth(op) -
1583 // numLeadingZeros(op).
1584 else {
1585 NumUsedLeadingBits[OpNo] =
1586 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1587 }
1588 }
1589 // NB: We could also check if op is known to be a power of 2 or zero (which
1590 // will always be representable). It's unlikely, however, that if we are
1591 // unable to bound op in any way, we will be able to pass the overflow
1592 // checks later on.
1593
1594 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1595 return false;
1596 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1597 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1598 IsNonZero(OpNo);
1599 };
1600
1601 // If we have a constant rhs, see if we can losslessly convert it to an int.
1602 if (Op1FpC != nullptr) {
1603 // Signed + Mul req non-zero
1604 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1605 !match(Op1FpC, m_NonZeroFP()))
1606 return nullptr;
1607
1609 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1610 IntTy, DL);
1611 if (Op1IntC == nullptr)
1612 return nullptr;
1613 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1614 : Instruction::UIToFP,
1615 Op1IntC, FPTy, DL) != Op1FpC)
1616 return nullptr;
1617
1618 // First try to keep sign of cast the same.
1619 IntOps[1] = Op1IntC;
1620 }
1621
1622 // Ensure lhs/rhs integer types match.
1623 if (IntTy != IntOps[1]->getType())
1624 return nullptr;
1625
1626 if (Op1FpC == nullptr) {
1627 if (!IsValidPromotion(1))
1628 return nullptr;
1629 }
1630 if (!IsValidPromotion(0))
1631 return nullptr;
1632
1633 // Finally, we check that the integer version of the binop will not overflow.
1634 Instruction::BinaryOps IntOpc;
1635 // Because of the precision check, we can often rule out overflows.
1636 bool NeedsOverflowCheck = true;
1637 // Try to conservatively rule out overflow based on the already done precision
1638 // checks.
1639 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1640 unsigned OverflowMaxCurBits =
1641 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1642 bool OutputSigned = OpsFromSigned;
1643 switch (BO.getOpcode()) {
1644 case Instruction::FAdd:
1645 IntOpc = Instruction::Add;
1646 OverflowMaxOutputBits += OverflowMaxCurBits;
1647 break;
1648 case Instruction::FSub:
1649 IntOpc = Instruction::Sub;
1650 OverflowMaxOutputBits += OverflowMaxCurBits;
1651 break;
1652 case Instruction::FMul:
1653 IntOpc = Instruction::Mul;
1654 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1655 break;
1656 default:
1657 llvm_unreachable("Unsupported binop");
1658 }
1659 // The precision check may have already ruled out overflow.
1660 if (OverflowMaxOutputBits < IntSz) {
1661 NeedsOverflowCheck = false;
1662 // We can bound the unsigned overflow from sub to an in-range signed value
1663 // (this is what allows us to avoid the overflow check for sub).
1664 if (IntOpc == Instruction::Sub)
1665 OutputSigned = true;
1666 }
1667
1668 // Precision check did not rule out overflow, so need to check.
1669 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1670 // `IntOps[...]` arguments to `KnownOps[...]`.
1671 if (NeedsOverflowCheck &&
1672 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1673 return nullptr;
1674
1675 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1676 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1677 IntBO->setHasNoSignedWrap(OutputSigned);
1678 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1679 }
1680 if (OutputSigned)
1681 return new SIToFPInst(IntBinOp, FPTy);
1682 return new UIToFPInst(IntBinOp, FPTy);
1683}
1684
1685// Try to fold:
1686// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1687// -> ({s|u}itofp (int_binop x, y))
1688// 2) (fp_binop ({s|u}itofp x), FpC)
1689// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1690Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1691 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1692 Constant *Op1FpC = nullptr;
1693 // Check for:
1694 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1695 // 2) (binop ({s|u}itofp x), FpC)
1696 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1697 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1698 return nullptr;
1699
1700 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1701 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1702 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1703 return nullptr;
1704
1705 // Cache KnownBits a bit to potentially save some analysis.
1706 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1707
1708 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1709 // different constraints depending on the sign of the cast.
1710 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1711 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1712 IntOps, Op1FpC, OpsKnown))
1713 return R;
1714 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1715 Op1FpC, OpsKnown);
1716}
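// Illustrative sketch of the fold above (hypothetical IR; value names and
// types are made up): assuming the precision and overflow checks in
// foldFBinOpOfIntCastsFromSign succeed,
//   %xf = sitofp i8 %x to float
//   %yf = sitofp i8 %y to float
//   %r  = fadd float %xf, %yf
// can be rewritten as
//   %t = add nsw i8 %x, %y
//   %r = sitofp i8 %t to float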
1717
1718/// A binop with a constant operand and a sign-extended boolean operand may be
1719/// converted into a select of constants by applying the binary operation to
1720/// the constant with the two possible values of the extended boolean (0 or -1).
1721Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1722 // TODO: Handle non-commutative binop (constant is operand 0).
1723 // TODO: Handle zext.
1724 // TODO: Peek through 'not' of cast.
1725 Value *BO0 = BO.getOperand(0);
1726 Value *BO1 = BO.getOperand(1);
1727 Value *X;
1728 Constant *C;
1729 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1730 !X->getType()->isIntOrIntVectorTy(1))
1731 return nullptr;
1732
1733 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1734 Constant *Ones = Constant::getAllOnesValue(BO.getType());
1735 Constant *Zero = Constant::getNullValue(BO.getType());
1736 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1737 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1738 return SelectInst::Create(X, TVal, FVal);
1739}
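// Illustrative sketch of the transform above (hypothetical IR; names are
// made up): with a sign-extended i1 and an immediate constant,
//   %e = sext i1 %b to i32
//   %r = and i32 %e, 42
// becomes
//   %r = select i1 %b, i32 42, i32 0     ; and(-1, 42) vs. and(0, 42)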
1740
1741 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1742 bool IsTrueArm) {
1743 SmallVector<Value *> Ops;
1744 for (Value *Op : I.operands()) {
1745 Value *V = nullptr;
1746 if (Op == SI) {
1747 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1748 } else if (match(SI->getCondition(),
1751 m_Specific(Op), m_Value(V))) &&
1753 // Pass
1754 } else {
1755 V = Op;
1756 }
1757 Ops.push_back(V);
1758 }
1759
1760 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1761}
1762
1763 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1764 Value *NewOp, InstCombiner &IC) {
1765 Instruction *Clone = I.clone();
1766 Clone->replaceUsesOfWith(SI, NewOp);
1768 IC.InsertNewInstBefore(Clone, I.getIterator());
1769 return Clone;
1770}
1771
1772 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1773 bool FoldWithMultiUse) {
1774 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1775 if (!SI->hasOneUse() && !FoldWithMultiUse)
1776 return nullptr;
1777
1778 Value *TV = SI->getTrueValue();
1779 Value *FV = SI->getFalseValue();
1780
1781 // Bool selects with constant operands can be folded to logical ops.
1782 if (SI->getType()->isIntOrIntVectorTy(1))
1783 return nullptr;
1784
1785 // Avoid breaking min/max reduction pattern,
1786 // which is necessary for vectorization later.
1787 if (isa<MinMaxIntrinsic>(&Op))
1788 for (Value *IntrinOp : Op.operands())
1789 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1790 for (Value *PhiOp : PN->operands())
1791 if (PhiOp == &Op)
1792 return nullptr;
1793
1794 // Test if a FCmpInst instruction is used exclusively by a select as
1795 // part of a minimum or maximum operation. If so, refrain from doing
1796 // any other folding. This helps out other analyses which understand
1797 // non-obfuscated minimum and maximum idioms. And in this case, at
1798 // least one of the comparison operands has at least one user besides
1799 // the compare (the select), which would often largely negate the
1800 // benefit of folding anyway.
1801 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1802 if (CI->hasOneUse()) {
1803 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1804 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1805 !CI->isCommutative())
1806 return nullptr;
1807 }
1808 }
1809
1810 // Make sure that one of the select arms folds successfully.
1811 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1812 Value *NewFV =
1813 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1814 if (!NewTV && !NewFV)
1815 return nullptr;
1816
1817 // Create an instruction for the arm that did not fold.
1818 if (!NewTV)
1819 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1820 if (!NewFV)
1821 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1822 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1823}
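// Illustrative sketch (hypothetical IR; names are made up): when both select
// arms are constants, the operation folds into each arm,
//   %s = select i1 %c, i32 0, i32 8
//   %r = add i32 %s, 1
// becomes
//   %r = select i1 %c, i32 1, i32 9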
1824
1825 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1826 Value *InValue, BasicBlock *InBB,
1827 const DataLayout &DL,
1828 const SimplifyQuery SQ) {
1829 // NB: It is a precondition of this transform that the operands be
1830 // phi translatable!
1831 SmallVector<Value *> Ops;
1832 for (Value *Op : I.operands()) {
1833 if (Op == PN)
1834 Ops.push_back(InValue);
1835 else
1836 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1837 }
1838
1839 // Don't consider the simplification successful if we get back a constant
1840 // expression. That's just an instruction in hiding.
1841 // Also reject the case where we simplify back to the phi node. We wouldn't
1842 // be able to remove it in that case.
1843 Value *NewVal = simplifyInstructionWithOperands(
1844 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1845 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1846 return NewVal;
1847
1848 // Check if incoming PHI value can be replaced with constant
1849 // based on implied condition.
1850 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1851 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1852 if (TerminatorBI && TerminatorBI->isConditional() &&
1853 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1854 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1855 std::optional<bool> ImpliedCond = isImpliedCondition(
1856 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1857 DL, LHSIsTrue);
1858 if (ImpliedCond)
1859 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1860 }
1861
1862 return nullptr;
1863}
1864
1865 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1866 bool AllowMultipleUses) {
1867 unsigned NumPHIValues = PN->getNumIncomingValues();
1868 if (NumPHIValues == 0)
1869 return nullptr;
1870
1871 // We normally only transform phis with a single use. However, if a PHI has
1872 // multiple uses and they are all the same operation, we can fold *all* of the
1873 // uses into the PHI.
1874 bool OneUse = PN->hasOneUse();
1875 bool IdenticalUsers = false;
1876 if (!AllowMultipleUses && !OneUse) {
1877 // Walk the use list for the instruction, comparing them to I.
1878 for (User *U : PN->users()) {
1880 if (UI != &I && !I.isIdenticalTo(UI))
1881 return nullptr;
1882 }
1883 // Otherwise, we can replace *all* users with the new PHI we form.
1884 IdenticalUsers = true;
1885 }
1886
1887 // Check that all operands are phi-translatable.
1888 for (Value *Op : I.operands()) {
1889 if (Op == PN)
1890 continue;
1891
1892 // Non-instructions never require phi-translation.
1893 auto *I = dyn_cast<Instruction>(Op);
1894 if (!I)
1895 continue;
1896
1897 // Phi-translate can handle phi nodes in the same block.
1898 if (isa<PHINode>(I))
1899 if (I->getParent() == PN->getParent())
1900 continue;
1901
1902 // Operand dominates the block, no phi-translation necessary.
1903 if (DT.dominates(I, PN->getParent()))
1904 continue;
1905
1906 // Not phi-translatable, bail out.
1907 return nullptr;
1908 }
1909
1910 // Check to see whether the instruction can be folded into each phi operand.
1911 // If there is one operand that does not fold, remember the BB it is in.
1912 SmallVector<Value *> NewPhiValues;
1913 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1914 bool SeenNonSimplifiedInVal = false;
1915 for (unsigned i = 0; i != NumPHIValues; ++i) {
1916 Value *InVal = PN->getIncomingValue(i);
1917 BasicBlock *InBB = PN->getIncomingBlock(i);
1918
1919 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1920 NewPhiValues.push_back(NewVal);
1921 continue;
1922 }
1923
1924 // Handle some cases that can't be fully simplified, but where we know that
1925 // the two instructions will fold into one.
1926 auto WillFold = [&]() {
1927 if (!InVal->hasUseList() || !InVal->hasOneUser())
1928 return false;
1929
1930 // icmp of ucmp/scmp with constant will fold to icmp.
1931 const APInt *Ignored;
1932 if (isa<CmpIntrinsic>(InVal) &&
1933 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1934 return true;
1935
1936 // icmp eq zext(bool), 0 will fold to !bool.
1937 if (isa<ZExtInst>(InVal) &&
1938 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1939 match(&I,
1941 return true;
1942
1943 return false;
1944 };
1945
1946 if (WillFold()) {
1947 OpsToMoveUseToIncomingBB.push_back(i);
1948 NewPhiValues.push_back(nullptr);
1949 continue;
1950 }
1951
1952 if (!OneUse && !IdenticalUsers)
1953 return nullptr;
1954
1955 if (SeenNonSimplifiedInVal)
1956 return nullptr; // More than one non-simplified value.
1957 SeenNonSimplifiedInVal = true;
1958
1959 // If there is exactly one non-simplified value, we can insert a copy of the
1960 // operation in that block. However, if this is a critical edge, we would
1961 // be inserting the computation on some other paths (e.g. inside a loop).
1962 // Only do this if the pred block is unconditionally branching into the phi
1963 // block. Also, make sure that the pred block is not dead code.
1964 auto *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1965 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1966 return nullptr;
1967
1968 NewPhiValues.push_back(nullptr);
1969 OpsToMoveUseToIncomingBB.push_back(i);
1970
1971 // If the InVal is an invoke at the end of the pred block, then we can't
1972 // insert a computation after it without breaking the edge.
1973 if (isa<InvokeInst>(InVal))
1974 if (cast<Instruction>(InVal)->getParent() == InBB)
1975 return nullptr;
1976
1977 // Do not push the operation across a loop backedge. This could result in
1978 // an infinite combine loop, and is generally non-profitable (especially
1979 // if the operation was originally outside the loop).
1980 if (isBackEdge(InBB, PN->getParent()))
1981 return nullptr;
1982 }
1983
1984 // Clone the instruction that uses the phi node and move it into the incoming
1985 // BB because we know that the next iteration of InstCombine will simplify it.
1986 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1987 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1988 Value *Op = PN->getIncomingValue(OpIndex);
1989 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1990
1991 Instruction *Clone = Clones.lookup(OpBB);
1992 if (!Clone) {
1993 Clone = I.clone();
1994 for (Use &U : Clone->operands()) {
1995 if (U == PN)
1996 U = Op;
1997 else
1998 U = U->DoPHITranslation(PN->getParent(), OpBB);
1999 }
2000 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2001 Clones.insert({OpBB, Clone});
2002 // We may have speculated the instruction.
2003 Clone->dropUBImplyingAttrsAndMetadata();
2004 }
2005
2006 NewPhiValues[OpIndex] = Clone;
2007 }
2008
2009 // Okay, we can do the transformation: create the new PHI node.
2010 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2011 InsertNewInstBefore(NewPN, PN->getIterator());
2012 NewPN->takeName(PN);
2013 NewPN->setDebugLoc(PN->getDebugLoc());
2014
2015 for (unsigned i = 0; i != NumPHIValues; ++i)
2016 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2017
2018 if (IdenticalUsers) {
2019 // Collect and deduplicate users up-front to avoid iterator invalidation.
2021 for (User *U : PN->users()) {
2022 Instruction *User = cast<Instruction>(U);
2023 if (User == &I)
2024 continue;
2025 ToReplace.insert(User);
2026 }
2027 for (Instruction *I : ToReplace) {
2028 replaceInstUsesWith(*I, NewPN);
2029 eraseInstFromFunction(*I);
2030 }
2031 OneUse = true;
2032 }
2033
2034 if (OneUse) {
2035 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2036 }
2037 return replaceInstUsesWith(I, NewPN);
2038}
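// Illustrative sketch (hypothetical IR; block and value names are made up):
// with a single non-simplified incoming value whose predecessor branches
// unconditionally into the phi block,
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// can become
//   ; in %bb1:  %x.add = add i32 %x, 4
//   %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]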
2039
2040 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2041 if (!BO.isAssociative())
2042 return nullptr;
2043
2044 // Find the interleaved binary ops.
2045 auto Opc = BO.getOpcode();
2046 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2047 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2048 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2049 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2050 !BO0->isAssociative() || !BO1->isAssociative() ||
2051 BO0->getParent() != BO1->getParent())
2052 return nullptr;
2053
2054 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2055 "Expected commutative instructions!");
2056
2057 // Find the matching phis, forming the recurrences.
2058 PHINode *PN0, *PN1;
2059 Value *Start0, *Step0, *Start1, *Step1;
2060 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2061 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2062 PN0->getParent() != PN1->getParent())
2063 return nullptr;
2064
2065 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2066 "Expected PHIs with two incoming values!");
2067
2068 // Convert the start and step values to constants.
2069 auto *Init0 = dyn_cast<Constant>(Start0);
2070 auto *Init1 = dyn_cast<Constant>(Start1);
2071 auto *C0 = dyn_cast<Constant>(Step0);
2072 auto *C1 = dyn_cast<Constant>(Step1);
2073 if (!Init0 || !Init1 || !C0 || !C1)
2074 return nullptr;
2075
2076 // Fold the recurrence constants.
2077 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2078 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2079 if (!Init || !C)
2080 return nullptr;
2081
2082 // Create the reduced PHI.
2083 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2084 "reduced.phi");
2085
2086 // Create the new binary op.
2087 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2088 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2089 // Intersect FMF flags for FADD and FMUL.
2090 FastMathFlags Intersect = BO0->getFastMathFlags() &
2091 BO1->getFastMathFlags() & BO.getFastMathFlags();
2092 NewBO->setFastMathFlags(Intersect);
2093 } else {
2094 OverflowTracking Flags;
2095 Flags.AllKnownNonNegative = false;
2096 Flags.AllKnownNonZero = false;
2097 Flags.mergeFlags(*BO0);
2098 Flags.mergeFlags(*BO1);
2099 Flags.mergeFlags(BO);
2100 Flags.applyFlags(*NewBO);
2101 }
2102 NewBO->takeName(&BO);
2103
2104 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2105 auto *V = PN0->getIncomingValue(I);
2106 auto *BB = PN0->getIncomingBlock(I);
2107 if (V == Init0) {
2108 assert(((PN1->getIncomingValue(0) == Init1 &&
2109 PN1->getIncomingBlock(0) == BB) ||
2110 (PN1->getIncomingValue(1) == Init1 &&
2111 PN1->getIncomingBlock(1) == BB)) &&
2112 "Invalid incoming block!");
2113 NewPN->addIncoming(Init, BB);
2114 } else if (V == BO0) {
2115 assert(((PN1->getIncomingValue(0) == BO1 &&
2116 PN1->getIncomingBlock(0) == BB) ||
2117 (PN1->getIncomingValue(1) == BO1 &&
2118 PN1->getIncomingBlock(1) == BB)) &&
2119 "Invalid incoming block!");
2120 NewPN->addIncoming(NewBO, BB);
2121 } else
2122 llvm_unreachable("Unexpected incoming value!");
2123 }
2124
2125 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2126 << "\n with " << *PN1 << "\n " << *BO1
2127 << '\n');
2128
2129 // Insert the new recurrence and remove the old (dead) ones.
2130 InsertNewInstWith(NewPN, PN0->getIterator());
2131 InsertNewInstWith(NewBO, BO0->getIterator());
2132
2139
2140 return replaceInstUsesWith(BO, NewBO);
2141}
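// Illustrative sketch (hypothetical IR; names are made up): two parallel
// recurrences combined through an associative, commutative binop,
//   %p0 = phi i32 [ 1, %entry ], [ %a0, %loop ]
//   %p1 = phi i32 [ 2, %entry ], [ %a1, %loop ]
//   %a0 = add i32 %p0, 3
//   %a1 = add i32 %p1, 5
//   %s  = add i32 %a0, %a1
// can be replaced by a single recurrence
//   %p     = phi i32 [ 3, %entry ], [ %s.new, %loop ]
//   %s.new = add i32 %p, 8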
2142
2143 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2144 // Attempt to fold binary operators whose operands are simple recurrences.
2145 if (auto *NewBO = foldBinopWithRecurrence(BO))
2146 return NewBO;
2147
2148 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2149 // we are guarding against replicating the binop in >1 predecessor.
2150 // This could miss matching a phi with 2 constant incoming values.
2151 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2152 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2153 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2154 Phi0->getNumOperands() != Phi1->getNumOperands())
2155 return nullptr;
2156
2157 // TODO: Remove the restriction for binop being in the same block as the phis.
2158 if (BO.getParent() != Phi0->getParent() ||
2159 BO.getParent() != Phi1->getParent())
2160 return nullptr;
2161
2162 // Fold if there is at least one specific constant value in phi0's or phi1's
2163 // incoming values that comes from the same block and this specific constant
2164 // value can be used to optimize the binary operator.
2165 // For example:
2166 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2167 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2168 // %add = add i32 %phi0, %phi1
2169 // ==>
2170 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2171 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2172 /*AllowRHSConstant*/ false);
2173 if (C) {
2174 SmallVector<Value *, 4> NewIncomingValues;
2175 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2176 auto &Phi0Use = std::get<0>(T);
2177 auto &Phi1Use = std::get<1>(T);
2178 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2179 return false;
2180 Value *Phi0UseV = Phi0Use.get();
2181 Value *Phi1UseV = Phi1Use.get();
2182 if (Phi0UseV == C)
2183 NewIncomingValues.push_back(Phi1UseV);
2184 else if (Phi1UseV == C)
2185 NewIncomingValues.push_back(Phi0UseV);
2186 else
2187 return false;
2188 return true;
2189 };
2190
2191 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2192 CanFoldIncomingValuePair)) {
2193 PHINode *NewPhi =
2194 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2195 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2196 "The number of collected incoming values should equal the number "
2197 "of the original PHINode operands!");
2198 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2199 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2200 return NewPhi;
2201 }
2202 }
2203
2204 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2205 return nullptr;
2206
2207 // Match a pair of incoming constants for one of the predecessor blocks.
2208 BasicBlock *ConstBB, *OtherBB;
2209 Constant *C0, *C1;
2210 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2211 ConstBB = Phi0->getIncomingBlock(0);
2212 OtherBB = Phi0->getIncomingBlock(1);
2213 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2214 ConstBB = Phi0->getIncomingBlock(1);
2215 OtherBB = Phi0->getIncomingBlock(0);
2216 } else {
2217 return nullptr;
2218 }
2219 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2220 return nullptr;
2221
2222 // The block that we are hoisting to must reach here unconditionally.
2223 // Otherwise, we could be speculatively executing an expensive or
2224 // non-speculatable op.
2225 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2226 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2227 !DT.isReachableFromEntry(OtherBB))
2228 return nullptr;
2229
2230 // TODO: This check could be tightened to only apply to binops (div/rem) that
2231 // are not safe to speculatively execute. But that could allow hoisting
2232 // potentially expensive instructions (fdiv for example).
2233 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2235 return nullptr;
2236
2237 // Fold constants for the predecessor block with constant incoming values.
2238 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2239 if (!NewC)
2240 return nullptr;
2241
2242 // Make a new binop in the predecessor block with the non-constant incoming
2243 // values.
2244 Builder.SetInsertPoint(PredBlockBranch);
2245 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2246 Phi0->getIncomingValueForBlock(OtherBB),
2247 Phi1->getIncomingValueForBlock(OtherBB));
2248 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2249 NotFoldedNewBO->copyIRFlags(&BO);
2250
2251 // Replace the binop with a phi of the new values. The old phis are dead.
2252 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2253 NewPhi->addIncoming(NewBO, OtherBB);
2254 NewPhi->addIncoming(NewC, ConstBB);
2255 return NewPhi;
2256}
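// Illustrative sketch of the constant-pair case above (hypothetical IR; block
// and value names are made up): when both phis carry immediate constants for
// the same predecessor and the other predecessor branches here unconditionally,
//   %p0 = phi i32 [ 7, %const.bb ], [ %x, %other.bb ]
//   %p1 = phi i32 [ 3, %const.bb ], [ %y, %other.bb ]
//   %r  = add i32 %p0, %p1
// can become
//   ; in %other.bb:  %xy = add i32 %x, %y
//   %r = phi i32 [ 10, %const.bb ], [ %xy, %other.bb ]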
2257
2259 if (!isa<Constant>(I.getOperand(1)))
2260 return nullptr;
2261
2262 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2263 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2264 return NewSel;
2265 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2266 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2267 return NewPhi;
2268 }
2269 return nullptr;
2270}
2271
2273 // If this GEP has only 0 indices, it is the same pointer as
2274 // Src. If Src is not a trivial GEP too, don't combine
2275 // the indices.
2276 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2277 !Src.hasOneUse())
2278 return false;
2279 return true;
2280}
2281
2282/// Find a constant NewC that has property:
2283/// shuffle(NewC, ShMask) = C
2284/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2285///
2286/// A 1-to-1 mapping is not required. Example:
2287/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2289 VectorType *NewCTy) {
2290 if (isa<ScalableVectorType>(NewCTy)) {
2291 Constant *Splat = C->getSplatValue();
2292 if (!Splat)
2293 return nullptr;
2295 }
2296
2297 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2298 cast<FixedVectorType>(C->getType())->getNumElements())
2299 return nullptr;
2300
2301 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2302 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2303 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2304 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2305 for (unsigned I = 0; I < NumElts; ++I) {
2306 Constant *CElt = C->getAggregateElement(I);
2307 if (ShMask[I] >= 0) {
2308 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2309 Constant *NewCElt = NewVecC[ShMask[I]];
2310 // Bail out if:
2311 // 1. The constant vector contains a constant expression.
2312 // 2. The shuffle needs an element of the constant vector that can't
2313 // be mapped to a new constant vector.
2314 // 3. This is a widening shuffle that copies elements of V1 into the
2315 // extended elements (extending with poison is allowed).
2316 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2317 I >= NewCNumElts)
2318 return nullptr;
2319 NewVecC[ShMask[I]] = CElt;
2320 }
2321 }
2322 return ConstantVector::get(NewVecC);
2323}
2324
2326 if (!isa<VectorType>(Inst.getType()))
2327 return nullptr;
2328
2329 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2330 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2331 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2332 cast<VectorType>(Inst.getType())->getElementCount());
2333 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2334 cast<VectorType>(Inst.getType())->getElementCount());
2335
2336 // If both operands of the binop are vector concatenations, then perform the
2337 // narrow binop on each pair of the source operands followed by concatenation
2338 // of the results.
2339 Value *L0, *L1, *R0, *R1;
2340 ArrayRef<int> Mask;
2341 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2342 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2343 LHS->hasOneUse() && RHS->hasOneUse() &&
2344 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2345 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2346 // This transform does not have the speculative execution constraint as
2347 // below because the shuffle is a concatenation. The new binops are
2348 // operating on exactly the same elements as the existing binop.
2349 // TODO: We could ease the mask requirement to allow different undef lanes,
2350 // but that requires an analysis of the binop-with-undef output value.
2351 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2352 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2353 BO->copyIRFlags(&Inst);
2354 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2355 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2356 BO->copyIRFlags(&Inst);
2357 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2358 }
2359
2360 auto createBinOpReverse = [&](Value *X, Value *Y) {
2361 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2362 if (auto *BO = dyn_cast<BinaryOperator>(V))
2363 BO->copyIRFlags(&Inst);
2364 Module *M = Inst.getModule();
2366 M, Intrinsic::vector_reverse, V->getType());
2367 return CallInst::Create(F, V);
2368 };
2369
2370 // NOTE: Reverse shuffles don't require the speculative execution protection
2371 // below because they don't affect which lanes take part in the computation.
2372
2373 Value *V1, *V2;
2374 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2375 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2376 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2377 (LHS->hasOneUse() || RHS->hasOneUse() ||
2378 (LHS == RHS && LHS->hasNUses(2))))
2379 return createBinOpReverse(V1, V2);
2380
2381 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2382 if (LHS->hasOneUse() && isSplatValue(RHS))
2383 return createBinOpReverse(V1, RHS);
2384 }
2385 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2386 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2387 return createBinOpReverse(LHS, V2);
2388
2389 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2390 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2391 if (auto *BO = dyn_cast<BinaryOperator>(V))
2392 BO->copyIRFlags(&Inst);
2393
2394 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2395 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2396 Module *M = Inst.getModule();
2398 M, Intrinsic::experimental_vp_reverse, V->getType());
2399 return CallInst::Create(F, {V, AllTrueMask, EVL});
2400 };
2401
2402 Value *EVL;
2404 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2405 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2407 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2408 (LHS->hasOneUse() || RHS->hasOneUse() ||
2409 (LHS == RHS && LHS->hasNUses(2))))
2410 return createBinOpVPReverse(V1, V2, EVL);
2411
2412 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2413 if (LHS->hasOneUse() && isSplatValue(RHS))
2414 return createBinOpVPReverse(V1, RHS, EVL);
2415 }
2416 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2417 else if (isSplatValue(LHS) &&
2419 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2420 return createBinOpVPReverse(LHS, V2, EVL);
2421
2422 // It may not be safe to reorder shuffles and things like div, urem, etc.
2423 // because we may trap when executing those ops on unknown vector elements.
2424 // See PR20059.
2426 return nullptr;
2427
2428 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2429 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2430 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2431 BO->copyIRFlags(&Inst);
2432 return new ShuffleVectorInst(XY, M);
2433 };
2434
2435 // If both arguments of the binary operation are shuffles that use the same
2436 // mask and shuffle within a single vector, move the shuffle after the binop.
2437 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2438 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2439 V1->getType() == V2->getType() &&
2440 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2441 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2442 return createBinOpShuffle(V1, V2, Mask);
2443 }
2444
2445 // If both arguments of a commutative binop are select-shuffles that use the
2446 // same mask with commuted operands, the shuffles are unnecessary.
2447 if (Inst.isCommutative() &&
2448 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2449 match(RHS,
2450 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2451 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2452 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2453 // TODO: Allow shuffles that contain undefs in the mask?
2454 // That is legal, but it reduces undef knowledge.
2455 // TODO: Allow arbitrary shuffles by shuffling after binop?
2456 // That might be legal, but we have to deal with poison.
2457 if (LShuf->isSelect() &&
2458 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2459 RShuf->isSelect() &&
2460 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2461 // Example:
2462 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2463 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2464 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2465 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2466 NewBO->copyIRFlags(&Inst);
2467 return NewBO;
2468 }
2469 }
2470
2471 // If one argument is a shuffle within one vector and the other is a constant,
2472 // try moving the shuffle after the binary operation. This canonicalization
2473 // intends to move shuffles closer to other shuffles and binops closer to
2474 // other binops, so they can be folded. It may also enable demanded elements
2475 // transforms.
2476 Constant *C;
2478 m_Mask(Mask))),
2479 m_ImmConstant(C)))) {
2480 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2481 "Shuffle should not change scalar type");
2482
2483 bool ConstOp1 = isa<Constant>(RHS);
2484 if (Constant *NewC =
2486 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2487 // which will cause UB for div/rem. Mask them with a safe constant.
2488 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2489 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2490
2491 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2492 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2493 Value *NewLHS = ConstOp1 ? V1 : NewC;
2494 Value *NewRHS = ConstOp1 ? NewC : V1;
2495 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2496 }
2497 }
2498
2499 // Try to reassociate to sink a splat shuffle after a binary operation.
2500 if (Inst.isAssociative() && Inst.isCommutative()) {
2501 // Canonicalize shuffle operand as LHS.
2502 if (isa<ShuffleVectorInst>(RHS))
2503 std::swap(LHS, RHS);
2504
2505 Value *X;
2506 ArrayRef<int> MaskC;
2507 int SplatIndex;
2508 Value *Y, *OtherOp;
2509 if (!match(LHS,
2510 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2511 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2512 X->getType() != Inst.getType() ||
2513 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2514 return nullptr;
2515
2516 // FIXME: This may not be safe if the analysis allows undef elements. By
2517 // moving 'Y' before the splat shuffle, we are implicitly assuming
2518 // that it is not undef/poison at the splat index.
2519 if (isSplatValue(OtherOp, SplatIndex)) {
2520 std::swap(Y, OtherOp);
2521 } else if (!isSplatValue(Y, SplatIndex)) {
2522 return nullptr;
2523 }
2524
2525 // X and Y are splatted values, so perform the binary operation on those
2526 // values followed by a splat followed by the 2nd binary operation:
2527 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2528 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2529 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2530 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2531 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2532
2533 // Intersect FMF on both new binops. Other (poison-generating) flags are
2534 // dropped to be safe.
2535 if (isa<FPMathOperator>(R)) {
2536 R->copyFastMathFlags(&Inst);
2537 R->andIRFlags(RHS);
2538 }
2539 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2540 NewInstBO->copyIRFlags(R);
2541 return R;
2542 }
2543
2544 return nullptr;
2545}
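// Illustrative sketch of the shuffle-with-constant case above (hypothetical
// IR; names are made up): the shuffle is moved after the binop by
// "unshuffling" the constant,
//   %sh = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %r  = add <4 x i32> %sh, <i32 1, i32 2, i32 3, i32 4>
// can become
//   %t = add <4 x i32> %v, <i32 4, i32 3, i32 2, i32 1>
//   %r = shufflevector <4 x i32> %t, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>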
2546
2547/// Try to narrow the width of a binop if at least 1 operand is an extend of
2548 /// a value. This requires a potentially expensive known bits check to make
2549/// sure the narrow op does not overflow.
2550Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2551 // We need at least one extended operand.
2552 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2553
2554 // If this is a sub, we swap the operands since we always want an extension
2555 // on the RHS. The LHS can be an extension or a constant.
2556 if (BO.getOpcode() == Instruction::Sub)
2557 std::swap(Op0, Op1);
2558
2559 Value *X;
2560 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2561 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2562 return nullptr;
2563
2564 // If both operands are the same extension from the same source type and we
2565 // can eliminate at least one (hasOneUse), this might work.
2566 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2567 Value *Y;
2568 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2569 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2570 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2571 // If that did not match, see if we have a suitable constant operand.
2572 // Truncating and extending must produce the same constant.
2573 Constant *WideC;
2574 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2575 return nullptr;
2576 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2577 if (!NarrowC)
2578 return nullptr;
2579 Y = NarrowC;
2580 }
2581
2582 // Swap back now that we found our operands.
2583 if (BO.getOpcode() == Instruction::Sub)
2584 std::swap(X, Y);
2585
2586 // Both operands have narrow versions. Last step: the math must not overflow
2587 // in the narrow width.
2588 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2589 return nullptr;
2590
2591 // bo (ext X), (ext Y) --> ext (bo X, Y)
2592 // bo (ext X), C --> ext (bo X, C')
2593 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2594 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2595 if (IsSext)
2596 NewBinOp->setHasNoSignedWrap();
2597 else
2598 NewBinOp->setHasNoUnsignedWrap();
2599 }
2600 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2601}
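// Illustrative sketch (hypothetical IR; names are made up): assuming known
// bits prove the narrow add cannot wrap,
//   %xw = zext i8 %x to i32
//   %yw = zext i8 %y to i32
//   %r  = add i32 %xw, %yw
// can become
//   %n = add nuw i8 %x, %y
//   %r = zext i8 %n to i32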
2602
2603/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2604/// transform.
2609
2610/// Thread a GEP operation with constant indices through the constant true/false
2611/// arms of a select.
2613 InstCombiner::BuilderTy &Builder) {
2614 if (!GEP.hasAllConstantIndices())
2615 return nullptr;
2616
2617 Instruction *Sel;
2618 Value *Cond;
2619 Constant *TrueC, *FalseC;
2620 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2621 !match(Sel,
2622 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2623 return nullptr;
2624
2625 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2626 // Propagate 'inbounds' and metadata from existing instructions.
2627 // Note: using IRBuilder to create the constants for efficiency.
2628 SmallVector<Value *, 4> IndexC(GEP.indices());
2629 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2630 Type *Ty = GEP.getSourceElementType();
2631 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2632 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2633 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2634}
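// Illustrative sketch (hypothetical IR; global and value names are made up):
// a constant-index GEP threads through a select of constant pointers,
//   %p = select i1 %c, ptr @a, ptr @b
//   %g = getelementptr inbounds i32, ptr %p, i64 1
// can become
//   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @a, i64 1),
//                      ptr getelementptr inbounds (i32, ptr @b, i64 1)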
2635
2636// Canonicalization:
2637// gep T, (gep i8, base, C1), (Index + C2) into
2638// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
2640 GEPOperator *Src,
2641 InstCombinerImpl &IC) {
2642 if (GEP.getNumIndices() != 1)
2643 return nullptr;
2644 auto &DL = IC.getDataLayout();
2645 Value *Base;
2646 const APInt *C1;
2647 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2648 return nullptr;
2649 Value *VarIndex;
2650 const APInt *C2;
2651 Type *PtrTy = Src->getType()->getScalarType();
2652 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2653 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2654 return nullptr;
2655 if (C1->getBitWidth() != IndexSizeInBits ||
2656 C2->getBitWidth() != IndexSizeInBits)
2657 return nullptr;
2658 Type *BaseType = GEP.getSourceElementType();
2660 return nullptr;
2661 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2662 APInt NewOffset = TypeSize * *C2 + *C1;
2663 if (NewOffset.isZero() ||
2664 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2666 if (GEP.hasNoUnsignedWrap() &&
2667 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2668 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2670 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2671 Flags |= GEPNoWrapFlags::inBounds();
2672 }
2673
2674 Value *GEPConst =
2675 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2676 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2677 }
2678
2679 return nullptr;
2680}
2681
2682/// Combine constant offsets separated by variable offsets.
2683/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2685 InstCombinerImpl &IC) {
2686 if (!GEP.hasAllConstantIndices())
2687 return nullptr;
2688
2691 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2692 while (true) {
2693 if (!InnerGEP)
2694 return nullptr;
2695
2696 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2697 if (InnerGEP->hasAllConstantIndices())
2698 break;
2699
2700 if (!InnerGEP->hasOneUse())
2701 return nullptr;
2702
2703 Skipped.push_back(InnerGEP);
2704 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2705 }
2706
2707 // The two constant offset GEPs are directly adjacent: Let normal offset
2708 // merging handle it.
2709 if (Skipped.empty())
2710 return nullptr;
2711
2712 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2713 // if profitable.
2714 if (!InnerGEP->hasOneUse())
2715 return nullptr;
2716
2717 // Don't bother with vector splats.
2718 Type *Ty = GEP.getType();
2719 if (InnerGEP->getType() != Ty)
2720 return nullptr;
2721
2722 const DataLayout &DL = IC.getDataLayout();
2723 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2724 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2725 !InnerGEP->accumulateConstantOffset(DL, Offset))
2726 return nullptr;
2727
2728 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2729 for (GetElementPtrInst *SkippedGEP : Skipped)
2730 SkippedGEP->setNoWrapFlags(NW);
2731
2732 return IC.replaceInstUsesWith(
2733 GEP,
2734 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2735 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2736}
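// Illustrative sketch (hypothetical IR; names are made up): the two constant
// offsets separated by a variable offset are combined,
//   %g1 = getelementptr i8, ptr %p, i64 4
//   %g2 = getelementptr i8, ptr %g1, i64 %x
//   %g3 = getelementptr i8, ptr %g2, i64 8
// can become
//   %g2 = getelementptr i8, ptr %p, i64 %x
//   %g3 = getelementptr i8, ptr %g2, i64 12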
2737
2739 GEPOperator *Src) {
2740 // Combine Indices - If the source pointer to this getelementptr instruction
2741 // is a getelementptr instruction with matching element type, combine the
2742 // indices of the two getelementptr instructions into a single instruction.
2743 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2744 return nullptr;
2745
2746 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2747 return I;
2748
2749 if (auto *I = combineConstantOffsets(GEP, *this))
2750 return I;
2751
2752 if (Src->getResultElementType() != GEP.getSourceElementType())
2753 return nullptr;
2754
2755 // Find out whether the last index in the source GEP is a sequential idx.
2756 bool EndsWithSequential = false;
2757 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2758 I != E; ++I)
2759 EndsWithSequential = I.isSequential();
2760 if (!EndsWithSequential)
2761 return nullptr;
2762
2763 // Replace: gep (gep %P, long B), long A, ...
2764 // With: T = long A+B; gep %P, T, ...
2765 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2766 Value *GO1 = GEP.getOperand(1);
2767
2768 // If they aren't the same type, then the input hasn't been processed
2769 // by the loop above yet (which canonicalizes sequential index types to
2770 // intptr_t). Just avoid transforming this until the input has been
2771 // normalized.
2772 if (SO1->getType() != GO1->getType())
2773 return nullptr;
2774
2775 Value *Sum =
2776 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2777 // Only do the combine when we are sure the cost after the
2778 // merge is never more than that before the merge.
2779 if (Sum == nullptr)
2780 return nullptr;
2781
2783 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2784 Indices.push_back(Sum);
2785 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2786
2787 // Don't create GEPs with more than one non-zero index.
2788 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2789 auto *C = dyn_cast<Constant>(Idx);
2790 return !C || !C->isNullValue();
2791 });
2792 if (NumNonZeroIndices > 1)
2793 return nullptr;
2794
2795 return replaceInstUsesWith(
2796 GEP, Builder.CreateGEP(
2797 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2799}
2800
2803 bool &DoesConsume, unsigned Depth) {
2804 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2805 // ~(~(X)) -> X.
2806 Value *A, *B;
2807 if (match(V, m_Not(m_Value(A)))) {
2808 DoesConsume = true;
2809 return A;
2810 }
2811
2812 Constant *C;
2813 // Constants can be considered to be not'ed values.
2814 if (match(V, m_ImmConstant(C)))
2815 return ConstantExpr::getNot(C);
2816
2818 return nullptr;
2819
2820 // The rest of the cases require that we invert all uses so don't bother
2821 // doing the analysis if we know we can't use the result.
2822 if (!WillInvertAllUses)
2823 return nullptr;
2824
2825 // Compares can be inverted if all of their uses are being modified to use
2826 // the ~V.
2827 if (auto *I = dyn_cast<CmpInst>(V)) {
2828 if (Builder != nullptr)
2829 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2830 I->getOperand(1));
2831 return NonNull;
2832 }
2833
2834 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2835 // `(-1 - B) - A` if we are willing to invert all of the uses.
2836 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2837 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2838 DoesConsume, Depth))
2839 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2840 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2841 DoesConsume, Depth))
2842 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2843 return nullptr;
2844 }
2845
2846 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2847 // into `A ^ B` if we are willing to invert all of the uses.
2848 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2849 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2850 DoesConsume, Depth))
2851 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2852 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2853 DoesConsume, Depth))
2854 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2855 return nullptr;
2856 }
2857
2858 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2859 // `A + (-1 - B)` if we are willing to invert all of the uses.
2860 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2861 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2862 DoesConsume, Depth))
2863 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2864 return nullptr;
2865 }
2866
2867 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2868 // into `A s>> B` if we are willing to invert all of the uses.
2869 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2870 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2871 DoesConsume, Depth))
2872 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2873 return nullptr;
2874 }
2875
2876 Value *Cond;
2877 // LogicOps are special in that we canonicalize them at the cost of an
2878 // instruction.
2879 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2881 // Selects/min/max with invertible operands are freely invertible
2882 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2883 bool LocalDoesConsume = DoesConsume;
2884 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2885 LocalDoesConsume, Depth))
2886 return nullptr;
2887 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2888 LocalDoesConsume, Depth)) {
2889 DoesConsume = LocalDoesConsume;
2890 if (Builder != nullptr) {
2891 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2892 DoesConsume, Depth);
2893 assert(NotB != nullptr &&
2894 "Unable to build inverted value for known freely invertable op");
2895 if (auto *II = dyn_cast<IntrinsicInst>(V))
2896 return Builder->CreateBinaryIntrinsic(
2897 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2898 return Builder->CreateSelect(Cond, NotA, NotB);
2899 }
2900 return NonNull;
2901 }
2902 }
2903
2904 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2905 bool LocalDoesConsume = DoesConsume;
2907 for (Use &U : PN->operands()) {
2908 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2909 Value *NewIncomingVal = getFreelyInvertedImpl(
2910 U.get(), /*WillInvertAllUses=*/false,
2911 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2912 if (NewIncomingVal == nullptr)
2913 return nullptr;
2914 // Make sure that we can safely erase the original PHI node.
2915 if (NewIncomingVal == V)
2916 return nullptr;
2917 if (Builder != nullptr)
2918 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2919 }
2920
2921 DoesConsume = LocalDoesConsume;
2922 if (Builder != nullptr) {
2924 Builder->SetInsertPoint(PN);
2925 PHINode *NewPN =
2926 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2927 for (auto [Val, Pred] : IncomingValues)
2928 NewPN->addIncoming(Val, Pred);
2929 return NewPN;
2930 }
2931 return NonNull;
2932 }
2933
2934 if (match(V, m_SExtLike(m_Value(A)))) {
2935 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2936 DoesConsume, Depth))
2937 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2938 return nullptr;
2939 }
2940
2941 if (match(V, m_Trunc(m_Value(A)))) {
2942 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2943 DoesConsume, Depth))
2944 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2945 return nullptr;
2946 }
2947
2948 // De Morgan's Laws:
2949 // (~(A | B)) -> (~A & ~B)
2950 // (~(A & B)) -> (~A | ~B)
2951 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2952 bool IsLogical, Value *A,
2953 Value *B) -> Value * {
2954 bool LocalDoesConsume = DoesConsume;
2955 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2956 LocalDoesConsume, Depth))
2957 return nullptr;
2958 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2959 LocalDoesConsume, Depth)) {
2960 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2961 LocalDoesConsume, Depth);
2962 DoesConsume = LocalDoesConsume;
2963 if (IsLogical)
2964 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2965 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2966 }
2967
2968 return nullptr;
2969 };
2970
2971 if (match(V, m_Or(m_Value(A), m_Value(B))))
2972 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2973 B);
2974
2975 if (match(V, m_And(m_Value(A), m_Value(B))))
2976 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2977 B);
2978
2979 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2980 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2981 B);
2982
2983 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2984 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2985 B);
2986
2987 return nullptr;
2988}
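// Illustrative sketch of one of the cases above (hypothetical IR; names are
// made up): inverting an add by rewriting -1 - (a + b) as (-1 - b) - a,
//   %v = add i32 %a, %b
//   %n = xor i32 %v, -1
// can become
//   %nb = xor i32 %b, -1
//   %n  = sub i32 %nb, %a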
2989
2990/// Return true if we should canonicalize the gep to an i8 ptradd.
2992 Value *PtrOp = GEP.getOperand(0);
2993 Type *GEPEltType = GEP.getSourceElementType();
2994 if (GEPEltType->isIntegerTy(8))
2995 return false;
2996
2997 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2998 // intrinsic. This has better support in BasicAA.
2999 if (GEPEltType->isScalableTy())
3000 return true;
3001
3002 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3003 // together.
3004 if (GEP.getNumIndices() == 1 &&
3005 match(GEP.getOperand(1),
3007 m_Shl(m_Value(), m_ConstantInt())))))
3008 return true;
3009
3010 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3011 // possibly be merged together.
3012 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3013 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3014 any_of(GEP.indices(), [](Value *V) {
3015 const APInt *C;
3016 return match(V, m_APInt(C)) && !C->isZero();
3017 });
3018}
3019
3021 IRBuilderBase &Builder) {
3022 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3023 if (!Op1)
3024 return nullptr;
3025
3026 // Don't fold a GEP into itself through a PHI node. This can only happen
3027 // through the back-edge of a loop. Folding a GEP into itself means that
3028 // the value of the previous iteration needs to be stored in the meantime,
3029 // thus requiring an additional register variable to be live, but not
3030 // actually achieving anything (the GEP still needs to be executed once per
3031 // loop iteration).
3032 if (Op1 == &GEP)
3033 return nullptr;
3034 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3035
3036 int DI = -1;
3037
3038 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3039 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3040 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3041 Op1->getSourceElementType() != Op2->getSourceElementType())
3042 return nullptr;
3043
3044 // As for Op1 above, don't try to fold a GEP into itself.
3045 if (Op2 == &GEP)
3046 return nullptr;
3047
3048 // Keep track of the type as we walk the GEP.
3049 Type *CurTy = nullptr;
3050
3051 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3052 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3053 return nullptr;
3054
3055 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3056 if (DI == -1) {
3057 // We have not seen any differences in the GEPs feeding the
3058 // PHI yet, so we record this one if it is allowed to be a
3059 // variable.
3060
3061 // The first two arguments can vary for any GEP, the rest have to be
3062 // static for struct slots
3063 if (J > 1) {
3064 assert(CurTy && "No current type?");
3065 if (CurTy->isStructTy())
3066 return nullptr;
3067 }
3068
3069 DI = J;
3070 } else {
3071 // The GEP is different by more than one input. While this could be
3072 // extended to support GEPs that vary by more than one variable it
3073 // doesn't make sense since it greatly increases the complexity and
3074 // would result in an R+R+R addressing mode which no backend
3075 // directly supports and would need to be broken into several
3076 // simpler instructions anyway.
3077 return nullptr;
3078 }
3079 }
3080
3081 // Sink down a layer of the type for the next iteration.
3082 if (J > 0) {
3083 if (J == 1) {
3084 CurTy = Op1->getSourceElementType();
3085 } else {
3086 CurTy =
3087 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3088 }
3089 }
3090 }
3091
3092 NW &= Op2->getNoWrapFlags();
3093 }
3094
3095 // If not all GEPs are identical we'll have to create a new PHI node.
3096 // Check that the old PHI node has only one use so that it will get
3097 // removed.
3098 if (DI != -1 && !PN->hasOneUse())
3099 return nullptr;
3100
3101 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3102 NewGEP->setNoWrapFlags(NW);
3103
3104 if (DI == -1) {
3105 // All the GEPs feeding the PHI are identical. Clone one down into our
3106 // BB so that it can be merged with the current GEP.
3107 } else {
3108 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3109 // into the current block so it can be merged, and create a new PHI to
3110 // set that index.
3111 PHINode *NewPN;
3112 {
3113 IRBuilderBase::InsertPointGuard Guard(Builder);
3114 Builder.SetInsertPoint(PN);
3115 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3116 PN->getNumOperands());
3117 }
3118
3119 for (auto &I : PN->operands())
3120 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3121 PN->getIncomingBlock(I));
3122
3123 NewGEP->setOperand(DI, NewPN);
3124 }
3125
3126 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3127 return NewGEP;
3128}
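// Illustrative sketch (hypothetical IR; block and value names are made up):
// GEPs feeding a phi that differ in a single index are replaced by a phi of
// that index and one cloned GEP,
//   bb1:   %g1 = getelementptr i32, ptr %base, i64 1
//   bb2:   %g2 = getelementptr i32, ptr %base, i64 %i
//   merge: %p  = phi ptr [ %g1, %bb1 ], [ %g2, %bb2 ]
// can become
//   merge: %idx = phi i64 [ 1, %bb1 ], [ %i, %bb2 ]
//          %g   = getelementptr i32, ptr %base, i64 %idx
// (the GEP that used %p is then updated by the caller to use %g instead)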
3129
3131 Value *PtrOp = GEP.getOperand(0);
3132 SmallVector<Value *, 8> Indices(GEP.indices());
3133 Type *GEPType = GEP.getType();
3134 Type *GEPEltType = GEP.getSourceElementType();
3135 if (Value *V =
3136 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3137 SQ.getWithInstruction(&GEP)))
3138 return replaceInstUsesWith(GEP, V);
3139
3140 // For vector geps, use the generic demanded vector support.
3141 // Skip if GEP return type is scalable. The number of elements is unknown at
3142 // compile-time.
3143 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3144 auto VWidth = GEPFVTy->getNumElements();
3145 APInt PoisonElts(VWidth, 0);
3146 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3147 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3148 PoisonElts)) {
3149 if (V != &GEP)
3150 return replaceInstUsesWith(GEP, V);
3151 return &GEP;
3152 }
3153 }
3154
3155 // Eliminate unneeded casts for indices, and replace indices which displace
3156 // by multiples of a zero size type with zero.
3157 bool MadeChange = false;
3158
3159 // Index width may not be the same width as pointer width.
3160 // Data layout chooses the right type based on supported integer types.
3161 Type *NewScalarIndexTy =
3162 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3163
3165 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3166 ++I, ++GTI) {
3167 // Skip indices into struct types.
3168 if (GTI.isStruct())
3169 continue;
3170
3171 Type *IndexTy = (*I)->getType();
3172 Type *NewIndexType =
3173 IndexTy->isVectorTy()
3174 ? VectorType::get(NewScalarIndexTy,
3175 cast<VectorType>(IndexTy)->getElementCount())
3176 : NewScalarIndexTy;
3177
3178 // If the element type has zero size then any index over it is equivalent
3179 // to an index of zero, so replace it with zero if it is not zero already.
3180 Type *EltTy = GTI.getIndexedType();
3181 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3182 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3183 *I = Constant::getNullValue(NewIndexType);
3184 MadeChange = true;
3185 }
3186
3187 if (IndexTy != NewIndexType) {
3188 // If we are using a wider index than needed for this platform, shrink
3189 // it to what we need. If narrower, sign-extend it to what we need.
3190 // This explicit cast can make subsequent optimizations more obvious.
3191 if (IndexTy->getScalarSizeInBits() <
3192 NewIndexType->getScalarSizeInBits()) {
3193 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3194 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3195 else
3196 *I = Builder.CreateSExt(*I, NewIndexType);
3197 } else {
3198 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3199 GEP.hasNoUnsignedSignedWrap());
3200 }
3201 MadeChange = true;
3202 }
3203 }
3204 if (MadeChange)
3205 return &GEP;
3206
3207 // Canonicalize constant GEPs to i8 type.
3208 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3209 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3210 if (GEP.accumulateConstantOffset(DL, Offset))
3211 return replaceInstUsesWith(
3212 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3213 GEP.getNoWrapFlags()));
3214 }
3215
3217 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3218 Value *NewGEP =
3219 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3220 return replaceInstUsesWith(GEP, NewGEP);
3221 }
3222
3223 // Strip trailing zero indices.
3224 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3225 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3226 return replaceInstUsesWith(
3227 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3228 drop_end(Indices), "", GEP.getNoWrapFlags()));
3229 }
3230
3231 // Strip leading zero indices.
3232 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3233 if (FirstIdx && FirstIdx->isNullValue() &&
3234 !FirstIdx->getType()->isVectorTy()) {
3235 gep_type_iterator GTI = gep_type_begin(GEP);
3236 ++GTI;
3237 if (!GTI.isStruct())
3238 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3239 GEP.getPointerOperand(),
3240 drop_begin(Indices), "",
3241 GEP.getNoWrapFlags()));
3242 }
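// Sketch: a leading zero index merely steps over the outer type, so
//   %g = getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// can be rewritten as
//   %g = getelementptr i32, ptr %p, i64 %i
// provided the second index is not a struct field index.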
3243
3244 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3245 // Note that this loses information about undef lanes; we run it after
3246 // demanded bits to partially mitigate that loss.
3247 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3248 return Op->getType()->isVectorTy() && getSplatValue(Op);
3249 })) {
3250 SmallVector<Value *> NewOps;
3251 for (auto &Op : GEP.operands()) {
3252 if (Op->getType()->isVectorTy())
3253 if (Value *Scalar = getSplatValue(Op)) {
3254 NewOps.push_back(Scalar);
3255 continue;
3256 }
3257 NewOps.push_back(Op);
3258 }
3259
3260 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3261 ArrayRef(NewOps).drop_front(), GEP.getName(),
3262 GEP.getNoWrapFlags());
3263 if (!Res->getType()->isVectorTy()) {
3264 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3265 Res = Builder.CreateVectorSplat(EC, Res);
3266 }
3267 return replaceInstUsesWith(GEP, Res);
3268 }
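// Sketch: a GEP whose vector operands are all splats, e.g.
//   %g = getelementptr i32, ptr %p, <4 x i64> splat (i64 5)
// is rebuilt as a scalar GEP followed by a splat of the result:
//   %s = getelementptr i32, ptr %p, i64 5
//   %g = vector splat of %s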
3269
3270 bool SeenNonZeroIndex = false;
3271 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3272 auto *C = dyn_cast<Constant>(Idx);
3273 if (C && C->isNullValue())
3274 continue;
3275
3276 if (!SeenNonZeroIndex) {
3277 SeenNonZeroIndex = true;
3278 continue;
3279 }
3280
3281 // GEP has multiple non-zero indices: Split it.
3282 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3283 Value *FrontGEP =
3284 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3285 GEP.getName() + ".split", GEP.getNoWrapFlags());
3286
3287 SmallVector<Value *> BackIndices;
3288 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3289 append_range(BackIndices, drop_begin(Indices, IdxNum));
3291 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3292 BackIndices, GEP.getNoWrapFlags());
3293 }
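// Sketch: a GEP with two non-zero indices such as
//   %g = getelementptr [8 x i32], ptr %p, i64 %i, i64 %j
// is split so each resulting GEP carries a single non-zero index:
//   %g.split = getelementptr [8 x i32], ptr %p, i64 %i
//   %g = getelementptr [8 x i32], ptr %g.split, i64 0, i64 %j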
3294
3295 // Check to see if the inputs to the PHI node are getelementptr instructions.
3296 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3297 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3298 return replaceOperand(GEP, 0, NewPtrOp);
3299 }
3300
3301 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3302 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3303 return I;
3304
3305 if (GEP.getNumIndices() == 1) {
3306 unsigned AS = GEP.getPointerAddressSpace();
3307 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3308 DL.getIndexSizeInBits(AS)) {
3309 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3310
3311 if (TyAllocSize == 1) {
3312 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3313 // but only if the result pointer is only used as if it were an integer,
3314 // or both point to the same underlying object (otherwise provenance is
3315 // not necessarily retained).
3316 Value *X = GEP.getPointerOperand();
3317 Value *Y;
3318 if (match(GEP.getOperand(1),
3319 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3320 GEPType == Y->getType()) {
3321 bool HasSameUnderlyingObject =
3322 getUnderlyingObject(X) == getUnderlyingObject(Y);
3323 bool Changed = false;
3324 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3325 bool ShouldReplace = HasSameUnderlyingObject ||
3326 isa<ICmpInst>(U.getUser()) ||
3327 isa<PtrToIntInst>(U.getUser());
3328 Changed |= ShouldReplace;
3329 return ShouldReplace;
3330 });
3331 return Changed ? &GEP : nullptr;
3332 }
3333 } else if (auto *ExactIns =
3334 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3335 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3336 Value *V;
3337 if (ExactIns->isExact()) {
3338 if ((has_single_bit(TyAllocSize) &&
3339 match(GEP.getOperand(1),
3340 m_Shr(m_Value(V),
3341 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3342 match(GEP.getOperand(1),
3343 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3344 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3345 GEP.getPointerOperand(), V,
3346 GEP.getNoWrapFlags());
3347 }
3348 }
3349 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3350 // Try to canonicalize a non-i8 element type to i8 when the index is an
3351 // exact instruction (div/shr) with a constant RHS: in that case we can
3352 // fold the non-i8 element scale into the div/shr (similar to the mul
3353 // case, just inverted).
3354 const APInt *C;
3355 std::optional<APInt> NewC;
3356 if (has_single_bit(TyAllocSize) &&
3357 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3358 C->uge(countr_zero(TyAllocSize)))
3359 NewC = *C - countr_zero(TyAllocSize);
3360 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3361 APInt Quot;
3362 uint64_t Rem;
3363 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3364 if (Rem == 0)
3365 NewC = Quot;
3366 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3367 APInt Quot;
3368 int64_t Rem;
3369 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3370 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3371 if (!Quot.isAllOnes() && Rem == 0)
3372 NewC = Quot;
3373 }
3374
3375 if (NewC.has_value()) {
3376 Value *NewOp = Builder.CreateBinOp(
3377 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3378 ConstantInt::get(V->getType(), *NewC));
3379 cast<BinaryOperator>(NewOp)->setIsExact();
3380 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3381 GEP.getPointerOperand(), NewOp,
3382 GEP.getNoWrapFlags());
3383 }
3384 }
3385 }
3386 }
3387 }
3388 // We do not handle pointer-vector geps here.
3389 if (GEPType->isVectorTy())
3390 return nullptr;
3391
3392 if (!GEP.isInBounds()) {
3393 unsigned IdxWidth =
3394 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3395 APInt BasePtrOffset(IdxWidth, 0);
3396 Value *UnderlyingPtrOp =
3397 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3398 bool CanBeNull, CanBeFreed;
3399 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3400 DL, CanBeNull, CanBeFreed);
3401 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3402 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3403 BasePtrOffset.isNonNegative()) {
3404 APInt AllocSize(IdxWidth, DerefBytes);
3405 if (BasePtrOffset.ule(AllocSize)) {
3407 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3408 }
3409 }
3410 }
3411 }
3412
3413 // nusw + nneg -> nuw
3414 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3415 all_of(GEP.indices(), [&](Value *Idx) {
3416 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3417 })) {
3418 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3419 return &GEP;
3420 }
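// Sketch: a 'getelementptr nusw' whose indices are all known non-negative,
//   %g = getelementptr nusw i8, ptr %p, i64 %i   ; %i known non-negative
// can additionally be marked 'nuw':
//   %g = getelementptr nusw nuw i8, ptr %p, i64 %i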
3421
3422 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3423 // to do this after having tried to derive "nuw" above.
3424 if (GEP.getNumIndices() == 1) {
3425 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3426 // geps if transforming into (gep (gep p, x), y).
3427 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3428 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3429 // that x + y does not have unsigned wrap.
3430 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3431 return GEP.getNoWrapFlags();
3432 return GEPNoWrapFlags::none();
3433 };
3434
3435 // Try to replace ADD + GEP with GEP + GEP.
3436 Value *Idx1, *Idx2;
3437 if (match(GEP.getOperand(1),
3438 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3439 // %idx = add i64 %idx1, %idx2
3440 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3441 // as:
3442 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3443 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3444 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3445 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3446 auto *NewPtr =
3447 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3448 Idx1, "", NWFlags);
3449 return replaceInstUsesWith(GEP,
3450 Builder.CreateGEP(GEP.getSourceElementType(),
3451 NewPtr, Idx2, "", NWFlags));
3452 }
3453 ConstantInt *C;
3454 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3455 m_Value(Idx1), m_ConstantInt(C))))))) {
3456 // %add = add nsw i32 %idx1, idx2
3457 // %sidx = sext i32 %add to i64
3458 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3459 // as:
3460 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3461 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3462 bool NUW = match(GEP.getOperand(1),
3464 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3465 auto *NewPtr = Builder.CreateGEP(
3466 GEP.getSourceElementType(), GEP.getPointerOperand(),
3467 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3468 return replaceInstUsesWith(
3469 GEP,
3470 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3471 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3472 "", NWFlags));
3473 }
3474 }
3475
3476 if (Instruction *R = foldSelectGEP(GEP, Builder))
3477 return R;
3478
3479 return nullptr;
3480}
3481
3482 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3483 Instruction *AI) {
3484 if (isa<ConstantPointerNull>(V))
3485 return true;
3486 if (auto *LI = dyn_cast<LoadInst>(V))
3487 return isa<GlobalVariable>(LI->getPointerOperand());
3488 // Two distinct allocations will never be equal.
3489 return isAllocLikeFn(V, &TLI) && V != AI;
3490}
3491
3492 /// Given a call CB which uses an address UsedV, return true if we can prove
3493 /// the call's only possible effect is storing to UsedV.
3494static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3495 const TargetLibraryInfo &TLI) {
3496 if (!CB.use_empty())
3497 // TODO: add recursion if returned attribute is present
3498 return false;
3499
3500 if (CB.isTerminator())
3501 // TODO: remove implementation restriction
3502 return false;
3503
3504 if (!CB.willReturn() || !CB.doesNotThrow())
3505 return false;
3506
3507 // If the only possible side effect of the call is writing to the alloca,
3508 // and the result isn't used, we can safely remove any reads implied by the
3509 // call including those which might read the alloca itself.
3510 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3511 return Dest && Dest->Ptr == UsedV;
3512}
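// A typical candidate (sketch) is a libc-style call such as memset(%p, 0, %n)
// whose return value is unused: its only observable effect is the store
// through %p, so it becomes removable once %p's allocation is dead.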
3513
3514static std::optional<ModRefInfo>
3515 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3516 const TargetLibraryInfo &TLI, bool KnowInit) {
3517 SmallVector<Instruction*, 4> Worklist;
3518 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3519 Worklist.push_back(AI);
3520 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Ref;
3521
3522 do {
3523 Instruction *PI = Worklist.pop_back_val();
3524 for (User *U : PI->users()) {
3525 Instruction *I = cast<Instruction>(U);
3526 switch (I->getOpcode()) {
3527 default:
3528 // Give up the moment we see something we can't handle.
3529 return std::nullopt;
3530
3531 case Instruction::AddrSpaceCast:
3532 case Instruction::BitCast:
3533 case Instruction::GetElementPtr:
3534 Users.emplace_back(I);
3535 Worklist.push_back(I);
3536 continue;
3537
3538 case Instruction::ICmp: {
3539 ICmpInst *ICI = cast<ICmpInst>(I);
3540 // We can fold eq/ne comparisons with null to false/true, respectively.
3541 // We also fold comparisons in some conditions provided the alloc has
3542 // not escaped (see isNeverEqualToUnescapedAlloc).
3543 if (!ICI->isEquality())
3544 return std::nullopt;
3545 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3546 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3547 return std::nullopt;
3548
3549 // Do not fold compares to aligned_alloc calls, as they may have to
3550 // return null in case the required alignment cannot be satisfied,
3551 // unless we can prove that both alignment and size are valid.
3552 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3553 // Check if alignment and size of a call to aligned_alloc is valid,
3554 // that is alignment is a power-of-2 and the size is a multiple of the
3555 // alignment.
3556 const APInt *Alignment;
3557 const APInt *Size;
3558 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3559 match(CB->getArgOperand(1), m_APInt(Size)) &&
3560 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3561 };
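// E.g. (sketch): aligned_alloc(8, 32) passes this check (8 is a power of two
// and 32 is a multiple of 8), whereas aligned_alloc(8, 30) does not, so
// comparisons involving the latter are left alone.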
3562 auto *CB = dyn_cast<CallBase>(AI);
3563 LibFunc TheLibFunc;
3564 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3565 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3566 !AlignmentAndSizeKnownValid(CB))
3567 return std::nullopt;
3568 Users.emplace_back(I);
3569 continue;
3570 }
3571
3572 case Instruction::Call:
3573 // Ignore no-op and store intrinsics.
3575 switch (II->getIntrinsicID()) {
3576 default:
3577 return std::nullopt;
3578
3579 case Intrinsic::memmove:
3580 case Intrinsic::memcpy:
3581 case Intrinsic::memset: {
3582 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3583 if (MI->isVolatile())
3584 return std::nullopt;
3585 // Note: this could also be ModRef, but we can still interpret that
3586 // as just Mod in that case.
3587 ModRefInfo NewAccess =
3588 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3589 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3590 return std::nullopt;
3591 Access |= NewAccess;
3592 [[fallthrough]];
3593 }
3594 case Intrinsic::assume:
3595 case Intrinsic::invariant_start:
3596 case Intrinsic::invariant_end:
3597 case Intrinsic::lifetime_start:
3598 case Intrinsic::lifetime_end:
3599 case Intrinsic::objectsize:
3600 Users.emplace_back(I);
3601 continue;
3602 case Intrinsic::launder_invariant_group:
3603 case Intrinsic::strip_invariant_group:
3604 Users.emplace_back(I);
3605 Worklist.push_back(I);
3606 continue;
3607 }
3608 }
3609
3610 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3611 getAllocationFamily(I, &TLI) == Family) {
3612 Users.emplace_back(I);
3613 continue;
3614 }
3615
3616 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3617 getAllocationFamily(I, &TLI) == Family) {
3618 Users.emplace_back(I);
3619 Worklist.push_back(I);
3620 continue;
3621 }
3622
3623 if (!isRefSet(Access) &&
3624 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3626 Users.emplace_back(I);
3627 continue;
3628 }
3629
3630 return std::nullopt;
3631
3632 case Instruction::Store: {
3633 StoreInst *SI = cast<StoreInst>(I);
3634 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3635 return std::nullopt;
3636 if (isRefSet(Access))
3637 return std::nullopt;
3639 Users.emplace_back(I);
3640 continue;
3641 }
3642
3643 case Instruction::Load: {
3644 LoadInst *LI = cast<LoadInst>(I);
3645 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3646 return std::nullopt;
3647 if (isModSet(Access))
3648 return std::nullopt;
3650 Users.emplace_back(I);
3651 continue;
3652 }
3653 }
3654 llvm_unreachable("missing a return?");
3655 }
3656 } while (!Worklist.empty());
3657
3659 return Access;
3660}
3661
3664
3665 // If we have a malloc call that is used only in comparisons to null and in
3666 // free calls, delete the calls and replace the comparisons with true or
3667 // false as appropriate.
3668
3669 // This is based on the principle that we can substitute our own allocation
3670 // function (which will never return null) rather than relying on knowledge
3671 // of the specific function being called. In some sense this can change the
3672 // permitted outputs of a program (when we convert a malloc to an alloca, the
3673 // fact that the allocation is now on the stack is potentially visible, for
3674 // example), but we believe it does so in a permissible manner.
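// As a sketch of the end result: in
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// the allocation and the free can be deleted and %c folded to 'false', since
// we may reason as if using an allocator that never returns null.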
3676
3677 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3678 // before each store.
3680 std::unique_ptr<DIBuilder> DIB;
3681 if (isa<AllocaInst>(MI)) {
3682 findDbgUsers(&MI, DVRs);
3683 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3684 }
3685
3686 // Determine what getInitialValueOfAllocation would return without actually
3687 // allocating the result.
3688 bool KnowInitUndef = false;
3689 bool KnowInitZero = false;
3690 Constant *Init =
3691 getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3692 if (Init) {
3693 if (isa<UndefValue>(Init))
3694 KnowInitUndef = true;
3695 else if (Init->isNullValue())
3696 KnowInitZero = true;
3697 }
3698 // The various sanitizers don't actually return undef memory, but rather
3699 // memory initialized with special forms of runtime poison.
3700 auto &F = *MI.getFunction();
3701 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3702 F.hasFnAttribute(Attribute::SanitizeAddress))
3703 KnowInitUndef = false;
3704
3705 auto Removable =
3706 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3707 if (Removable) {
3708 for (WeakTrackingVH &User : Users) {
3709 // Lower all @llvm.objectsize and MTI calls first because they may use
3710 // a bitcast/GEP of the alloca we are removing.
3711 if (!User)
3712 continue;
3713
3715
3717 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3718 SmallVector<Instruction *> InsertedInstructions;
3719 Value *Result = lowerObjectSizeCall(
3720 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3721 for (Instruction *Inserted : InsertedInstructions)
3722 Worklist.add(Inserted);
3723 replaceInstUsesWith(*I, Result);
3725 User = nullptr; // Skip examining in the next loop.
3726 continue;
3727 }
3728 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3729 if (KnowInitZero && isRefSet(*Removable)) {
3731 Builder.SetInsertPoint(MTI);
3732 auto *M = Builder.CreateMemSet(
3733 MTI->getRawDest(),
3734 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3735 MTI->getLength(), MTI->getDestAlign());
3736 M->copyMetadata(*MTI);
3737 }
3738 }
3739 }
3740 }
3741 for (WeakTrackingVH &User : Users) {
3742 if (!User)
3743 continue;
3744
3746
3747 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3749 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3750 C->isFalseWhenEqual()));
3751 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3752 for (auto *DVR : DVRs)
3753 if (DVR->isAddressOfVariable())
3755 } else {
3756 // Casts, GEP, or anything else: we're about to delete this instruction,
3757 // so it cannot have any valid uses.
3758 Constant *Replace;
3759 if (isa<LoadInst>(I)) {
3760 assert(KnowInitZero || KnowInitUndef);
3761 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3762 : Constant::getNullValue(I->getType());
3763 } else
3764 Replace = PoisonValue::get(I->getType());
3765 replaceInstUsesWith(*I, Replace);
3766 }
3768 }
3769
3771 // Replace invoke with a NOP intrinsic to maintain the original CFG
3772 Module *M = II->getModule();
3773 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3774 auto *NewII = InvokeInst::Create(
3775 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3776 NewII->setDebugLoc(II->getDebugLoc());
3777 }
3778
3779 // Remove debug intrinsics which describe the value contained within the
3780 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3781 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3782 //
3783 // ```
3784 // define void @foo(i32 %0) {
3785 // %a = alloca i32 ; Deleted.
3786 // store i32 %0, i32* %a
3787 // dbg.value(i32 %0, "arg0") ; Not deleted.
3788 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3789 // call void @trivially_inlinable_no_op(i32* %a)
3790 // ret void
3791 // }
3792 // ```
3793 //
3794 // This may not be required if we stop describing the contents of allocas
3795 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3796 // the LowerDbgDeclare utility.
3797 //
3798 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3799 // "arg0" dbg.value may be stale after the call. However, failing to remove
3800 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3801 //
3802 // FIXME: the Assignment Tracking project has now likely made this
3803 // redundant (and it's sometimes harmful).
3804 for (auto *DVR : DVRs)
3805 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3806 DVR->eraseFromParent();
3807
3808 return eraseInstFromFunction(MI);
3809 }
3810 return nullptr;
3811}
3812
3813/// Move the call to free before a NULL test.
3814///
3815 /// Check if this free is accessed after its argument has been tested
3816 /// against NULL (property 0).
3817/// If yes, it is legal to move this call in its predecessor block.
3818///
3819/// The move is performed only if the block containing the call to free
3820/// will be removed, i.e.:
3821/// 1. it has only one predecessor P, and P has two successors
3822/// 2. it contains the call, noops, and an unconditional branch
3823/// 3. its successor is the same as its predecessor's successor
3824///
3825 /// Profitability is not a concern here; this function should be called only
3826 /// if the caller already knows this transformation would be profitable
3827 /// (e.g., for code size).
3829 const DataLayout &DL) {
3830 Value *Op = FI.getArgOperand(0);
3831 BasicBlock *FreeInstrBB = FI.getParent();
3832 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3833
3834 // Validate part of constraint #1: Only one predecessor
3835 // FIXME: We can extend the number of predecessors, but in that case, we
3836 // would duplicate the call to free in each predecessor and it may
3837 // not be profitable even for code size.
3838 if (!PredBB)
3839 return nullptr;
3840
3841 // Validate constraint #2: Does this block contain only the call to
3842 // free, noops, and an unconditional branch?
3843 BasicBlock *SuccBB;
3844 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3845 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3846 return nullptr;
3847
3848 // If there are only 2 instructions in the block, at this point
3849 // they are the call to free and the unconditional branch.
3850 // If there are more than 2 instructions, check that the extra ones are
3851 // noops, i.e., they won't hurt the performance of the generated code.
3852 if (FreeInstrBB->size() != 2) {
3853 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3854 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3855 continue;
3856 auto *Cast = dyn_cast<CastInst>(&Inst);
3857 if (!Cast || !Cast->isNoopCast(DL))
3858 return nullptr;
3859 }
3860 }
3861 // Validate the rest of constraint #1 by matching on the pred branch.
3862 Instruction *TI = PredBB->getTerminator();
3863 BasicBlock *TrueBB, *FalseBB;
3864 CmpPredicate Pred;
3865 if (!match(TI, m_Br(m_ICmp(Pred,
3867 m_Specific(Op->stripPointerCasts())),
3868 m_Zero()),
3869 TrueBB, FalseBB)))
3870 return nullptr;
3871 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3872 return nullptr;
3873
3874 // Validate constraint #3: Ensure the null case just falls through.
3875 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3876 return nullptr;
3877 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3878 "Broken CFG: missing edge from predecessor to successor");
3879
3880 // At this point, we know that everything in FreeInstrBB can be moved
3881 // before TI.
3882 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3883 if (&Instr == FreeInstrBBTerminator)
3884 break;
3885 Instr.moveBeforePreserving(TI->getIterator());
3886 }
3887 assert(FreeInstrBB->size() == 1 &&
3888 "Only the branch instruction should remain");
3889
3890 // Now that we've moved the call to free before the NULL check, we have to
3891 // remove any attributes on its parameter that imply it's non-null, because
3892 // those attributes might have only been valid because of the NULL check, and
3893 // we can get miscompiles if we keep them. This is conservative if non-null is
3894 // also implied by something other than the NULL check, but it's guaranteed to
3895 // be correct, and the conservativeness won't matter in practice, since the
3896 // attributes are irrelevant for the call to free itself and the pointer
3897 // shouldn't be used after the call.
3898 AttributeList Attrs = FI.getAttributes();
3899 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3900 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3901 if (Dereferenceable.isValid()) {
3902 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3903 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3904 Attribute::Dereferenceable);
3905 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3906 }
3907 FI.setAttributes(Attrs);
3908
3909 return &FI;
3910}
3911
3912 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3913 // free undef -> unreachable.
3914 if (isa<UndefValue>(Op)) {
3915 // Leave a marker since we can't modify the CFG here.
3917 return eraseInstFromFunction(FI);
3918 }
3919
3920 // If we have 'free null' delete the instruction. This can happen in stl code
3921 // when lots of inlining happens.
3923 return eraseInstFromFunction(FI);
3924
3925 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3926 // realloc() entirely.
3928 if (CI && CI->hasOneUse())
3929 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3930 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3931
3932 // If we optimize for code size, try to move the call to free before the null
3933 // test so that SimplifyCFG can remove the empty block and dead code
3934 // elimination can remove the branch. I.e., this helps to turn something like:
3935 // if (foo) free(foo);
3936 // into
3937 // free(foo);
3938 //
3939 // Note that we can only do this for 'free' and not for any flavor of
3940 // 'operator delete'; there is no 'operator delete' symbol for which we are
3941 // permitted to invent a call, even if we're passing in a null pointer.
3942 if (MinimizeSize) {
3943 LibFunc Func;
3944 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3946 return I;
3947 }
3948
3949 return nullptr;
3950}
3951
3952 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3953 Value *RetVal = RI.getReturnValue();
3954 if (!RetVal)
3955 return nullptr;
3956
3957 Function *F = RI.getFunction();
3958 Type *RetTy = RetVal->getType();
3959 if (RetTy->isPointerTy()) {
3960 bool HasDereferenceable =
3961 F->getAttributes().getRetDereferenceableBytes() > 0;
3962 if (F->hasRetAttribute(Attribute::NonNull) ||
3963 (HasDereferenceable &&
3965 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3966 return replaceOperand(RI, 0, V);
3967 }
3968 }
3969
3970 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
3971 return nullptr;
3972
3973 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3974 if (ReturnClass == fcNone)
3975 return nullptr;
3976
3977 KnownFPClass KnownClass;
3978 Value *Simplified =
3979 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
3980 if (!Simplified)
3981 return nullptr;
3982
3983 return ReturnInst::Create(RI.getContext(), Simplified);
3984}
3985
3986// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3987 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3988 // Try to remove the previous instruction if it must lead to unreachable.
3989 // This includes instructions like stores and "llvm.assume" that may not get
3990 // removed by simple dead code elimination.
3991 bool Changed = false;
3992 while (Instruction *Prev = I.getPrevNode()) {
3993 // While we theoretically can erase EH, that would result in a block that
3994 // used to start with an EH no longer starting with EH, which is invalid.
3995 // To make it valid, we'd need to fixup predecessors to no longer refer to
3996 // this block, but that changes CFG, which is not allowed in InstCombine.
3997 if (Prev->isEHPad())
3998 break; // Can not drop any more instructions. We're done here.
3999
4001 break; // Can not drop any more instructions. We're done here.
4002 // Otherwise, this instruction can be freely erased,
4003 // even if it is not side-effect free.
4004
4005 // A value may still have uses before we process it here (for example, in
4006 // another unreachable block), so convert those to poison.
4007 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4008 eraseInstFromFunction(*Prev);
4009 Changed = true;
4010 }
4011 return Changed;
4012}
4013
4018
4020 assert(BI.isUnconditional() && "Only for unconditional branches.");
4021
4022 // If this store is the second-to-last instruction in the basic block
4023 // (excluding debug info) and if the block ends with
4024 // an unconditional branch, try to move the store to the successor block.
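// Sketch: in
//   store i32 %v, ptr %p
//   br label %next
// the store may be sunk into %next when the usual safety checks pass, which
// can expose further simplifications in the successor block.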
4025
4026 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4027 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4028 do {
4029 if (BBI != FirstInstr)
4030 --BBI;
4031 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4032
4033 return dyn_cast<StoreInst>(BBI);
4034 };
4035
4036 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4038 return &BI;
4039
4040 return nullptr;
4041}
4042
4043 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
4044 SmallVectorImpl<BasicBlock *> &Worklist) {
4045 if (!DeadEdges.insert({From, To}).second)
4046 return;
4047
4048 // Replace phi node operands in successor with poison.
4049 for (PHINode &PN : To->phis())
4050 for (Use &U : PN.incoming_values())
4051 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4052 replaceUse(U, PoisonValue::get(PN.getType()));
4053 addToWorklist(&PN);
4054 MadeIRChange = true;
4055 }
4056
4057 Worklist.push_back(To);
4058}
4059
4060// Under the assumption that I is unreachable, remove it and following
4061// instructions. Changes are reported directly to MadeIRChange.
4062 void InstCombinerImpl::handleUnreachableFrom(
4063 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4064 BasicBlock *BB = I->getParent();
4065 for (Instruction &Inst : make_early_inc_range(
4066 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4067 std::next(I->getReverseIterator())))) {
4068 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4069 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4070 MadeIRChange = true;
4071 }
4072 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4073 continue;
4074 // RemoveDIs: erase debug-info on this instruction manually.
4075 Inst.dropDbgRecords();
4077 MadeIRChange = true;
4078 }
4079
4082 MadeIRChange = true;
4083 for (Value *V : Changed)
4085 }
4086
4087 // Handle potentially dead successors.
4088 for (BasicBlock *Succ : successors(BB))
4089 addDeadEdge(BB, Succ, Worklist);
4090}
4091
4092 void InstCombinerImpl::handlePotentiallyDeadBlocks(
4093 SmallVectorImpl<BasicBlock *> &Worklist) {
4094 while (!Worklist.empty()) {
4095 BasicBlock *BB = Worklist.pop_back_val();
4096 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4097 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4098 }))
4099 continue;
4100
4102 }
4103}
4104
4105 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4106 BasicBlock *LiveSucc) {
4107 SmallVector<BasicBlock *> Worklist;
4108 for (BasicBlock *Succ : successors(BB)) {
4109 // The live successor isn't dead.
4110 if (Succ == LiveSucc)
4111 continue;
4112
4113 addDeadEdge(BB, Succ, Worklist);
4114 }
4115
4116 handlePotentiallyDeadBlocks(Worklist);
4117 }
4118
4119 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4120 if (BI.isUnconditional())
4121 return visitUnconditionalBranchInst(BI);
4122
4123 // Change br (not X), label True, label False to: br X, label False, True
4124 Value *Cond = BI.getCondition();
4125 Value *X;
4126 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4127 // Swap Destinations and condition...
4128 BI.swapSuccessors();
4129 if (BPI)
4130 BPI->swapSuccEdgesProbabilities(BI.getParent());
4131 return replaceOperand(BI, 0, X);
4132 }
4133
4134 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4135 // This is done by inverting the condition and swapping successors:
4136 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4137 Value *Y;
4138 if (isa<SelectInst>(Cond) &&
4139 match(Cond,
4141 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4142 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4143 BI.swapSuccessors();
4144 if (BPI)
4145 BPI->swapSuccEdgesProbabilities(BI.getParent());
4146 return replaceOperand(BI, 0, Or);
4147 }
4148
4149 // If the condition is irrelevant, remove the use so that other
4150 // transforms on the condition become more effective.
4151 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4152 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4153
4154 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4155 CmpPredicate Pred;
4156 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4157 !isCanonicalPredicate(Pred)) {
4158 // Swap destinations and condition.
4159 auto *Cmp = cast<CmpInst>(Cond);
4160 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4161 BI.swapSuccessors();
4162 if (BPI)
4163 BPI->swapSuccEdgesProbabilities(BI.getParent());
4164 Worklist.push(Cmp);
4165 return &BI;
4166 }
4167
4168 if (isa<UndefValue>(Cond)) {
4169 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4170 return nullptr;
4171 }
4172 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4174 BI.getSuccessor(!CI->getZExtValue()));
4175 return nullptr;
4176 }
4177
4178 // Replace all dominated uses of the condition with true/false
4179 // Ignore constant expressions to avoid iterating over uses on other
4180 // functions.
4181 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4182 for (auto &U : make_early_inc_range(Cond->uses())) {
4183 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4184 if (DT.dominates(Edge0, U)) {
4185 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4186 addToWorklist(cast<Instruction>(U.getUser()));
4187 continue;
4188 }
4189 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4190 if (DT.dominates(Edge1, U)) {
4191 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4192 addToWorklist(cast<Instruction>(U.getUser()));
4193 }
4194 }
4195 }
4196
4197 DC.registerBranch(&BI);
4198 return nullptr;
4199}
4200
4201// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4202// we can prove that both (switch C) and (switch X) go to the default when cond
4203// is false/true.
4206 bool IsTrueArm) {
4207 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4208 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4209 if (!C)
4210 return nullptr;
4211
4212 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4213 if (CstBB != SI.getDefaultDest())
4214 return nullptr;
4215 Value *X = Select->getOperand(3 - CstOpIdx);
4216 CmpPredicate Pred;
4217 const APInt *RHSC;
4218 if (!match(Select->getCondition(),
4219 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4220 return nullptr;
4221 if (IsTrueArm)
4222 Pred = ICmpInst::getInversePredicate(Pred);
4223
4224 // See whether we can replace the select with X
4226 for (auto Case : SI.cases())
4227 if (!CR.contains(Case.getCaseValue()->getValue()))
4228 return nullptr;
4229
4230 return X;
4231}
4232
4234 Value *Cond = SI.getCondition();
4235 Value *Op0;
4236 ConstantInt *AddRHS;
4237 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
4238 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
4239 for (auto Case : SI.cases()) {
4240 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
4241 assert(isa<ConstantInt>(NewCase) &&
4242 "Result of expression should be constant");
4243 Case.setValue(cast<ConstantInt>(NewCase));
4244 }
4245 return replaceOperand(SI, 0, Op0);
4246 }
4247
4248 ConstantInt *SubLHS;
4249 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
4250 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
4251 for (auto Case : SI.cases()) {
4252 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
4253 assert(isa<ConstantInt>(NewCase) &&
4254 "Result of expression should be constant");
4255 Case.setValue(cast<ConstantInt>(NewCase));
4256 }
4257 return replaceOperand(SI, 0, Op0);
4258 }
4259
4260 uint64_t ShiftAmt;
4261 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4262 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4263 all_of(SI.cases(), [&](const auto &Case) {
4264 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4265 })) {
4266 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4268 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4269 Shl->hasOneUse()) {
4270 Value *NewCond = Op0;
4271 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4272 // If the shift may wrap, we need to mask off the shifted bits.
4273 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4274 NewCond = Builder.CreateAnd(
4275 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4276 }
4277 for (auto Case : SI.cases()) {
4278 const APInt &CaseVal = Case.getCaseValue()->getValue();
4279 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4280 : CaseVal.lshr(ShiftAmt);
4281 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4282 }
4283 return replaceOperand(SI, 0, NewCond);
4284 }
4285 }
4286
4287 // Fold switch(zext/sext(X)) into switch(X) if possible.
4288 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4289 bool IsZExt = isa<ZExtInst>(Cond);
4290 Type *SrcTy = Op0->getType();
4291 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4292
4293 if (all_of(SI.cases(), [&](const auto &Case) {
4294 const APInt &CaseVal = Case.getCaseValue()->getValue();
4295 return IsZExt ? CaseVal.isIntN(NewWidth)
4296 : CaseVal.isSignedIntN(NewWidth);
4297 })) {
4298 for (auto &Case : SI.cases()) {
4299 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4300 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4301 }
4302 return replaceOperand(SI, 0, Op0);
4303 }
4304 }
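// Sketch of the effect: when every case value fits in the narrow source type,
//   %w = zext i8 %x to i32
//   switch i32 %w, ...
// becomes
//   switch i8 %x, ...
// with each case value truncated to i8.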
4305
4306 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4307 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4308 if (Value *V =
4309 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4310 return replaceOperand(SI, 0, V);
4311 if (Value *V =
4312 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4313 return replaceOperand(SI, 0, V);
4314 }
4315
4316 KnownBits Known = computeKnownBits(Cond, &SI);
4317 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4318 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4319
4320 // Compute the number of leading bits we can ignore.
4321 // TODO: A better way to determine this would use ComputeNumSignBits().
4322 for (const auto &C : SI.cases()) {
4323 LeadingKnownZeros =
4324 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4325 LeadingKnownOnes =
4326 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4327 }
4328
4329 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4330
4331 // Shrink the condition operand if the new type is smaller than the old type.
4332 // But do not shrink to a non-standard type, because backend can't generate
4333 // good code for that yet.
4334 // TODO: We can make it aggressive again after fixing PR39569.
4335 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4336 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4337 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4338 Builder.SetInsertPoint(&SI);
4339 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4340
4341 for (auto Case : SI.cases()) {
4342 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4343 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4344 }
4345 return replaceOperand(SI, 0, NewCond);
4346 }
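// Sketch: if the known leading bits show the condition effectively fits in,
// say, 16 bits and i16 is a type the target handles well (shouldChangeType),
// the condition is truncated:
//   %t = trunc i32 %cond to i16
//   switch i16 %t, ...   ; case values truncated accordingly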
4347
4348 if (isa<UndefValue>(Cond)) {
4349 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4350 return nullptr;
4351 }
4352 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4354 SI.findCaseValue(CI)->getCaseSuccessor());
4355 return nullptr;
4356 }
4357
4358 return nullptr;
4359}
4360
4362InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4364 if (!WO)
4365 return nullptr;
4366
4367 Intrinsic::ID OvID = WO->getIntrinsicID();
4368 const APInt *C = nullptr;
4369 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4370 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4371 OvID == Intrinsic::umul_with_overflow)) {
4372 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4373 if (C->isAllOnes())
4374 return BinaryOperator::CreateNeg(WO->getLHS());
4375 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4376 if (C->isPowerOf2()) {
4377 return BinaryOperator::CreateShl(
4378 WO->getLHS(),
4379 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4380 }
4381 }
4382 }
4383
4384 // We're extracting from an overflow intrinsic. See if we're the only user.
4385 // That allows us to simplify multiple result intrinsics to simpler things
4386 // that just get one value.
4387 if (!WO->hasOneUse())
4388 return nullptr;
4389
4390 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4391 // and replace it with a traditional binary instruction.
4392 if (*EV.idx_begin() == 0) {
4393 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4394 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4395 // Replace the old instruction's uses with poison.
4396 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4398 return BinaryOperator::Create(BinOp, LHS, RHS);
4399 }
4400
4401 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4402
4403 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4404 if (OvID == Intrinsic::usub_with_overflow)
4405 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4406
4407 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4408 // +1 is not possible because we assume signed values.
4409 if (OvID == Intrinsic::smul_with_overflow &&
4410 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4411 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4412
4413 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4414 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4415 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4416 // Only handle even bitwidths for performance reasons.
4417 if (BitWidth % 2 == 0)
4418 return new ICmpInst(
4419 ICmpInst::ICMP_UGT, WO->getLHS(),
4420 ConstantInt::get(WO->getLHS()->getType(),
4422 }
4423
4424 // If only the overflow result is used, and the right hand side is a
4425 // constant (or constant splat), we can remove the intrinsic by directly
4426 // checking for overflow.
4427 if (C) {
4428 // Compute the no-wrap range for LHS given RHS=C, then construct an
4429 // equivalent icmp, potentially using an offset.
4430 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4431 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4432
4433 CmpInst::Predicate Pred;
4434 APInt NewRHSC, Offset;
4435 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4436 auto *OpTy = WO->getRHS()->getType();
4437 auto *NewLHS = WO->getLHS();
4438 if (Offset != 0)
4439 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4440 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4441 ConstantInt::get(OpTy, NewRHSC));
4442 }
4443
4444 return nullptr;
4445}
4446
4449 InstCombiner::BuilderTy &Builder) {
4450 // Helper to fold frexp of select to select of frexp.
4451
4452 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4453 return nullptr;
4455 Value *TrueVal = SelectInst->getTrueValue();
4456 Value *FalseVal = SelectInst->getFalseValue();
4457
4458 const APFloat *ConstVal = nullptr;
4459 Value *VarOp = nullptr;
4460 bool ConstIsTrue = false;
4461
4462 if (match(TrueVal, m_APFloat(ConstVal))) {
4463 VarOp = FalseVal;
4464 ConstIsTrue = true;
4465 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4466 VarOp = TrueVal;
4467 ConstIsTrue = false;
4468 } else {
4469 return nullptr;
4470 }
4471
4472 Builder.SetInsertPoint(&EV);
4473
4474 CallInst *NewFrexp =
4475 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4476 NewFrexp->copyIRFlags(FrexpCall);
4477
4478 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4479
4480 int Exp;
4481 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4482
4483 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4484
4485 Value *NewSel = Builder.CreateSelectFMF(
4486 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4487 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4488 return NewSel;
4489}
4490 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4491 Value *Agg = EV.getAggregateOperand();
4492
4493 if (!EV.hasIndices())
4494 return replaceInstUsesWith(EV, Agg);
4495
4496 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4497 SQ.getWithInstruction(&EV)))
4498 return replaceInstUsesWith(EV, V);
4499
4500 Value *Cond, *TrueVal, *FalseVal;
4502 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4503 auto *SelInst =
4504 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4505 if (Value *Result =
4506 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4507 return replaceInstUsesWith(EV, Result);
4508 }
4509 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4510 // We're extracting from an insertvalue instruction, compare the indices
4511 const unsigned *exti, *exte, *insi, *inse;
4512 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4513 exte = EV.idx_end(), inse = IV->idx_end();
4514 exti != exte && insi != inse;
4515 ++exti, ++insi) {
4516 if (*insi != *exti)
4517 // The insert and extract both reference distinctly different elements.
4518 // This means the extract is not influenced by the insert, and we can
4519 // replace the aggregate operand of the extract with the aggregate
4520 // operand of the insert. i.e., replace
4521 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4522 // %E = extractvalue { i32, { i32 } } %I, 0
4523 // with
4524 // %E = extractvalue { i32, { i32 } } %A, 0
4525 return ExtractValueInst::Create(IV->getAggregateOperand(),
4526 EV.getIndices());
4527 }
4528 if (exti == exte && insi == inse)
4529 // Both iterators are at the end: Index lists are identical. Replace
4530 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4531 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4532 // with "i32 42"
4533 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4534 if (exti == exte) {
4535 // The extract list is a prefix of the insert list. i.e. replace
4536 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4537 // %E = extractvalue { i32, { i32 } } %I, 1
4538 // with
4539 // %X = extractvalue { i32, { i32 } } %A, 1
4540 // %E = insertvalue { i32 } %X, i32 42, 0
4541 // by switching the order of the insert and extract (though the
4542 // insertvalue should be left in, since it may have other uses).
4543 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4544 EV.getIndices());
4545 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4546 ArrayRef(insi, inse));
4547 }
4548 if (insi == inse)
4549 // The insert list is a prefix of the extract list
4550 // We can simply remove the common indices from the extract and make it
4551 // operate on the inserted value instead of the insertvalue result.
4552 // i.e., replace
4553 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4554 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4555 // with
4556 // %E extractvalue { i32 } { i32 42 }, 0
4557 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4558 ArrayRef(exti, exte));
4559 }
4560
4561 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4562 return R;
4563
4564 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4565 // Bail out if the aggregate contains scalable vector type
4566 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4567 STy && STy->isScalableTy())
4568 return nullptr;
4569
4570 // If the (non-volatile) load only has one use, we can rewrite this to a
4571 // load from a GEP. This reduces the size of the load. If a load is used
4572 // only by extractvalue instructions then this either must have been
4573 // optimized before, or it is a struct with padding, in which case we
4574 // don't want to do the transformation as it loses padding knowledge.
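// Sketch: with %agg = load { i32, i64 }, ptr %p (a simple, single-use load),
//   %v = extractvalue { i32, i64 } %agg, 1
// can instead load just the needed field:
//   %q = getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1
//   %v = load i64, ptr %q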
4575 if (L->isSimple() && L->hasOneUse()) {
4576 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4577 SmallVector<Value*, 4> Indices;
4578 // Prefix an i32 0 since we need the first element.
4579 Indices.push_back(Builder.getInt32(0));
4580 for (unsigned Idx : EV.indices())
4581 Indices.push_back(Builder.getInt32(Idx));
4582
4583 // We need to insert these at the location of the old load, not at that of
4584 // the extractvalue.
4585 Builder.SetInsertPoint(L);
4586 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4587 L->getPointerOperand(), Indices);
4588 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4589 // Whatever aliasing information we had for the original load must also
4590 // hold for the smaller load, so propagate the annotations.
4591 NL->setAAMetadata(L->getAAMetadata());
4592 // Returning the load directly will cause the main loop to insert it in
4593 // the wrong spot, so use replaceInstUsesWith().
4594 return replaceInstUsesWith(EV, NL);
4595 }
4596 }
4597
4598 if (auto *PN = dyn_cast<PHINode>(Agg))
4599 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4600 return Res;
4601
4602 // Canonicalize extract (select Cond, TV, FV)
4603 // -> select cond, (extract TV), (extract FV)
4604 if (auto *SI = dyn_cast<SelectInst>(Agg))
4605 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4606 return R;
4607
4608 // We could simplify extracts from other values. Note that nested extracts may
4609 // already be simplified implicitly by the above: extract (extract (insert) )
4610 // will be translated into extract ( insert ( extract ) ) first and then just
4611 // the value inserted, if appropriate. Similarly for extracts from single-use
4612 // loads: extract (extract (load)) will be translated to extract (load (gep))
4613 // and if again single-use then via load (gep (gep)) to load (gep).
4614 // However, double extracts from e.g. function arguments or return values
4615 // aren't handled yet.
4616 return nullptr;
4617}
4618
4619/// Return 'true' if the given typeinfo will match anything.
4620static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4621 switch (Personality) {
4625 // The GCC C EH and Rust personalities only exist to support cleanups, so
4626 // it's not clear what the semantics of catch clauses are.
4627 return false;
4629 return false;
4631 // While __gnat_all_others_value will match any Ada exception, it doesn't
4632 // match foreign exceptions (or didn't, before gcc-4.7).
4633 return false;
4644 return TypeInfo->isNullValue();
4645 }
4646 llvm_unreachable("invalid enum");
4647}
4648
4649static bool shorter_filter(const Value *LHS, const Value *RHS) {
4650 return
4651 cast<ArrayType>(LHS->getType())->getNumElements()
4652 <
4653 cast<ArrayType>(RHS->getType())->getNumElements();
4654}
4655
4657 // The logic here should be correct for any real-world personality function.
4658 // However if that turns out not to be true, the offending logic can always
4659 // be conditioned on the personality function, like the catch-all logic is.
4660 EHPersonality Personality =
4661 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4662
4663 // Simplify the list of clauses, eg by removing repeated catch clauses
4664 // (these are often created by inlining).
4665 bool MakeNewInstruction = false; // If true, recreate using the following:
4666 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4667 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4668
4669 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4670 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4671 bool isLastClause = i + 1 == e;
4672 if (LI.isCatch(i)) {
4673 // A catch clause.
4674 Constant *CatchClause = LI.getClause(i);
4675 Constant *TypeInfo = CatchClause->stripPointerCasts();
4676
4677 // If we already saw this clause, there is no point in having a second
4678 // copy of it.
4679 if (AlreadyCaught.insert(TypeInfo).second) {
4680 // This catch clause was not already seen.
4681 NewClauses.push_back(CatchClause);
4682 } else {
4683 // Repeated catch clause - drop the redundant copy.
4684 MakeNewInstruction = true;
4685 }
4686
4687 // If this is a catch-all then there is no point in keeping any following
4688 // clauses or marking the landingpad as having a cleanup.
4689 if (isCatchAll(Personality, TypeInfo)) {
4690 if (!isLastClause)
4691 MakeNewInstruction = true;
4692 CleanupFlag = false;
4693 break;
4694 }
4695 } else {
4696 // A filter clause. If any of the filter elements were already caught
4697 // then they can be dropped from the filter. It is tempting to try to
4698 // exploit the filter further by saying that any typeinfo that does not
4699 // occur in the filter can't be caught later (and thus can be dropped).
4700 // However this would be wrong, since typeinfos can match without being
4701 // equal (for example if one represents a C++ class, and the other some
4702 // class derived from it).
4703 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4704 Constant *FilterClause = LI.getClause(i);
4705 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4706 unsigned NumTypeInfos = FilterType->getNumElements();
4707
4708 // An empty filter catches everything, so there is no point in keeping any
4709 // following clauses or marking the landingpad as having a cleanup. By
4710 // dealing with this case here the following code is made a bit simpler.
4711 if (!NumTypeInfos) {
4712 NewClauses.push_back(FilterClause);
4713 if (!isLastClause)
4714 MakeNewInstruction = true;
4715 CleanupFlag = false;
4716 break;
4717 }
4718
4719 bool MakeNewFilter = false; // If true, make a new filter.
4720 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4721 if (isa<ConstantAggregateZero>(FilterClause)) {
4722 // Not an empty filter - it contains at least one null typeinfo.
4723 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4724 Constant *TypeInfo =
4726 // If this typeinfo is a catch-all then the filter can never match.
4727 if (isCatchAll(Personality, TypeInfo)) {
4728 // Throw the filter away.
4729 MakeNewInstruction = true;
4730 continue;
4731 }
4732
4733 // There is no point in having multiple copies of this typeinfo, so
4734 // discard all but the first copy if there is more than one.
4735 NewFilterElts.push_back(TypeInfo);
4736 if (NumTypeInfos > 1)
4737 MakeNewFilter = true;
4738 } else {
4739 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4740 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4741 NewFilterElts.reserve(NumTypeInfos);
4742
4743 // Remove any filter elements that were already caught or that already
4744 // occurred in the filter. While there, see if any of the elements are
4745 // catch-alls. If so, the filter can be discarded.
4746 bool SawCatchAll = false;
4747 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4748 Constant *Elt = Filter->getOperand(j);
4749 Constant *TypeInfo = Elt->stripPointerCasts();
4750 if (isCatchAll(Personality, TypeInfo)) {
4751 // This element is a catch-all. Bail out, noting this fact.
4752 SawCatchAll = true;
4753 break;
4754 }
4755
4756 // Even if we've seen a type in a catch clause, we don't want to
4757 // remove it from the filter. An unexpected type handler may be
4758 // set up for a call site which throws an exception of the same
4759 // type caught. In order for the exception thrown by the unexpected
4760 // handler to propagate correctly, the filter must be correctly
4761 // described for the call site.
4762 //
4763 // Example:
4764 //
4765 // void unexpected() { throw 1;}
4766 // void foo() throw (int) {
4767 // std::set_unexpected(unexpected);
4768 // try {
4769 // throw 2.0;
4770 // } catch (int i) {}
4771 // }
4772
4773 // There is no point in having multiple copies of the same typeinfo in
4774 // a filter, so only add it if we didn't already.
4775 if (SeenInFilter.insert(TypeInfo).second)
4776 NewFilterElts.push_back(cast<Constant>(Elt));
4777 }
4778 // A filter containing a catch-all cannot match anything by definition.
4779 if (SawCatchAll) {
4780 // Throw the filter away.
4781 MakeNewInstruction = true;
4782 continue;
4783 }
4784
4785 // If we dropped something from the filter, make a new one.
4786 if (NewFilterElts.size() < NumTypeInfos)
4787 MakeNewFilter = true;
4788 }
4789 if (MakeNewFilter) {
4790 FilterType = ArrayType::get(FilterType->getElementType(),
4791 NewFilterElts.size());
4792 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4793 MakeNewInstruction = true;
4794 }
4795
4796 NewClauses.push_back(FilterClause);
4797
4798 // If the new filter is empty then it will catch everything so there is
4799 // no point in keeping any following clauses or marking the landingpad
4800 // as having a cleanup. The case of the original filter being empty was
4801 // already handled above.
4802 if (MakeNewFilter && !NewFilterElts.size()) {
4803 assert(MakeNewInstruction && "New filter but not a new instruction!");
4804 CleanupFlag = false;
4805 break;
4806 }
4807 }
4808 }
4809
4810 // If several filters occur in a row then reorder them so that the shortest
4811 // filters come first (those with the smallest number of elements). This is
4812 // advantageous because shorter filters are more likely to match, speeding up
4813 // unwinding, but mostly because it increases the effectiveness of the other
4814 // filter optimizations below.
4815 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4816 unsigned j;
4817 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4818 for (j = i; j != e; ++j)
4819 if (!isa<ArrayType>(NewClauses[j]->getType()))
4820 break;
4821
4822 // Check whether the filters are already sorted by length. We need to know
4823 // if sorting them is actually going to do anything so that we only make a
4824 // new landingpad instruction if it does.
4825 for (unsigned k = i; k + 1 < j; ++k)
4826 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4827 // Not sorted, so sort the filters now. Doing an unstable sort would be
4828 // correct too but reordering filters pointlessly might confuse users.
4829 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4831 MakeNewInstruction = true;
4832 break;
4833 }
4834
4835 // Look for the next batch of filters.
4836 i = j + 1;
4837 }
4838
4839 // If typeinfos matched if and only if they were equal, then the elements of a filter L
4840 // that occurs later than a filter F could be replaced by the intersection of
4841 // the elements of F and L. In reality two typeinfos can match without being
4842 // equal (for example if one represents a C++ class, and the other some class
4843 // derived from it) so it would be wrong to perform this transform in general.
4844 // However the transform is correct and useful if F is a subset of L. In that
4845 // case L can be replaced by F, and thus removed altogether since repeating a
4846 // filter is pointless. So here we look at all pairs of filters F and L where
4847 // L follows F in the list of clauses, and remove L if every element of F is
4848 // an element of L. This can occur when inlining C++ functions with exception
4849 // specifications.
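  // Illustrative sketch (hypothetical typeinfos, not taken from this file):
  // given the clauses
  //   filter [1 x ptr] [ptr @_ZTIi]              ; F = {int}
  //   filter [2 x ptr] [ptr @_ZTIi, ptr @_ZTId]  ; L = {int, double}
  // every element of F also occurs in L, so the later filter L is redundant
  // and can be removed.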
4850 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4851 // Examine each filter in turn.
4852 Value *Filter = NewClauses[i];
4853 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4854 if (!FTy)
4855 // Not a filter - skip it.
4856 continue;
4857 unsigned FElts = FTy->getNumElements();
4858 // Examine each filter following this one. Doing this backwards means that
4859 // we don't have to worry about filters disappearing under us when removed.
4860 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4861 Value *LFilter = NewClauses[j];
4862 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4863 if (!LTy)
4864 // Not a filter - skip it.
4865 continue;
4866 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4867 // an element of LFilter, then discard LFilter.
4868 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4869 // If Filter is empty then it is a subset of LFilter.
4870 if (!FElts) {
4871 // Discard LFilter.
4872 NewClauses.erase(J);
4873 MakeNewInstruction = true;
4874 // Move on to the next filter.
4875 continue;
4876 }
4877 unsigned LElts = LTy->getNumElements();
4878 // If Filter is longer than LFilter then it cannot be a subset of it.
4879 if (FElts > LElts)
4880 // Move on to the next filter.
4881 continue;
4882 // At this point we know that LFilter has at least one element.
4883 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4884 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4885 // already know that Filter is not longer than LFilter).
4887 assert(FElts <= LElts && "Should have handled this case earlier!");
4888 // Discard LFilter.
4889 NewClauses.erase(J);
4890 MakeNewInstruction = true;
4891 }
4892 // Move on to the next filter.
4893 continue;
4894 }
4895 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4896 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4897 // Since Filter is non-empty and contains only zeros, it is a subset of
4898 // LFilter iff LFilter contains a zero.
4899 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4900 for (unsigned l = 0; l != LElts; ++l)
4901 if (LArray->getOperand(l)->isNullValue()) {
4902 // LFilter contains a zero - discard it.
4903 NewClauses.erase(J);
4904 MakeNewInstruction = true;
4905 break;
4906 }
4907 // Move on to the next filter.
4908 continue;
4909 }
4910 // At this point we know that both filters are ConstantArrays. Loop over
4911 // operands to see whether every element of Filter is also an element of
4912 // LFilter. Since filters tend to be short this is probably faster than
4913 // using a method that scales nicely.
4915 bool AllFound = true;
4916 for (unsigned f = 0; f != FElts; ++f) {
4917 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4918 AllFound = false;
4919 for (unsigned l = 0; l != LElts; ++l) {
4920 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4921 if (LTypeInfo == FTypeInfo) {
4922 AllFound = true;
4923 break;
4924 }
4925 }
4926 if (!AllFound)
4927 break;
4928 }
4929 if (AllFound) {
4930 // Discard LFilter.
4931 NewClauses.erase(J);
4932 MakeNewInstruction = true;
4933 }
4934 // Move on to the next filter.
4935 }
4936 }
4937
4938 // If we changed any of the clauses, replace the old landingpad instruction
4939 // with a new one.
4940 if (MakeNewInstruction) {
4942 NewClauses.size());
4943 for (Constant *C : NewClauses)
4944 NLI->addClause(C);
4945 // A landing pad with no clauses must have the cleanup flag set. It is
4946 // theoretically possible, though highly unlikely, that we eliminated all
4947 // clauses. If so, force the cleanup flag to true.
4948 if (NewClauses.empty())
4949 CleanupFlag = true;
4950 NLI->setCleanup(CleanupFlag);
4951 return NLI;
4952 }
4953
4954 // Even if none of the clauses changed, we may nonetheless have understood
4955 // that the cleanup flag is pointless. Clear it if so.
4956 if (LI.isCleanup() != CleanupFlag) {
4957 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4958 LI.setCleanup(CleanupFlag);
4959 return &LI;
4960 }
4961
4962 return nullptr;
4963}
4964
4965Value *
4967 // Try to push freeze through instructions that propagate but don't produce
4968 // poison as far as possible. If an operand of freeze does not produce poison
4969 // then push the freeze through to the operands that are not guaranteed
4970 // non-poison. The actual transform is as follows.
4971 // Op1 = ... ; Op1 can be poison
4972 // Op0 = Inst(Op1, NonPoisonOps...)
4973 // ... = Freeze(Op0)
4974 // =>
4975 // Op1 = ...
4976 // Op1.fr = Freeze(Op1)
4977 // ... = Inst(Op1.fr, NonPoisonOps...)
4978
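  // As a concrete sketch (hypothetical IR, with nsw as the only poison source):
  //   %a = add nsw i32 %x, 1
  //   %f = freeze i32 %a
  // becomes
  //   %x.fr = freeze i32 %x
  //   %a = add i32 %x.fr, 1   ; nsw dropped; %a is now the frozen result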
4979 auto CanPushFreeze = [](Value *V) {
4980 if (!isa<Instruction>(V) || isa<PHINode>(V))
4981 return false;
4982
4983 // We can't push the freeze through an instruction which can itself create
4984 // poison. If the only source of new poison is flags, we can simply
4985 // strip them (since we know the only use is the freeze and nothing can
4986 // benefit from them).
4988 /*ConsiderFlagsAndMetadata*/ false);
4989 };
4990
4991 // Pushing freezes up long instruction chains can be expensive. Instead,
4992 // we directly push the freeze all the way to the leaves. However, we leave
4993 // deduplication of freezes on the same value for freezeOtherUses().
4994 Use *OrigUse = &OrigFI.getOperandUse(0);
4997 Worklist.push_back(OrigUse);
4998 while (!Worklist.empty()) {
4999 auto *U = Worklist.pop_back_val();
5000 Value *V = U->get();
5001 if (!CanPushFreeze(V)) {
5002 // If we can't push through the original instruction, abort the transform.
5003 if (U == OrigUse)
5004 return nullptr;
5005
5006 auto *UserI = cast<Instruction>(U->getUser());
5007 Builder.SetInsertPoint(UserI);
5008 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5009 U->set(Frozen);
5010 continue;
5011 }
5012
5013 auto *I = cast<Instruction>(V);
5014 if (!Visited.insert(I).second)
5015 continue;
5016
5017 // reverse() to emit freezes in a more natural order.
5018 for (Use &Op : reverse(I->operands())) {
5019 Value *OpV = Op.get();
5021 continue;
5022 Worklist.push_back(&Op);
5023 }
5024
5025 I->dropPoisonGeneratingAnnotations();
5026 this->Worklist.add(I);
5027 }
5028
5029 return OrigUse->get();
5030}
5031
5033 PHINode *PN) {
5034 // Detect whether this is a recurrence with a start value and some number of
5035 // backedge values. We'll check whether we can push the freeze through the
5036 // backedge values (possibly dropping poison flags along the way) until we
5037 // reach the phi again. In that case, we can move the freeze to the start
5038 // value.
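  // Illustrative sketch (hypothetical IR): for
  //   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add nuw i32 %iv, 1
  //   %f = freeze i32 %iv
  // the nuw flag is dropped from the add and the freeze is moved onto %start,
  // so the phi itself is known not to be poison.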
5039 Use *StartU = nullptr;
5041 for (Use &U : PN->incoming_values()) {
5042 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5043 // Add backedge value to worklist.
5044 Worklist.push_back(U.get());
5045 continue;
5046 }
5047
5048 // Don't bother handling multiple start values.
5049 if (StartU)
5050 return nullptr;
5051 StartU = &U;
5052 }
5053
5054 if (!StartU || Worklist.empty())
5055 return nullptr; // Not a recurrence.
5056
5057 Value *StartV = StartU->get();
5058 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5059 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5060 // We can't insert freeze if the start value is the result of the
5061 // terminator (e.g. an invoke).
5062 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5063 return nullptr;
5064
5067 while (!Worklist.empty()) {
5068 Value *V = Worklist.pop_back_val();
5069 if (!Visited.insert(V).second)
5070 continue;
5071
5072 if (Visited.size() > 32)
5073 return nullptr; // Limit the total number of values we inspect.
5074
5075 // Assume that PN is non-poison, because it will be after the transform.
5076 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5077 continue;
5078
5081 /*ConsiderFlagsAndMetadata*/ false))
5082 return nullptr;
5083
5084 DropFlags.push_back(I);
5085 append_range(Worklist, I->operands());
5086 }
5087
5088 for (Instruction *I : DropFlags)
5089 I->dropPoisonGeneratingAnnotations();
5090
5091 if (StartNeedsFreeze) {
5092 Builder.SetInsertPoint(StartBB->getTerminator());
5093 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5094 StartV->getName() + ".fr");
5095 replaceUse(*StartU, FrozenStartV);
5096 }
5097 return replaceInstUsesWith(FI, PN);
5098}
5099
5101 Value *Op = FI.getOperand(0);
5102
5103 if (isa<Constant>(Op) || Op->hasOneUse())
5104 return false;
5105
5106 // Move the freeze directly after the definition of its operand, so that
5107 // it dominates the maximum number of uses. Note that it may not dominate
5108 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5109 // the normal/default destination. This is why the domination check in the
5110 // replacement below is still necessary.
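  // Sketch of the effect (hypothetical IR): given
  //   %x = call i32 @f()
  //   %u = add i32 %x, 1
  //   %fr = freeze i32 %x
  // the freeze is moved to just after the call, and uses of %x that the freeze
  // dominates (here %u) are rewritten to use %fr instead.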
5111 BasicBlock::iterator MoveBefore;
5112 if (isa<Argument>(Op)) {
5113 MoveBefore =
5115 } else {
5116 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5117 if (!MoveBeforeOpt)
5118 return false;
5119 MoveBefore = *MoveBeforeOpt;
5120 }
5121
5122 // Re-point iterator to come after any debug-info records.
5123 MoveBefore.setHeadBit(false);
5124
5125 bool Changed = false;
5126 if (&FI != &*MoveBefore) {
5127 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5128 Changed = true;
5129 }
5130
5131 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5132 bool Dominates = DT.dominates(&FI, U);
5133 Changed |= Dominates;
5134 return Dominates;
5135 });
5136
5137 return Changed;
5138}
5139
5140// Check if any direct or bitcast user of this value is a shuffle instruction.
5142 for (auto *U : V->users()) {
5144 return true;
5145 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5146 return true;
5147 }
5148 return false;
5149}
5150
5152 Value *Op0 = I.getOperand(0);
5153
5154 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5155 return replaceInstUsesWith(I, V);
5156
5157 // freeze (phi const, x) --> phi const, (freeze x)
5158 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5159 if (Instruction *NV = foldOpIntoPhi(I, PN))
5160 return NV;
5161 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5162 return NV;
5163 }
5164
5166 return replaceInstUsesWith(I, NI);
5167
5168 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5169 // - or: pick -1
5170 // - select's condition: if the true value is constant, choose it by making
5171 // the condition true.
5172 // - default: pick 0
5173 //
5174 // Note that this transform is intentionally done here rather than
5175 // via an analysis in InstSimplify or at individual user sites. That is
5176 // because we must produce the same value for all uses of the freeze -
5177 // it's the reason "freeze" exists!
5178 //
5179 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5180 // duplicating logic for binops at least.
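  // For example (hypothetical uses): if the only users of freeze(undef) are
  //   %a = or i32 %f, %x        ; wants -1
  //   %b = or i32 %f, %y        ; wants -1
  // then -1 is chosen; if the users disagree, the null value is used instead.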
5181 auto getUndefReplacement = [&](Type *Ty) {
5182 Value *BestValue = nullptr;
5183 Value *NullValue = Constant::getNullValue(Ty);
5184 for (const auto *U : I.users()) {
5185 Value *V = NullValue;
5186 if (match(U, m_Or(m_Value(), m_Value())))
5188 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5189 V = ConstantInt::getTrue(Ty);
5190 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5192 V = NullValue;
5193 }
5194
5195 if (!BestValue)
5196 BestValue = V;
5197 else if (BestValue != V)
5198 BestValue = NullValue;
5199 }
5200 assert(BestValue && "Must have at least one use");
5201 return BestValue;
5202 };
5203
5204 if (match(Op0, m_Undef())) {
5205 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5206 // a shuffle. This may improve codegen for shuffles that allow
5207 // unspecified inputs.
5209 return nullptr;
5210 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5211 }
5212
5213 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5214 Type *Ty = C->getType();
5215 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5216 if (!VTy)
5217 return nullptr;
5218 unsigned NumElts = VTy->getNumElements();
5219 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5220 for (unsigned i = 0; i != NumElts; ++i) {
5221 Constant *EltC = C->getAggregateElement(i);
5222 if (EltC && !match(EltC, m_Undef())) {
5223 BestValue = EltC;
5224 break;
5225 }
5226 }
5227 return Constant::replaceUndefsWith(C, BestValue);
5228 };
5229
5230 Constant *C;
5231 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5232 !C->containsConstantExpression()) {
5233 if (Constant *Repl = getFreezeVectorReplacement(C))
5234 return replaceInstUsesWith(I, Repl);
5235 }
5236
5237 // Replace uses of Op with freeze(Op).
5238 if (freezeOtherUses(I))
5239 return &I;
5240
5241 return nullptr;
5242}
5243
5244/// Check for the case where the call writes to an otherwise dead alloca. This
5245/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5246/// helper *only* analyzes the write; it doesn't check any other legality aspect.
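/// A typical instance (illustrative C, not from this file):
///   int unused;              // otherwise-dead alloca
///   get_value(&unused);      // the call's only write is to the dead out-param
/// Here the only memory the call writes is the dead alloca.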
5248 auto *CB = dyn_cast<CallBase>(I);
5249 if (!CB)
5250 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5251 // to allow reload along used path as described below. Otherwise, this
5252 // is simply a store to a dead allocation which will be removed.
5253 return false;
5254 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5255 if (!Dest)
5256 return false;
5257 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5258 if (!AI)
5259 // TODO: allow malloc?
5260 return false;
5261 // TODO: allow memory access dominated by move point? Note that since AI
5262 // could have a reference to itself captured by the call, we would need to
5263 // account for cycles in doing so.
5264 SmallVector<const User *> AllocaUsers;
5266 auto pushUsers = [&](const Instruction &I) {
5267 for (const User *U : I.users()) {
5268 if (Visited.insert(U).second)
5269 AllocaUsers.push_back(U);
5270 }
5271 };
5272 pushUsers(*AI);
5273 while (!AllocaUsers.empty()) {
5274 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5275 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5276 pushUsers(*UserI);
5277 continue;
5278 }
5279 if (UserI == CB)
5280 continue;
5281 // TODO: support lifetime.start/end here
5282 return false;
5283 }
5284 return true;
5285}
5286
5287/// Try to move the specified instruction from its current block into the
5288/// beginning of DestBlock, which can only happen if it's safe to move the
5289/// instruction past all of the instructions between it and the end of its
5290/// block.
5292 BasicBlock *DestBlock) {
5293 BasicBlock *SrcBlock = I->getParent();
5294
5295 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5296 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5297 I->isTerminator())
5298 return false;
5299
5300 // Do not sink static or dynamic alloca instructions. Static allocas must
5301 // remain in the entry block, and dynamic allocas must not be sunk in between
5302 // a stacksave / stackrestore pair, which would incorrectly shorten their
5303 // lifetime.
5304 if (isa<AllocaInst>(I))
5305 return false;
5306
5307 // Do not sink into catchswitch blocks.
5308 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5309 return false;
5310
5311 // Do not sink convergent call instructions.
5312 if (auto *CI = dyn_cast<CallInst>(I)) {
5313 if (CI->isConvergent())
5314 return false;
5315 }
5316
5317 // Unless we can prove that the memory write isn't visible except on the
5318 // path we're sinking to, we must bail.
5319 if (I->mayWriteToMemory()) {
5320 if (!SoleWriteToDeadLocal(I, TLI))
5321 return false;
5322 }
5323
5324 // We can only sink load instructions if there is nothing between the load and
5325 // the end of block that could change the value.
5326 if (I->mayReadFromMemory() &&
5327 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5328 // We don't want to do any sophisticated alias analysis, so we only check
5329 // the instructions after I in I's parent block if we try to sink to its
5330 // successor block.
5331 if (DestBlock->getUniquePredecessor() != I->getParent())
5332 return false;
5333 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5334 E = I->getParent()->end();
5335 Scan != E; ++Scan)
5336 if (Scan->mayWriteToMemory())
5337 return false;
5338 }
5339
5340 I->dropDroppableUses([&](const Use *U) {
5341 auto *I = dyn_cast<Instruction>(U->getUser());
5342 if (I && I->getParent() != DestBlock) {
5343 Worklist.add(I);
5344 return true;
5345 }
5346 return false;
5347 });
5348 /// FIXME: We could remove droppable uses that are not dominated by
5349 /// the new position.
5350
5351 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5352 I->moveBefore(*DestBlock, InsertPos);
5353 ++NumSunkInst;
5354
5355 // Also sink all related debug uses from the source basic block. Otherwise we
5356 // get debug use before the def. Attempt to salvage debug uses first, to
5357 // maximise the range variables have location for. If we cannot salvage, then
5358 // mark the location undef: we know it was supposed to receive a new location
5359 // here, but that computation has been sunk.
5360 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5361 findDbgUsers(I, DbgVariableRecords);
5362 if (!DbgVariableRecords.empty())
5363 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5364 DbgVariableRecords);
5365
5366 // PS: there are numerous flaws with this behaviour, not least that right now
5367 // assignments can be re-ordered past other assignments to the same variable
5368 // if they use different Values. Creating more undef assignments can never be
5369 // undone. And salvaging all users outside of this block can unnecessarily
5370 // alter the lifetime of the live-value that the variable refers to.
5371 // Some of these things can be resolved by tolerating debug use-before-defs in
5372 // LLVM-IR; however, it depends on the instruction-referencing CodeGen backend
5373 // being used for more architectures.
5374
5375 return true;
5376}
5377
5379 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5380 BasicBlock *DestBlock,
5381 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5382 // For all debug values in the destination block, the sunk instruction
5383 // will still be available, so they do not need to be dropped.
5384
5385 // Fetch all DbgVariableRecords not already in the destination.
5386 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5387 for (auto &DVR : DbgVariableRecords)
5388 if (DVR->getParent() != DestBlock)
5389 DbgVariableRecordsToSalvage.push_back(DVR);
5390
5391 // Fetch a second collection, of DbgVariableRecords in the source block that
5392 // we're going to sink.
5393 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5394 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5395 if (DVR->getParent() == SrcBlock)
5396 DbgVariableRecordsToSink.push_back(DVR);
5397
5398 // Sort DbgVariableRecords according to their position in the block. This is a
5399 // partial order: DbgVariableRecords attached to different instructions will
5400 // be ordered by the instruction order, but DbgVariableRecords attached to the
5401 // same instruction won't have an order.
5402 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5403 return B->getInstruction()->comesBefore(A->getInstruction());
5404 };
5405 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5406
5407 // If there are two assignments to the same variable attached to the same
5408 // instruction, the ordering between the two assignments is important. Scan
5409 // for this (rare) case and establish which is the last assignment.
5410 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5412 if (DbgVariableRecordsToSink.size() > 1) {
5414 // Count how many assignments to each variable there is per instruction.
5415 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5416 DebugVariable DbgUserVariable =
5417 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5418 DVR->getDebugLoc()->getInlinedAt());
5419 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5420 }
5421
5422 // If there are any instructions with two assignments, add them to the
5423 // FilterOutMap to record that they need extra filtering.
5425 for (auto It : CountMap) {
5426 if (It.second > 1) {
5427 FilterOutMap[It.first] = nullptr;
5428 DupSet.insert(It.first.first);
5429 }
5430 }
5431
5432 // For all instruction/variable pairs needing extra filtering, find the
5433 // latest assignment.
5434 for (const Instruction *Inst : DupSet) {
5435 for (DbgVariableRecord &DVR :
5436 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5437 DebugVariable DbgUserVariable =
5438 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5439 DVR.getDebugLoc()->getInlinedAt());
5440 auto FilterIt =
5441 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5442 if (FilterIt == FilterOutMap.end())
5443 continue;
5444 if (FilterIt->second != nullptr)
5445 continue;
5446 FilterIt->second = &DVR;
5447 }
5448 }
5449 }
5450
5451 // Perform cloning of the DbgVariableRecords that we plan on sinking, filtering
5452 // out any duplicate assignments identified above.
5454 SmallSet<DebugVariable, 4> SunkVariables;
5455 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5457 continue;
5458
5459 DebugVariable DbgUserVariable =
5460 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5461 DVR->getDebugLoc()->getInlinedAt());
5462
5463 // For any variable where there were multiple assignments in the same place,
5464 // ignore all but the last assignment.
5465 if (!FilterOutMap.empty()) {
5466 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5467 auto It = FilterOutMap.find(IVP);
5468
5469 // Filter out.
5470 if (It != FilterOutMap.end() && It->second != DVR)
5471 continue;
5472 }
5473
5474 if (!SunkVariables.insert(DbgUserVariable).second)
5475 continue;
5476
5477 if (DVR->isDbgAssign())
5478 continue;
5479
5480 DVRClones.emplace_back(DVR->clone());
5481 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5482 }
5483
5484 // Perform salvaging without the clones, then sink the clones.
5485 if (DVRClones.empty())
5486 return;
5487
5488 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5489
5490 // The clones are in reverse order of original appearance. Assert that the
5491 // head bit is set on the iterator as we _should_ have received it via
5492 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5493 // we'll repeatedly insert at the head, such as:
5494 // DVR-3 (third insertion goes here)
5495 // DVR-2 (second insertion goes here)
5496 // DVR-1 (first insertion goes here)
5497 // Any-Prior-DVRs
5498 // InsertPtInst
5499 assert(InsertPos.getHeadBit());
5500 for (DbgVariableRecord *DVRClone : DVRClones) {
5501 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5502 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5503 }
5504}
5505
5507 while (!Worklist.isEmpty()) {
5508 // Walk deferred instructions in reverse order, and push them to the
5509 // worklist, which means they'll end up popped from the worklist in-order.
5510 while (Instruction *I = Worklist.popDeferred()) {
5511 // Check to see if we can DCE the instruction. We do this already here to
5512 // reduce the number of uses and thus allow other folds to trigger.
5513 // Note that eraseInstFromFunction() may push additional instructions on
5514 // the deferred worklist, so this will DCE whole instruction chains.
5517 ++NumDeadInst;
5518 continue;
5519 }
5520
5521 Worklist.push(I);
5522 }
5523
5524 Instruction *I = Worklist.removeOne();
5525 if (I == nullptr) continue; // skip null values.
5526
5527 // Check to see if we can DCE the instruction.
5530 ++NumDeadInst;
5531 continue;
5532 }
5533
5534 if (!DebugCounter::shouldExecute(VisitCounter))
5535 continue;
5536
5537 // See if we can trivially sink this instruction to its user if we can
5538 // prove that the successor is not executed more frequently than our block.
5539 // Return the UserBlock if successful.
5540 auto getOptionalSinkBlockForInst =
5541 [this](Instruction *I) -> std::optional<BasicBlock *> {
5542 if (!EnableCodeSinking)
5543 return std::nullopt;
5544
5545 BasicBlock *BB = I->getParent();
5546 BasicBlock *UserParent = nullptr;
5547 unsigned NumUsers = 0;
5548
5549 for (Use &U : I->uses()) {
5550 User *User = U.getUser();
5551 if (User->isDroppable())
5552 continue;
5553 if (NumUsers > MaxSinkNumUsers)
5554 return std::nullopt;
5555
5556 Instruction *UserInst = cast<Instruction>(User);
5557 // Special handling for Phi nodes - get the block the use occurs in.
5558 BasicBlock *UserBB = UserInst->getParent();
5559 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5560 UserBB = PN->getIncomingBlock(U);
5561 // Bail out if we have uses in different blocks. We don't do any
5562 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5563 // use blocks).
5564 if (UserParent && UserParent != UserBB)
5565 return std::nullopt;
5566 UserParent = UserBB;
5567
5568 // Make sure these checks are done only once; naturally, we do the checks
5569 // the first time we get the userparent, which saves compile time.
5570 if (NumUsers == 0) {
5571 // Try sinking to another block. If that block is unreachable, then do
5572 // not bother. SimplifyCFG should handle it.
5573 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5574 return std::nullopt;
5575
5576 auto *Term = UserParent->getTerminator();
5577 // See if the user is one of our successors that has only one
5578 // predecessor, so that we don't have to split the critical edge.
5579 // Another option where we can sink is a block that ends with a
5580 // terminator that does not pass control to other block (such as
5581 // return or unreachable or resume). In this case:
5582 // - I dominates the User (by SSA form);
5583 // - the User will be executed at most once.
5584 // So sinking I down to User is always profitable or neutral.
5585 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5586 return std::nullopt;
5587
5588 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5589 }
5590
5591 NumUsers++;
5592 }
5593
5594 // No user or only has droppable users.
5595 if (!UserParent)
5596 return std::nullopt;
5597
5598 return UserParent;
5599 };
5600
5601 auto OptBB = getOptionalSinkBlockForInst(I);
5602 if (OptBB) {
5603 auto *UserParent = *OptBB;
5604 // Okay, the CFG is simple enough, try to sink this instruction.
5605 if (tryToSinkInstruction(I, UserParent)) {
5606 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5607 MadeIRChange = true;
5608 // We'll add uses of the sunk instruction below, but since
5609 // sinking can expose opportunities for its *operands*, add
5610 // them to the worklist.
5611 for (Use &U : I->operands())
5612 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5613 Worklist.push(OpI);
5614 }
5615 }
5616
5617 // Now that we have an instruction, try combining it to simplify it.
5618 Builder.SetInsertPoint(I);
5619 Builder.CollectMetadataToCopy(
5620 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5621
5622#ifndef NDEBUG
5623 std::string OrigI;
5624#endif
5625 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5626 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5627
5628 if (Instruction *Result = visit(*I)) {
5629 ++NumCombined;
5630 // Should we replace the old instruction with a new one?
5631 if (Result != I) {
5632 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5633 << " New = " << *Result << '\n');
5634
5635 // We copy the old instruction's DebugLoc to the new instruction, unless
5636 // InstCombine already assigned a DebugLoc to it, in which case we
5637 // should trust the more specifically selected DebugLoc.
5638 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5639 // We also copy annotation metadata to the new instruction.
5640 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5641 // Everything uses the new instruction now.
5642 I->replaceAllUsesWith(Result);
5643
5644 // Move the name to the new instruction first.
5645 Result->takeName(I);
5646
5647 // Insert the new instruction into the basic block...
5648 BasicBlock *InstParent = I->getParent();
5649 BasicBlock::iterator InsertPos = I->getIterator();
5650
5651 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5652 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5653 // We need to fix up the insertion point.
5654 if (isa<PHINode>(I)) // PHI -> Non-PHI
5655 InsertPos = InstParent->getFirstInsertionPt();
5656 else // Non-PHI -> PHI
5657 InsertPos = InstParent->getFirstNonPHIIt();
5658 }
5659
5660 Result->insertInto(InstParent, InsertPos);
5661
5662 // Push the new instruction and any users onto the worklist.
5663 Worklist.pushUsersToWorkList(*Result);
5664 Worklist.push(Result);
5665
5667 } else {
5668 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5669 << " New = " << *I << '\n');
5670
5671 // If the instruction was modified, it's possible that it is now dead.
5672 // If so, remove it.
5675 } else {
5676 Worklist.pushUsersToWorkList(*I);
5677 Worklist.push(I);
5678 }
5679 }
5680 MadeIRChange = true;
5681 }
5682 }
5683
5684 Worklist.zap();
5685 return MadeIRChange;
5686}
5687
5688// Track the scopes used by !alias.scope and !noalias. In a function, a
5689// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5690// by both sets. If not, the declaration of the scope can be safely omitted.
5691// The MDNode of the scope can be omitted as well for the instructions that are
5692// part of this function. We do not do that at this point, as this might become
5693// too time consuming to do.
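// Illustrative sketch (hypothetical metadata): a declaration such as
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// is only kept if the scope in !2 appears in some !alias.scope list *and* in
// some !noalias list within the function; otherwise it can be removed.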
5695 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5696 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5697
5698public:
5700 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5701 if (!I->hasMetadataOtherThanDebugLoc())
5702 return;
5703
5704 auto Track = [](Metadata *ScopeList, auto &Container) {
5705 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5706 if (!MDScopeList || !Container.insert(MDScopeList).second)
5707 return;
5708 for (const auto &MDOperand : MDScopeList->operands())
5709 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5710 Container.insert(MDScope);
5711 };
5712
5713 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5714 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5715 }
5716
5719 if (!Decl)
5720 return false;
5721
5722 assert(Decl->use_empty() &&
5723 "llvm.experimental.noalias.scope.decl in use ?");
5724 const MDNode *MDSL = Decl->getScopeList();
5725 assert(MDSL->getNumOperands() == 1 &&
5726 "llvm.experimental.noalias.scope should refer to a single scope");
5727 auto &MDOperand = MDSL->getOperand(0);
5728 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5729 return !UsedAliasScopesAndLists.contains(MD) ||
5730 !UsedNoAliasScopesAndLists.contains(MD);
5731
5732 // Not an MDNode? Throw it away.
5733 return true;
5734 }
5735};
5736
5737/// Populate the IC worklist from a function, by walking it in reverse
5738/// post-order and adding all reachable code to the worklist.
5739///
5740/// This has a couple of tricks to make the code faster and more powerful. In
5741/// particular, we constant fold and DCE instructions as we go, to avoid adding
5742/// them to the worklist (this significantly speeds up instcombine on code where
5743/// many instructions are dead or constant). Additionally, if we find a branch
5744/// whose condition is a known constant, we only visit the reachable successors.
5746 bool MadeIRChange = false;
5748 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5749 DenseMap<Constant *, Constant *> FoldedConstants;
5750 AliasScopeTracker SeenAliasScopes;
5751
5752 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5753 for (BasicBlock *Succ : successors(BB))
5754 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5755 for (PHINode &PN : Succ->phis())
5756 for (Use &U : PN.incoming_values())
5757 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5758 U.set(PoisonValue::get(PN.getType()));
5759 MadeIRChange = true;
5760 }
5761 };
5762
5763 for (BasicBlock *BB : RPOT) {
5764 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5765 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5766 })) {
5767 HandleOnlyLiveSuccessor(BB, nullptr);
5768 continue;
5769 }
5770 LiveBlocks.insert(BB);
5771
5772 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5773 // ConstantProp instruction if trivially constant.
5774 if (!Inst.use_empty() &&
5775 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5776 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5777 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5778 << '\n');
5779 Inst.replaceAllUsesWith(C);
5780 ++NumConstProp;
5781 if (isInstructionTriviallyDead(&Inst, &TLI))
5782 Inst.eraseFromParent();
5783 MadeIRChange = true;
5784 continue;
5785 }
5786
5787 // See if we can constant fold its operands.
5788 for (Use &U : Inst.operands()) {
5790 continue;
5791
5792 auto *C = cast<Constant>(U);
5793 Constant *&FoldRes = FoldedConstants[C];
5794 if (!FoldRes)
5795 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5796
5797 if (FoldRes != C) {
5798 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5799 << "\n Old = " << *C
5800 << "\n New = " << *FoldRes << '\n');
5801 U = FoldRes;
5802 MadeIRChange = true;
5803 }
5804 }
5805
5806 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5807 // these call instructions consumes non-trivial amount of time and
5808 // provides no value for the optimization.
5809 if (!Inst.isDebugOrPseudoInst()) {
5810 InstrsForInstructionWorklist.push_back(&Inst);
5811 SeenAliasScopes.analyse(&Inst);
5812 }
5813 }
5814
5815 // If this is a branch or switch on a constant, mark only the single
5816 // live successor. Otherwise assume all successors are live.
5817 Instruction *TI = BB->getTerminator();
5818 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5819 if (isa<UndefValue>(BI->getCondition())) {
5820 // Branch on undef is UB.
5821 HandleOnlyLiveSuccessor(BB, nullptr);
5822 continue;
5823 }
5824 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5825 bool CondVal = Cond->getZExtValue();
5826 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5827 continue;
5828 }
5829 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5830 if (isa<UndefValue>(SI->getCondition())) {
5831 // Switch on undef is UB.
5832 HandleOnlyLiveSuccessor(BB, nullptr);
5833 continue;
5834 }
5835 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5836 HandleOnlyLiveSuccessor(BB,
5837 SI->findCaseValue(Cond)->getCaseSuccessor());
5838 continue;
5839 }
5840 }
5841 }
5842
5843 // Remove instructions inside unreachable blocks. This prevents the
5844 // instcombine code from having to deal with some bad special cases, and
5845 // reduces use counts of instructions.
5846 for (BasicBlock &BB : F) {
5847 if (LiveBlocks.count(&BB))
5848 continue;
5849
5850 unsigned NumDeadInstInBB;
5851 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
5852
5853 MadeIRChange |= NumDeadInstInBB != 0;
5854 NumDeadInst += NumDeadInstInBB;
5855 }
5856
5857 // Once we've found all of the instructions to add to instcombine's worklist,
5858 // add them in reverse order. This way instcombine will visit from the top
5859 // of the function down. This jibes well with the way that it adds all uses
5860 // of instructions to the worklist after doing a transformation, thus avoiding
5861 // some N^2 behavior in pathological cases.
5862 Worklist.reserve(InstrsForInstructionWorklist.size());
5863 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5864 // DCE instruction if trivially dead. As we iterate in reverse program
5865 // order here, we will clean up whole chains of dead instructions.
5866 if (isInstructionTriviallyDead(Inst, &TLI) ||
5867 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5868 ++NumDeadInst;
5869 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5870 salvageDebugInfo(*Inst);
5871 Inst->eraseFromParent();
5872 MadeIRChange = true;
5873 continue;
5874 }
5875
5876 Worklist.push(Inst);
5877 }
5878
5879 return MadeIRChange;
5880}
5881
5883 // Collect backedges.
5885 for (BasicBlock *BB : RPOT) {
5886 Visited.insert(BB);
5887 for (BasicBlock *Succ : successors(BB))
5888 if (Visited.contains(Succ))
5889 BackEdges.insert({BB, Succ});
5890 }
5891 ComputedBackEdges = true;
5892}
5893
5899 const InstCombineOptions &Opts) {
5900 auto &DL = F.getDataLayout();
5901 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5902 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5903
5904 /// Builder - This is an IRBuilder that automatically inserts new
5905 /// instructions into the worklist when they are created.
5907 F.getContext(), TargetFolder(DL),
5908 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5909 Worklist.add(I);
5910 if (auto *Assume = dyn_cast<AssumeInst>(I))
5911 AC.registerAssumption(Assume);
5912 }));
5913
5915
5916 // Lower dbg.declare intrinsics, otherwise their value may be clobbered
5917 // by instcombiner.
5918 bool MadeIRChange = false;
5920 MadeIRChange = LowerDbgDeclare(F);
5921
5922 // Iterate while there is work to do.
5923 unsigned Iteration = 0;
5924 while (true) {
5925 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
5926 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5927 << " on " << F.getName()
5928 << " reached; stopping without verifying fixpoint\n");
5929 break;
5930 }
5931
5932 ++Iteration;
5933 ++NumWorklistIterations;
5934 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5935 << F.getName() << "\n");
5936
5937 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5938 ORE, BFI, BPI, PSI, DL, RPOT);
5940 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5941 MadeChangeInThisIteration |= IC.run();
5942 if (!MadeChangeInThisIteration)
5943 break;
5944
5945 MadeIRChange = true;
5946 if (Iteration > Opts.MaxIterations) {
5948 "Instruction Combining on " + Twine(F.getName()) +
5949 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5950 " iterations. " +
5951 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5952 "'instcombine-no-verify-fixpoint' to suppress this error.");
5953 }
5954 }
5955
5956 if (Iteration == 1)
5957 ++NumOneIteration;
5958 else if (Iteration == 2)
5959 ++NumTwoIterations;
5960 else if (Iteration == 3)
5961 ++NumThreeIterations;
5962 else
5963 ++NumFourOrMoreIterations;
5964
5965 return MadeIRChange;
5966}
5967
5969
5971 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5972 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5973 OS, MapClassName2PassName);
5974 OS << '<';
5975 OS << "max-iterations=" << Options.MaxIterations << ";";
5976 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5977 OS << '>';
5978}
5979
5980char InstCombinePass::ID = 0;
5981
5984 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5985 // No changes since last InstCombine pass, exit early.
5986 if (LRT.shouldSkip(&ID))
5987 return PreservedAnalyses::all();
5988
5989 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5990 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5991 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5993 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5994
5995 auto *AA = &AM.getResult<AAManager>(F);
5996 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5997 ProfileSummaryInfo *PSI =
5998 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5999 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6000 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6002
6003 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6004 BFI, BPI, PSI, Options)) {
6005 // No changes, all analyses are preserved.
6006 LRT.update(&ID, /*Changed=*/false);
6007 return PreservedAnalyses::all();
6008 }
6009
6010 // Mark all the analyses that instcombine updates as preserved.
6012 LRT.update(&ID, /*Changed=*/true);
6015 return PA;
6016}
6017
6033
6035 if (skipFunction(F))
6036 return false;
6037
6038 // Required analyses.
6039 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6040 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6041 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6043 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6045
6046 // Optional analyses.
6047 ProfileSummaryInfo *PSI =
6049 BlockFrequencyInfo *BFI =
6050 (PSI && PSI->hasProfileSummary()) ?
6052 nullptr;
6053 BranchProbabilityInfo *BPI = nullptr;
6054 if (auto *WrapperPass =
6056 BPI = &WrapperPass->getBPI();
6057
6058 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6059 BFI, BPI, PSI, InstCombineOptions());
6060}
6061
6063
6067
6069 "Combine redundant instructions", false, false)
6080 "Combine redundant instructions", false, false)
6081
6082// Initialization Routines
6086
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define T
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:423
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1890
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:827
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1960
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:851
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
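A hedged sketch of how the BasicBlock accessors above are typically combined; the helper name is invented for illustration:
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Illustrative only: a block with no PHIs, a single predecessor, and an
// unconditional branch terminator.
static bool isTrivialForwardingBlock(const BasicBlock &BB) {
  if (!BB.phis().empty() || !BB.getSinglePredecessor())
    return false;
  const auto *BI = dyn_cast_or_null<BranchInst>(BB.getTerminator());
  return BI && BI->isUnconditional();
}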
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
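A minimal sketch of the predicate helpers above, using the static overloads of getSwappedPredicate and getInversePredicate that CmpInst also provides:
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static bool predicateDemo() {
  CmpInst::Predicate P = CmpInst::ICMP_ULT;                     // unsigned <
  CmpInst::Predicate Swapped = CmpInst::getSwappedPredicate(P); // ICMP_UGT
  CmpInst::Predicate Inverse = CmpInst::getInversePredicate(P); // ICMP_UGE
  return Swapped == CmpInst::ICMP_UGT && Inverse == CmpInst::ICMP_UGE;
}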
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:776
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
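A hedged sketch of the ConstantRange queries listed above; the helper and the chosen constants are illustrative:
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// True iff Val satisfies "icmp ult Val, 8" for 32-bit values.
static bool isUltEight(const APInt &Val) {
  ConstantRange CR =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 8));
  return CR.contains(Val);
}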
Constant Vector Declarations.
Definition Constants.h:517
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
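A minimal sketch of the Constant factory methods above for a 32-bit integer type; the context and type are created locally purely for illustration:
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool constantDemo() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *Zero = Constant::getNullValue(I32);                       // i32 0
  Constant *AllOnes = Constant::getAllOnesValue(I32);                 // i32 -1
  Constant *FortyTwo = Constant::getIntegerValue(I32, APInt(32, 42)); // i32 42
  return Zero->isNullValue() && !AllOnes->isNullValue() && FortyTwo != Zero;
}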
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
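A small sketch of querying and merging GEPNoWrapFlags with the members listed above (assuming the llvm/IR/GEPNoWrapFlags.h header that defines them):
#include "llvm/IR/GEPNoWrapFlags.h"
using namespace llvm;

static bool gepFlagsDemo() {
  GEPNoWrapFlags Outer = GEPNoWrapFlags::all();      // inbounds + nusw + nuw
  GEPNoWrapFlags Inner = GEPNoWrapFlags::inBounds(); // inbounds + nusw, no nuw
  // Flags for folding (gep (gep p, x), y) into (gep p, x+y).
  GEPNoWrapFlags Merged = Outer.intersectForOffsetAdd(Inner);
  return Merged.isInBounds() && !Merged.hasNoUnsignedWrap();
}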
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2036
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
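A hedged sketch of the IRBuilder helpers above (getInt and CreatePtrAdd); the wrapper name and the 64-bit offset width are illustrative assumptions:
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Emits "getelementptr i8, ptr %Ptr, i64 Bytes" via the i8-based helper.
static Value *addByteOffset(IRBuilderBase &B, Value *Ptr, uint64_t Bytes) {
  ConstantInt *Off = B.getInt(APInt(64, Bytes));
  return B.CreatePtrAdd(Ptr, Off, "byte.ptr");
}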
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC with the property shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
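The members above (Builder, replaceInstUsesWith, the worklist) are internal to the combiner, so they cannot be exercised outside the pass. The following stand-alone sketch only approximates the shape of a visit-method fold with public API: it pattern-matches a double bitwise-not and forwards uses directly, where the real pass would return the replacement to the driver via replaceInstUsesWith.
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Illustrative only: replace "xor (xor X, -1), -1" (a double not) with X.
static bool foldDoubleNot(BinaryOperator &I) {
  Value *X;
  if (!match(&I, m_Not(m_Not(m_Value(X)))))
    return false;
  I.replaceAllUsesWith(X); // the pass itself would use replaceInstUsesWith
  return true;
}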
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
Tracking metadata reference owned by Metadata.
Definition Metadata.h:899
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:63
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1468
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:181
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:107
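A minimal sketch of the Type queries above, applied to a plain 64-bit integer type:
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool typeDemo() {
  LLVMContext Ctx;
  Type *I64 = Type::getInt64Ty(Ctx);
  return I64->isIntegerTy() && !I64->isVectorTy() && !I64->isPointerTy() &&
         I64->getScalarSizeInBits() == 64 && I64->getScalarType() == I64;
}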
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:279
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
op_iterator op_begin()
Definition User.h:284
const Use & getOperandUse(unsigned i) const
Definition User.h:245
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:115
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
bool use_empty() const
Definition Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:881
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
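A hedged sketch of the use-list queries above, in the shape that combiner-style rewrites often take; the helper is illustrative and not part of the pass:
#include "llvm/IR/Value.h"
using namespace llvm;

// Only proceed when V has exactly one use, then let the replacement
// inherit V's name so the IR stays readable.
static bool renameIfSingleUse(Value *V, Value *Repl) {
  if (!V->hasOneUse())
    return false;
  Repl->takeName(V);
  return true;
}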
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:137
self_iterator getIterator()
Definition ilist_node.h:134
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
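The PatternMatch helpers above form the vocabulary most InstCombine folds are written in. A hedged sketch that recognizes "(X << C) + Y", with the single-use restriction the combiner commonly applies before rewriting an operand (the helper name is invented):
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool matchShlPlus(Value *V, Value *&X, Value *&Y, const APInt *&C) {
  // m_OneUse restricts the shift to a single use; a commutative fold would
  // use m_c_Add instead of m_Add.
  return match(V, m_Add(m_OneUse(m_Shl(m_Value(X), m_APInt(C))), m_Value(Y)));
}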
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:311
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:824
void stable_sort(R &&Range)
Definition STLExtras.h:2047
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1714
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:256
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2461
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1725
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2125
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:627
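make_early_inc_range is the standard idiom for erasing instructions while iterating a block. A minimal sketch, also using isInstructionTriviallyDead from this index:
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

static void eraseTriviallyDead(BasicBlock &BB) {
  for (Instruction &I : make_early_inc_range(BB))
    if (isInstructionTriviallyDead(&I))
      I.eraseFromParent(); // safe: the iterator was advanced before erasure
}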
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1555
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2468
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:186
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1721
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:401
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:318
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
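A hedged sketch of calling simplifyBinOp with a SimplifyQuery built from just the DataLayout; the helper name and the add-zero pattern are illustrative:
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Returns X when "add X, 0" folds, or nullptr if no simplification applies.
static Value *tryFoldAddZero(Value *X, const DataLayout &DL) {
  Constant *Zero = Constant::getNullValue(X->getType());
  return simplifyBinOp(Instruction::Add, X, Zero, SimplifyQuery(DL));
}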
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:1950
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1886
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2077
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:304
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:324
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:244
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:241
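A minimal sketch of the KnownBits queries above on a hand-built 32-bit value whose top byte is known to be zero:
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool knownBitsDemo() {
  KnownBits Known(32);
  Known.Zero.setHighBits(8);                // top 8 bits are known to be 0
  return Known.getBitWidth() == 32 && Known.countMinLeadingZeros() >= 8;
}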
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const