LLVM 21.0.0git
InstructionCombining.cpp
Go to the documentation of this file.
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
101#include <algorithm>
102#include <cassert>
103#include <cstdint>
104#include <memory>
105#include <optional>
106#include <string>
107#include <utility>
108
109#define DEBUG_TYPE "instcombine"
111#include <optional>
112
113using namespace llvm;
114using namespace llvm::PatternMatch;
115
// Pass-level statistics: how many instcombine fixpoint iterations were needed
// per function.
STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

// Per-transform statistics, incremented by the individual fold routines.
STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc , "Number of reassociations");
// Debug counter allowing selective enabling/skipping of instruction visits.
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

// Command-line knobs for the pass.
static cl::opt<bool>
EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
                  cl::init(true));

    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));
155
/// Give the target a chance to combine a target-specific intrinsic call.
/// Returns std::nullopt when the callee is not a target intrinsic, so the
/// generic combines keep handling it.
std::optional<Instruction *>
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    // Delegate to TargetTransformInfo for target intrinsics only.
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}
164
    // Target hook for demanded-bits simplification of a target intrinsic.
    // Returns std::nullopt when the callee is not a target intrinsic.
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}
175
    // Target hook for demanded-vector-elements simplification of a target
    // intrinsic. Returns std::nullopt when the callee is not a target
    // intrinsic.
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}
189
/// Ask the target whether an addrspacecast from FromAS to ToAS is valid.
bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
  return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
}
196
/// Emit the byte offset computed by GEP as an integer value.
/// When RewriteGEP is set and the GEP is non-trivial with multiple uses, the
/// GEP is additionally rewritten as an i8 GEP over the emitted offset so the
/// offset arithmetic is not duplicated.
Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)

  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)

  Value *Offset = EmitGEPOffset(GEP);
  // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
  // the offset arithmetic.
  if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
      // Replace the original GEP with an equivalent i8 GEP addressed by the
      // just-computed Offset, reusing the GEP's no-wrap flags.
      *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                               Offset, "", GEP->getNoWrapFlags()));
  }
  return Offset;
}
218
219/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
222/// NOTE: This treats i8, i16 and i32 specially because they are common
223/// types in frontend languages.
224bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
225 switch (BitWidth) {
226 case 8:
227 case 16:
228 case 32:
229 return true;
230 default:
231 return DL.isLegalInteger(BitWidth);
232 }
233}
234
235/// Return true if it is desirable to convert an integer computation from a
236/// given bit width to a new bit width.
237/// We don't want to convert from a legal or desirable type (like i8) to an
238/// illegal type or from a smaller to a larger illegal type. A width of '1'
239/// is always treated as a desirable type because i1 is a fundamental type in
240/// IR, and there are many specialized optimizations for i1 types.
241/// Common/desirable widths are equally treated as legal to convert to, in
242/// order to open up more combining opportunities.
243bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
244 unsigned ToWidth) const {
245 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
246 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
247
248 // Convert to desirable widths even if they are not legal types.
249 // Only shrink types, to prevent infinite loops.
250 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
251 return true;
252
253 // If this is a legal or desiable integer from type, and the result would be
254 // an illegal type, don't do the transformation.
255 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
256 return false;
257
258 // Otherwise, if both are illegal, do not increase the size of the result. We
259 // do allow things like i160 -> i64, but not i64 -> i160.
260 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
261 return false;
262
263 return true;
264}
265
266/// Return true if it is desirable to convert a computation from 'From' to 'To'.
267/// We don't want to convert from a legal to an illegal type or from a smaller
268/// to a larger illegal type. i1 is always treated as a legal type because it is
269/// a fundamental type in IR, and there are many specialized optimizations for
270/// i1 types.
271bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
272 // TODO: This could be extended to allow vectors. Datalayout changes might be
273 // needed to properly support that.
274 if (!From->isIntegerTy() || !To->isIntegerTy())
275 return false;
276
277 unsigned FromWidth = From->getPrimitiveSizeInBits();
278 unsigned ToWidth = To->getPrimitiveSizeInBits();
279 return shouldChangeType(FromWidth, ToWidth);
280}
281
282// Return true, if No Signed Wrap should be maintained for I.
283// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
284// where both B and C should be ConstantInts, results in a constant that does
285// not overflow. This function only handles the Add/Sub/Mul opcodes. For
286// all other opcodes, the function conservatively returns false.
  // The flag can only be maintained if I already carries it.
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // Both folded operands must be constant integers (or splats thereof).
  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  // We reason about Add/Sub/Mul Only.
  bool Overflow = false;
  switch (I.getOpcode()) {
  case Instruction::Add:
    (void)BVal->sadd_ov(*CVal, Overflow);
    break;
  case Instruction::Sub:
    (void)BVal->ssub_ov(*CVal, Overflow);
    break;
  case Instruction::Mul:
    (void)BVal->smul_ov(*CVal, Overflow);
    break;
  default:
    // Conservatively return false for other opcodes.
    return false;
  }
  // nsw survives exactly when the constant fold does not signed-overflow.
  return !Overflow;
}
314
  // True iff I is an OverflowingBinaryOperator with the nuw flag set.
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}
319
  // True iff I is an OverflowingBinaryOperator with the nsw flag set.
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}
324
325/// Conservatively clears subclassOptionalData after a reassociation or
326/// commutation. We preserve fast-math flags when applicable as they can be
327/// preserved.
  // Non-FP instructions: drop all optional subclass data (nuw/nsw/exact/...).
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  // FP instructions: the fast-math flags remain valid across reassociation,
  // so save them, clear everything, and restore them.
  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}
339
340/// Combine constant operands of associative operations either before or after a
341/// cast to eliminate one of the associative operations:
342/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
343/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
                                   InstCombinerImpl &IC) {
  // The LHS of BinOp1 must be a single-use cast of another binop.
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  // The inner binop must be single-use and have the same opcode as the outer.
  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  // Both binops must have a constant RHS to fold together.
  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  // Rewire: the cast now consumes X directly, and BinOp1 uses the folded
  // constant.
  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  // Flags proven for the old operand may not hold for the new one.
  Cast->dropPoisonGeneratingFlags();
  return true;
}
390
391// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
392// inttoptr ( ptrtoint (x) ) --> x
393Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
394 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
395 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
396 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
397 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
398 Type *CastTy = IntToPtr->getDestTy();
399 if (PtrToInt &&
400 CastTy->getPointerAddressSpace() ==
401 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
402 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
403 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
404 return PtrToInt->getOperand(0);
405 }
406 return nullptr;
407}
408
409/// This performs a few simplifications for operators that are associative or
410/// commutative:
411///
412/// Commutative operators:
413///
414/// 1. Order operands such that they are listed from right (least complex) to
415/// left (most complex). This puts constants before unary operators before
416/// binary operators.
417///
418/// Associative operators:
419///
420/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
421/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
422///
423/// Associative and commutative operators:
424///
425/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
426/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
427/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
428/// if C1 and C2 are constants.
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  // Iterate to a fixed point: each successful transform re-enters the loop
  // via 'continue'; we exit only when nothing more applies.
  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    // Note: swapOperands() is presumed to report failure with 'true', hence
    // the negation — TODO confirm against Instruction::swapOperands.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    // The following transforms additionally require commutativity.
    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        // nuw is kept only if every involved operation had it.
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
                                BinaryOperator::CreateNUW(Opcode, A, B) :
                                BinaryOperator::Create(Opcode, A, B);

        // For FP ops, the new binop may only keep flags common to all three.
        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}
595
596/// Return whether "X LOp (Y ROp Z)" is always equal to
597/// "(X LOp Y) ROp (X LOp Z)".
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  // No other opcode pair is treated as distributive.
  return false;
}
616
617/// Return whether "(X LOp Y) ROp Z" is always equal to
618/// "(X ROp Z) LOp (Y ROp Z)".
    // For a commutative ROp, right-distribution reduces to left-distribution
    // with the roles of the opcodes exchanged.
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}
631
632/// This function returns identity value for given opcode, which can be used to
633/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
  // Constants are excluded — presumably the factoring callers only want an
  // identity for non-constant operands; TODO confirm with callers.
  if (isa<Constant>(V))
    return nullptr;

  // E.g. 1 for Mul, 0 for Add/Or/Xor (null when no identity exists).
  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}
640
641/// This function predicates factorization using distributive laws. By default,
642/// it just returns the 'Op' inputs. But for special-cases like
643/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
644/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
645/// allow more factorization opportunities.
    Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  // Default outputs: the operator's own operands.
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      // Report the shift as the more general multiply for factoring.
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}
672
673/// This tries to simplify binary operations by factorizing out common terms
674/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
    // Factor "(A op' B) op (C op' D)" when a common operand allows it; I's
    // operands are LHS/RHS and op' is InnerOpcode.
    Instruction::BinaryOps InnerOpcode, Value *A,
    Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<BinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    // A flag survives only if both original binops also carried it.
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}
771
772// If `I` has one Const operand and the other matches `(ctpop (not x))`,
773// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold so we only handle the
// following cases:
// 1) (add/sub/disjoint_or C, (ctpop (not x))
//    -> (add/sub/disjoint_or C', (ctpop x))
// 2) (cmp pred C, (ctpop (not x))
//    -> (cmp pred C', (ctpop x))
  unsigned Opc = I->getOpcode();
  // Index of the constant operand; ctpop is expected at 1 - ConstIdx.
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
  // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
  // We can fold the BitWidth(x) with add/sub/icmp as long as the other operand
  // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types, as
    // well since (ctpop x) is known [0, log2(BitWidth(x))] almost all signed
    // comparisons against it are simplified to unsigned.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    // Only a disjoint or behaves like add for this fold.
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
    if (!Cmp || !Cmp->isZeroValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    // C - ctpop(~x) == ctpop(x) + (C - BitWidth).
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    // C + ctpop(~x) == (C + BitWidth) - ctpop(x).
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    // pred(C, ctpop(~x)) == swapped-pred(ctpop(x), BitWidth - C).
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}
861
862// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
863// IFF
864// 1) the logic_shifts match
865// 2) either both binops are binops and one is `and` or
866// BinOp1 is `and`
867// (logic_shift (inv_logic_shift C1, C), C) == C1 or
868//
869// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
870//
871// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
872// IFF
873// 1) the logic_shifts match
874// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
875//
876// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
877//
878// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
879// IFF
880// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
881// 2) Binop2 is `not`
882//
883// -> (arithmetic_shift Binop1((not X), Y), Amt)
884
886 const DataLayout &DL = I.getDataLayout();
887 auto IsValidBinOpc = [](unsigned Opc) {
888 switch (Opc) {
889 default:
890 return false;
891 case Instruction::And:
892 case Instruction::Or:
893 case Instruction::Xor:
894 case Instruction::Add:
895 // Skip Sub as we only match constant masks which will canonicalize to use
896 // add.
897 return true;
898 }
899 };
900
901 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
902 // constraints.
903 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
904 unsigned ShOpc) {
905 assert(ShOpc != Instruction::AShr);
906 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
907 ShOpc == Instruction::Shl;
908 };
909
910 auto GetInvShift = [](unsigned ShOpc) {
911 assert(ShOpc != Instruction::AShr);
912 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
913 };
914
915 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
916 unsigned ShOpc, Constant *CMask,
917 Constant *CShift) {
918 // If the BinOp1 is `and` we don't need to check the mask.
919 if (BinOpc1 == Instruction::And)
920 return true;
921
922 // For all other possible transfers we need complete distributable
923 // binop/shift (anything but `add` + `lshr`).
924 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
925 return false;
926
927 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
928 // vecs, otherwise the mask will be simplified and the following check will
929 // handle it).
930 if (BinOpc2 == Instruction::And)
931 return true;
932
933 // Otherwise, need mask that meets the below requirement.
934 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
935 Constant *MaskInvShift =
936 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
937 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
938 CMask;
939 };
940
941 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
942 Constant *CMask, *CShift;
943 Value *X, *Y, *ShiftedX, *Mask, *Shift;
944 if (!match(I.getOperand(ShOpnum),
945 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
946 return nullptr;
947 if (!match(I.getOperand(1 - ShOpnum),
950 m_Value(ShiftedX)),
951 m_Value(Mask))))
952 return nullptr;
953 // Make sure we are matching instruction shifts and not ConstantExpr
954 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
955 auto *IX = dyn_cast<Instruction>(ShiftedX);
956 if (!IY || !IX)
957 return nullptr;
958
959 // LHS and RHS need same shift opcode
960 unsigned ShOpc = IY->getOpcode();
961 if (ShOpc != IX->getOpcode())
962 return nullptr;
963
964 // Make sure binop is real instruction and not ConstantExpr
965 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
966 if (!BO2)
967 return nullptr;
968
969 unsigned BinOpc = BO2->getOpcode();
970 // Make sure we have valid binops.
971 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
972 return nullptr;
973
974 if (ShOpc == Instruction::AShr) {
975 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
976 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
977 Value *NotX = Builder.CreateNot(X);
978 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
980 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
981 }
982
983 return nullptr;
984 }
985
986 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
987 // distribute to drop the shift irrelevant of constants.
988 if (BinOpc == I.getOpcode() &&
989 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
990 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
991 Value *NewBinOp1 = Builder.CreateBinOp(
992 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
993 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
994 }
995
996 // Otherwise we can only distribute by constant shifting the mask, so
997 // ensure we have constants.
998 if (!match(Shift, m_ImmConstant(CShift)))
999 return nullptr;
1000 if (!match(Mask, m_ImmConstant(CMask)))
1001 return nullptr;
1002
1003 // Check if we can distribute the binops.
1004 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1005 return nullptr;
1006
1007 Constant *NewCMask =
1008 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1009 Value *NewBinOp2 = Builder.CreateBinOp(
1010 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1011 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1012 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1013 NewBinOp1, CShift);
1014 };
1015
1016 if (Instruction *R = MatchBinOp(0))
1017 return R;
1018 return MatchBinOp(1);
1019}
1020
1021// (Binop (zext C), (select C, T, F))
1022//    -> (select C, (binop 1, T), (binop 0, F))
1023//
1024// (Binop (sext C), (select C, T, F))
1025//    -> (select C, (binop -1, T), (binop 0, F))
1026//
1027// Attempt to simplify binary operations into a select with folded args, when
1028// one operand of the binop is a select instruction and the other operand is a
1029// zext/sext extension, whose value is the select condition.
// NOTE(review): the function signature (lines 1030-1031) is truncated in this
// listing.
1032  // TODO: this simplification may be extended to any speculatable instruction,
1033  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1034  Instruction::BinaryOps Opc = I.getOpcode();
1035  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1036  Value *A, *CondVal, *TrueVal, *FalseVal;
1037  Value *CastOp;
1038
  // Matches (zext|sext i1 A) as one operand and
  // (select CondVal, TrueVal, FalseVal) as the other, capturing all pieces
  // into the locals declared above.
1039  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1040    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1041           A->getType()->getScalarSizeInBits() == 1 &&
1042           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1043                                    m_Value(FalseVal)));
1044  };
1045
1046  // Make sure one side of the binop is a select instruction, and the other is a
1047  // zero/sign extension operating on a i1.
1048  if (MatchSelectAndCast(LHS, RHS))
1049    CastOp = LHS;
1050  else if (MatchSelectAndCast(RHS, LHS))
1051    CastOp = RHS;
1052  else
1053    return nullptr;
1054
  // Builds `V op C` (preserving the original operand order of the binop),
  // where C is the value the extended bool takes on this arm: when IsTrueArm
  // is true the constant is 0 (the extended i1 is false on that arm),
  // otherwise it is 1 for zext and all-ones for sext.
1055  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1056    bool IsCastOpRHS = (CastOp == RHS);
1057    bool IsZExt = isa<ZExtInst>(CastOp);
1058    Constant *C;
1059
1060    if (IsTrueArm) {
1061      C = Constant::getNullValue(V->getType());
1062    } else if (IsZExt) {
1063      unsigned BitWidth = V->getType()->getScalarSizeInBits();
1064      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1065    } else {
1066      C = Constant::getAllOnesValue(V->getType());
1067    }
1068
1069    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1070                       : Builder.CreateBinOp(Opc, C, V);
1071  };
1072
1073  // If the value used in the zext/sext is the select condition, or the negated
1074  // of the select condition, the binop can be simplified.
1075  if (CondVal == A) {
1076    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1077    return SelectInst::Create(CondVal, NewTrueVal,
1078                              NewFoldedConst(true, FalseVal));
1079  }
1080
1081  if (match(A, m_Not(m_Specific(CondVal)))) {
    // A is !CondVal, so the roles of the two arms are swapped relative to the
    // case above.
1082    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1083    return SelectInst::Create(CondVal, NewTrueVal,
1084                              NewFoldedConst(false, FalseVal));
1085  }
1086
1087  return nullptr;
1088}
1089
  // Attempt to factor a common term out of expressions of the form
  // "(A op' B) op (C op' D)", "(A op' B) op C" and "B op (C op' D)".
  // NOTE(review): the function header (line 1090) is truncated in this
  // listing; this is presumably InstCombinerImpl::tryFactorizationFolds, the
  // factorization step invoked from the distributive-law folding below —
  // confirm against the full file.
1091  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1092  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1093  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1094  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1095  Value *A, *B, *C, *D;
  // LHSOpcode/RHSOpcode are only initialized when Op0/Op1 (respectively) are
  // non-null; every later use is guarded by the same null checks.
1096  Instruction::BinaryOps LHSOpcode, RHSOpcode;
1097
1098  if (Op0)
1099    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1100  if (Op1)
1101    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1102
1103  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
1104  // a common term.
1105  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1106    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1107      return V;
1108
1109  // The instruction has the form "(A op' B) op (C)".  Try to factorize common
1110  // term.
1111  if (Op0)
1112    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1113      if (Value *V =
1114              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1115        return V;
1116
1117  // The instruction has the form "(B) op (C op' D)".  Try to factorize common
1118  // term.
1119  if (Op1)
1120    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1121      if (Value *V =
1122              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1123        return V;
1124
1125  return nullptr;
1126}
1127
1128/// This tries to simplify binary operations which some other binary operation
1129/// distributes over either by factorizing out common terms
1130/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1131/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1132/// Returns the simplified value, or null if it didn't simplify.
// NOTE(review): the function signature (line 1133) is truncated in this
// listing.
1134  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1135  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1136  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1137  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1138
1139  // Factorization.
1140  if (Value *R = tryFactorizationFolds(I))
1141    return R;
1142
1143  // Expansion.
1144  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1145    // The instruction has the form "(A op' B) op C".  See if expanding it out
1146    // to "(A op C) op' (B op C)" results in simplifications.
1147    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1148    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1149
1150    // Disable the use of undef because it's not safe to distribute undef.
1151    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1152    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1153    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1154
1155    // Do "A op C" and "B op C" both simplify?
1156    if (L && R) {
1157      // They do! Return "L op' R".
1158      ++NumExpand;
1159      C = Builder.CreateBinOp(InnerOpcode, L, R);
1160      C->takeName(&I);
1161      return C;
1162    }
1163
1164    // Does "A op C" simplify to the identity value for the inner opcode?
1165    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1166      // They do! Return "B op C".
1167      ++NumExpand;
1168      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1169      C->takeName(&I);
1170      return C;
1171    }
1172
1173    // Does "B op C" simplify to the identity value for the inner opcode?
1174    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1175      // They do! Return "A op C".
1176      ++NumExpand;
1177      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1178      C->takeName(&I);
1179      return C;
1180    }
1181  }
1182
1183  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1184    // The instruction has the form "A op (B op' C)".  See if expanding it out
1185    // to "(A op B) op' (A op C)" results in simplifications.
1186    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1187    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1188
1189    // Disable the use of undef because it's not safe to distribute undef.
1190    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1191    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1192    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1193
1194    // Do "A op B" and "A op C" both simplify?
1195    if (L && R) {
1196      // They do! Return "L op' R".
1197      ++NumExpand;
1198      A = Builder.CreateBinOp(InnerOpcode, L, R);
1199      A->takeName(&I);
1200      return A;
1201    }
1202
1203    // Does "A op B" simplify to the identity value for the inner opcode?
1204    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1205      // They do! Return "A op C".
1206      ++NumExpand;
1207      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1208      A->takeName(&I);
1209      return A;
1210    }
1211
1212    // Does "A op C" simplify to the identity value for the inner opcode?
1213    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1214      // They do! Return "A op B".
1215      ++NumExpand;
1216      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1217      A->takeName(&I);
1218      return A;
1219    }
1220  }
1221
  // NOTE(review): the final return statement (line 1222) is truncated in this
  // listing.
1223}
1224
/// If LHS and RHS are PHI nodes in the same block, over the same incoming
/// blocks in the same order, and every incoming value pair is either
/// (L0, R0) or the swapped (R0, L0), return that common pair of values.
// NOTE(review): the parameter-list line (1226) is truncated in this listing;
// the caller below invokes this as matchSymmetricPhiNodesPair(PHINode *,
// PHINode *).
1225static std::optional<std::pair<Value *, Value *>>
1227  if (LHS->getParent() != RHS->getParent())
1228    return std::nullopt;
1229
  // Require at least two incoming values so the symmetry test is meaningful.
1230  if (LHS->getNumIncomingValues() < 2)
1231    return std::nullopt;
1232
  // The incoming block lists must match exactly (same blocks, same order).
1233  if (!equal(LHS->blocks(), RHS->blocks()))
1234    return std::nullopt;
1235
1236  Value *L0 = LHS->getIncomingValue(0);
1237  Value *R0 = RHS->getIncomingValue(0);
1238
1239  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1240    Value *L1 = LHS->getIncomingValue(I);
1241    Value *R1 = RHS->getIncomingValue(I);
1242
    // Every subsequent pair must be the first pair, possibly side-swapped.
1243    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1244      continue;
1245
1246    return std::nullopt;
1247  }
1248
1249  return std::optional(std::pair(L0, R0));
1250}
1251
/// If LHS and RHS are same-opcode instructions that compute a symmetric pair
/// of values (symmetric PHIs, selects on the same condition with swapped
/// arms, or min/max intrinsics over the same two operands), return that pair.
1252std::optional<std::pair<Value *, Value *>>
1253InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1254  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1255  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1256  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1257    return std::nullopt;
1258  switch (LHSInst->getOpcode()) {
1259  case Instruction::PHI:
1260    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1261  case Instruction::Select: {
1262    Value *Cond = LHSInst->getOperand(0);
1263    Value *TrueVal = LHSInst->getOperand(1);
1264    Value *FalseVal = LHSInst->getOperand(2);
    // Two selects on the same condition with swapped arms yield the pair.
1265    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1266        FalseVal == RHSInst->getOperand(1))
1267      return std::pair(TrueVal, FalseVal);
1268    return std::nullopt;
1269  }
1270  case Instruction::Call: {
1271    // Match min(a, b) and max(a, b)
1272    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1273    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1274    if (LHSMinMax && RHSMinMax &&
1275        LHSMinMax->getPredicate() ==
        // NOTE(review): the right-hand side of this predicate comparison
        // (line 1276) is truncated in this listing — presumably the swapped
        // predicate of RHSMinMax; confirm against the full file.
1277        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1278          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1279         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1280          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1281      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1282    return std::nullopt;
1283  }
1284  default:
1285    return std::nullopt;
1286  }
1287}
1288
// When at least one operand of the binop I is a one-use select, try to push
// the binop into the select arms:
//   (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
//   (A ? B : C) op Y           -> A ? (B op Y) : (C op Y)
//   X op (D ? E : F)           -> D ? (X op E) : (X op F)
// NOTE(review): the function header (line 1289) and the fast-math-flag guard
// and SimplifyQuery `Q` declarations (lines 1299, 1302, 1306) are truncated
// in this listing.
1290                                        Value *LHS,
1291                                        Value *RHS) {
1292  Value *A, *B, *C, *D, *E, *F;
1293  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1294  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1295  if (!LHSIsSelect && !RHSIsSelect)
1296    return nullptr;
1297
1298  FastMathFlags FMF;
1300  if (isa<FPMathOperator>(&I)) {
1301    FMF = I.getFastMathFlags();
1303  }
1304
1305  Instruction::BinaryOps Opcode = I.getOpcode();
1307
1308  Value *Cond, *True = nullptr, *False = nullptr;
1309
1310  // Special-case for add/negate combination. Replace the zero in the negation
1311  // with the trailing add operand:
1312  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1313  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1314  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1315    // We need an 'add' and exactly 1 arm of the select to have been simplified.
1316    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1317      return nullptr;
1318
1319    Value *N;
1320    if (True && match(FVal, m_Neg(m_Value(N)))) {
1321      Value *Sub = Builder.CreateSub(Z, N);
1322      return Builder.CreateSelect(Cond, True, Sub, I.getName());
1323    }
1324    if (False && match(TVal, m_Neg(m_Value(N)))) {
1325      Value *Sub = Builder.CreateSub(Z, N);
1326      return Builder.CreateSelect(Cond, Sub, False, I.getName());
1327    }
1328    return nullptr;
1329  };
1330
1331  if (LHSIsSelect && RHSIsSelect && A == D) {
1332    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1333    Cond = A;
1334    True = simplifyBinOp(Opcode, B, E, FMF, Q);
1335    False = simplifyBinOp(Opcode, C, F, FMF, Q);
1336
    // If both selects are single-use, one arm is allowed to not simplify; we
    // materialize it explicitly.
1337    if (LHS->hasOneUse() && RHS->hasOneUse()) {
1338      if (False && !True)
1339        True = Builder.CreateBinOp(Opcode, B, E);
1340      else if (True && !False)
1341        False = Builder.CreateBinOp(Opcode, C, F);
1342    }
1343  } else if (LHSIsSelect && LHS->hasOneUse()) {
1344    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1345    Cond = A;
1346    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1347    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1348    if (Value *NewSel = foldAddNegate(B, C, RHS))
1349      return NewSel;
1350  } else if (RHSIsSelect && RHS->hasOneUse()) {
1351    // X op (D ? E : F) -> D ? (X op E) : (X op F)
1352    Cond = D;
1353    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1354    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1355    if (Value *NewSel = foldAddNegate(E, F, LHS))
1356      return NewSel;
1357  }
1358
  // Both arms must have simplified (or been materialized above) for the
  // transform to pay off.
1359  if (!True || !False)
1360    return nullptr;
1361
1362  Value *SI = Builder.CreateSelect(Cond, True, False);
1363  SI->takeName(&I);
1364  return SI;
1365}
1366
1367/// Freely adapt every user of V as-if V was changed to !V.
1368/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
// NOTE(review): the function signature (line 1369) is truncated in this
// listing.
1370  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1371  for (User *U : make_early_inc_range(I->users())) {
1372    if (U == IgnoredUser)
1373      continue; // Don't consider this user.
1374    switch (cast<Instruction>(U)->getOpcode()) {
1375    case Instruction::Select: {
1376      auto *SI = cast<SelectInst>(U);
      // Inverting the condition is compensated by swapping the select arms
      // (and the branch-weight profile metadata with them).
1377      SI->swapValues();
1378      SI->swapProfMetadata();
1379      break;
1380    }
1381    case Instruction::Br: {
1382      BranchInst *BI = cast<BranchInst>(U);
1383      BI->swapSuccessors(); // swaps prof metadata too
1384      if (BPI)
      // NOTE(review): the BPI update statement (line 1385), guarded by the
      // `if (BPI)` above, is truncated in this listing.
1386      break;
1387    }
1388    case Instruction::Xor:
      // A 'not' of I computes I itself once I is inverted, so the xor's uses
      // are redirected to I.
1389      replaceInstUsesWith(cast<Instruction>(*U), I);
1390      // Add to worklist for DCE.
1391      addToWorklist(cast<Instruction>(U));
1392      break;
1393    default:
1394      llvm_unreachable("Got unexpected user - out of sync with "
1395                       "canFreelyInvertAllUsersOf() ?");
1396    }
1397  }
1398}
1399
1400/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1401/// constant zero (which is the 'negate' form).
1402Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1403 Value *NegV;
1404 if (match(V, m_Neg(m_Value(NegV))))
1405 return NegV;
1406
1407 // Constants can be considered to be negated values if they can be folded.
1408 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1409 return ConstantExpr::getNeg(C);
1410
1411 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1412 if (C->getType()->getElementType()->isIntegerTy())
1413 return ConstantExpr::getNeg(C);
1414
1415 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1416 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1417 Constant *Elt = CV->getAggregateElement(i);
1418 if (!Elt)
1419 return nullptr;
1420
1421 if (isa<UndefValue>(Elt))
1422 continue;
1423
1424 if (!isa<ConstantInt>(Elt))
1425 return nullptr;
1426 }
1427 return ConstantExpr::getNeg(CV);
1428 }
1429
1430 // Negate integer vector splats.
1431 if (auto *CV = dyn_cast<Constant>(V))
1432 if (CV->getType()->isVectorTy() &&
1433 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1434 return ConstantExpr::getNeg(CV);
1435
1436 return nullptr;
1437}
1438
1439// Try to fold:
1440//   1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1441//       -> ({s|u}itofp (int_binop x, y))
1442//   2) (fp_binop ({s|u}itofp x), FpC)
1443//       -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1444//
1445// Assuming the sign of the cast for x/y is `OpsFromSigned`.
// NOTE(review): several lines are truncated in this listing: the parameter
// list continuation (1448), the MaxRepresentableBits initializer (1457), the
// Op1IntC declaration (1524), and the IntOpc declaration (1550).
1446Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1447    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1449
1450  Type *FPTy = BO.getType();
1451  Type *IntTy = IntOps[0]->getType();
1452
1453  unsigned IntSz = IntTy->getScalarSizeInBits();
1454  // This is the maximum number of inuse bits by the integer where the int -> fp
1455  // casts are exact.
1456  unsigned MaxRepresentableBits =
1458
1459  // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1460  // checks later on.
1461  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1462
1463  // NB: This only comes up if OpsFromSigned is true, so there is no need to
1464  // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1465  auto IsNonZero = [&](unsigned OpNo) -> bool {
1466    if (OpsKnown[OpNo].hasKnownBits() &&
1467        OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1468      return true;
1469    return isKnownNonZero(IntOps[OpNo], SQ);
1470  };
1471
1472  auto IsNonNeg = [&](unsigned OpNo) -> bool {
1473    // NB: This matches the impl in ValueTracking, we just try to use cached
1474    // knownbits here. If we ever start supporting WithCache for
1475    // `isKnownNonNegative`, change this to an explicit call.
1476    return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1477  };
1478
1479  // Check if we know for certain that ({s|u}itofp op) is exact.
1480  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1481    // Can we treat this operand as the desired sign?
1482    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1483        !IsNonNeg(OpNo))
1484      return false;
1485
1486    // If fp precision >= bitwidth(op) then its exact.
1487    // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1488    // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1489    // handled specially. We can't, however, increase the bound arbitrarily for
1490    // `sitofp` as for larger sizes, it won't sign extend.
1491    if (MaxRepresentableBits < IntSz) {
1492      // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1493      // numSignBits(op).
1494      // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1495      // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1496      if (OpsFromSigned)
1497        NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1498      // Finally for unsigned check that fp precision >= bitwidth(op) -
1499      // numLeadingZeros(op).
1500      else {
1501        NumUsedLeadingBits[OpNo] =
1502            IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1503      }
1504    }
1505    // NB: We could also check if op is known to be a power of 2 or zero (which
1506    // will always be representable). Its unlikely, however, that is we are
1507    // unable to bound op in any way we will be able to pass the overflow checks
1508    // later on.
1509
1510    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1511      return false;
1512    // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1513    return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1514           IsNonZero(OpNo);
1515  };
1516
1517  // If we have a constant rhs, see if we can losslessly convert it to an int.
1518  if (Op1FpC != nullptr) {
1519    // Signed + Mul req non-zero
1520    if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1521        !match(Op1FpC, m_NonZeroFP()))
1522      return nullptr;
1523
1525        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1526        IntTy, DL);
1527    if (Op1IntC == nullptr)
1528      return nullptr;
    // Round-trip the constant back to FP; the conversion is only usable if it
    // was lossless.
1529    if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1530                                              : Instruction::UIToFP,
1531                                Op1IntC, FPTy, DL) != Op1FpC)
1532      return nullptr;
1533
1534    // First try to keep sign of cast the same.
1535    IntOps[1] = Op1IntC;
1536  }
1537
1538  // Ensure lhs/rhs integer types match.
1539  if (IntTy != IntOps[1]->getType())
1540    return nullptr;
1541
1542  if (Op1FpC == nullptr) {
1543    if (!IsValidPromotion(1))
1544      return nullptr;
1545  }
1546  if (!IsValidPromotion(0))
1547    return nullptr;
1548
1549  // Final we check if the integer version of the binop will not overflow.
1551  // Because of the precision check, we can often rule out overflows.
1552  bool NeedsOverflowCheck = true;
1553  // Try to conservatively rule out overflow based on the already done precision
1554  // checks.
1555  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1556  unsigned OverflowMaxCurBits =
1557      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1558  bool OutputSigned = OpsFromSigned;
1559  switch (BO.getOpcode()) {
1560  case Instruction::FAdd:
1561    IntOpc = Instruction::Add;
1562    OverflowMaxOutputBits += OverflowMaxCurBits;
1563    break;
1564  case Instruction::FSub:
1565    IntOpc = Instruction::Sub;
1566    OverflowMaxOutputBits += OverflowMaxCurBits;
1567    break;
1568  case Instruction::FMul:
1569    IntOpc = Instruction::Mul;
1570    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1571    break;
1572  default:
1573    llvm_unreachable("Unsupported binop");
1574  }
1575  // The precision check may have already ruled out overflow.
1576  if (OverflowMaxOutputBits < IntSz) {
1577    NeedsOverflowCheck = false;
1578    // We can bound unsigned overflow from sub to in range signed value (this is
1579    // what allows us to avoid the overflow check for sub).
1580    if (IntOpc == Instruction::Sub)
1581      OutputSigned = true;
1582  }
1583
1584  // Precision check did not rule out overflow, so need to check.
1585  // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1586  // `IntOps[...]` arguments to `KnownOps[...]`.
1587  if (NeedsOverflowCheck &&
1588      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1589    return nullptr;
1590
1591  Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
  // Tag the new integer binop with the wrap flag matching the proven
  // no-overflow direction.
1592  if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1593    IntBO->setHasNoSignedWrap(OutputSigned);
1594    IntBO->setHasNoUnsignedWrap(!OutputSigned);
1595  }
1596  if (OutputSigned)
1597    return new SIToFPInst(IntBinOp, FPTy);
1598  return new UIToFPInst(IntBinOp, FPTy);
1599}
1600
1601// Try to fold:
1602// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1603// -> ({s|u}itofp (int_binop x, y))
1604// 2) (fp_binop ({s|u}itofp x), FpC)
1605// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1606Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1607 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1608 Constant *Op1FpC = nullptr;
1609 // Check for:
1610 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1611 // 2) (binop ({s|u}itofp x), FpC)
1612 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1613 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1614 return nullptr;
1615
1616 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1617 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1618 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1619 return nullptr;
1620
1621 // Cache KnownBits a bit to potentially save some analysis.
1622 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1623
1624 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1625 // different constraints depending on the sign of the cast.
1626 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1627 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1628 IntOps, Op1FpC, OpsKnown))
1629 return R;
1630 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1631 Op1FpC, OpsKnown);
1632}
1633
1634/// A binop with a constant operand and a sign-extended boolean operand may be
1635/// converted into a select of constants by applying the binary operation to
1636/// the constant with the two possible values of the extended boolean (0 or -1).
1637Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1638  // TODO: Handle non-commutative binop (constant is operand 0).
1639  // TODO: Handle zext.
1640  // TODO: Peek through 'not' of cast.
1641  Value *BO0 = BO.getOperand(0);
1642  Value *BO1 = BO.getOperand(1);
1643  Value *X;
1644  Constant *C;
  // Require (sext i1 X) op ImmConstant; anything else is left alone.
1645  if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1646      !X->getType()->isIntOrIntVectorTy(1))
1647    return nullptr;
1648
1649  // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
  // NOTE(review): the declarations of `Ones` and `Zero` (lines 1650-1651) are
  // truncated in this listing — presumably the all-ones and null constants of
  // BO's type.
1652  Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1653  Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1654  return SelectInst::Create(X, TVal, FVal);
1655}
1656
// Simplify instruction I as-if its use of select SI were replaced by the
// chosen arm of the select; returns the simplified value or null.
// NOTE(review): the function header (line 1657), the declaration of `Ops`
// (1659), and parts of the condition-matching expression (1666, 1668) are
// truncated in this listing.
1658                                                 bool IsTrueArm) {
1660  for (Value *Op : I.operands()) {
1661    Value *V = nullptr;
1662    if (Op == SI) {
      // The select itself is replaced by the selected arm.
1663      V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1664    } else if (match(SI->getCondition(),
1667                     m_Specific(Op), m_Value(V))) &&
1669      // Pass
1670    } else {
1671      V = Op;
1672    }
1673    Ops.push_back(V);
1674  }
1675
1676  return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1677}
1678
// Clone instruction I with its use of SI replaced by NewOp, and insert the
// clone immediately before I; returns the clone.
// NOTE(review): the function header (line 1679) and one statement between the
// replaceUsesOfWith and InsertNewInstBefore calls (line 1683) are truncated
// in this listing.
1680                                             Value *NewOp, InstCombiner &IC) {
1681  Instruction *Clone = I.clone();
1682  Clone->replaceUsesOfWith(SI, NewOp);
1684  IC.InsertNewInstBefore(Clone, I.getIterator());
1685  return Clone;
1686}
1687
// Fold operation Op into the arms of its select operand SI, producing
// select(cond, Op(TV), Op(FV)), when at least one arm simplifies.
// NOTE(review): the function header (line 1688, FoldOpIntoSelect) is
// truncated in this listing.
1689                                            bool FoldWithMultiUse) {
1690  // Don't modify shared select instructions unless set FoldWithMultiUse
1691  if (!SI->hasOneUse() && !FoldWithMultiUse)
1692    return nullptr;
1693
1694  Value *TV = SI->getTrueValue();
1695  Value *FV = SI->getFalseValue();
1696
1697  // Bool selects with constant operands can be folded to logical ops.
1698  if (SI->getType()->isIntOrIntVectorTy(1))
1699    return nullptr;
1700
1701  // Test if a FCmpInst instruction is used exclusively by a select as
1702  // part of a minimum or maximum operation. If so, refrain from doing
1703  // any other folding. This helps out other analyses which understand
1704  // non-obfuscated minimum and maximum idioms. And in this case, at
1705  // least one of the comparison operands has at least one user besides
1706  // the compare (the select), which would often largely negate the
1707  // benefit of folding anyway.
1708  if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1709    if (CI->hasOneUse()) {
1710      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1711      if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1712        return nullptr;
1713    }
1714  }
1715
1716  // Make sure that one of the select arms folds successfully.
1717  Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1718  Value *NewFV =
1719      simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1720  if (!NewTV && !NewFV)
1721    return nullptr;
1722
1723  // Create an instruction for the arm that did not fold.
1724  if (!NewTV)
1725    NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1726  if (!NewFV)
1727    NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1728  return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1729}
1730
// Simplify instruction I as-if its use of PHI node PN were replaced by
// InValue, the value PN receives from block InBB, with all other operands
// phi-translated into InBB. Returns the simplified value or null.
// NOTE(review): the function header (line 1731), the declaration of `Ops`
// (1737), and the start of the `NewVal` declaration (1749) are truncated in
// this listing.
1732                                        Value *InValue, BasicBlock *InBB,
1733                                        const DataLayout &DL,
1734                                        const SimplifyQuery SQ) {
1735  // NB: It is a precondition of this transform that the operands be
1736  // phi translatable!
1738  for (Value *Op : I.operands()) {
1739    if (Op == PN)
1740      Ops.push_back(InValue);
1741    else
1742      Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1743  }
1744
1745  // Don't consider the simplification successful if we get back a constant
1746  // expression. That's just an instruction in hiding.
1747  // Also reject the case where we simplify back to the phi node. We wouldn't
1748  // be able to remove it in that case.
1750      &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1751  if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1752    return NewVal;
1753
1754  // Check if incoming PHI value can be replaced with constant
1755  // based on implied condition.
1756  BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1757  const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1758  if (TerminatorBI && TerminatorBI->isConditional() &&
1759      TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
    // If control reaches PN's block via InBB's conditional branch, the branch
    // condition may imply the value of the icmp on this edge.
1760    bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1761    std::optional<bool> ImpliedCond = isImpliedCondition(
1762        TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1763        DL, LHSIsTrue);
1764    if (ImpliedCond)
1765      return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1766  }
1767
1768  return nullptr;
1769}
1770
1772 bool AllowMultipleUses) {
1773 unsigned NumPHIValues = PN->getNumIncomingValues();
1774 if (NumPHIValues == 0)
1775 return nullptr;
1776
1777 // We normally only transform phis with a single use. However, if a PHI has
1778 // multiple uses and they are all the same operation, we can fold *all* of the
1779 // uses into the PHI.
1780 bool OneUse = PN->hasOneUse();
1781 bool IdenticalUsers = false;
1782 if (!AllowMultipleUses && !OneUse) {
1783 // Walk the use list for the instruction, comparing them to I.
1784 for (User *U : PN->users()) {
1785 Instruction *UI = cast<Instruction>(U);
1786 if (UI != &I && !I.isIdenticalTo(UI))
1787 return nullptr;
1788 }
1789 // Otherwise, we can replace *all* users with the new PHI we form.
1790 IdenticalUsers = true;
1791 }
1792
1793 // Check that all operands are phi-translatable.
1794 for (Value *Op : I.operands()) {
1795 if (Op == PN)
1796 continue;
1797
1798 // Non-instructions never require phi-translation.
1799 auto *I = dyn_cast<Instruction>(Op);
1800 if (!I)
1801 continue;
1802
1803 // Phi-translate can handle phi nodes in the same block.
1804 if (isa<PHINode>(I))
1805 if (I->getParent() == PN->getParent())
1806 continue;
1807
1808 // Operand dominates the block, no phi-translation necessary.
1809 if (DT.dominates(I, PN->getParent()))
1810 continue;
1811
1812 // Not phi-translatable, bail out.
1813 return nullptr;
1814 }
1815
1816 // Check to see whether the instruction can be folded into each phi operand.
1817 // If there is one operand that does not fold, remember the BB it is in.
1818 SmallVector<Value *> NewPhiValues;
1819 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1820 bool SeenNonSimplifiedInVal = false;
1821 for (unsigned i = 0; i != NumPHIValues; ++i) {
1822 Value *InVal = PN->getIncomingValue(i);
1823 BasicBlock *InBB = PN->getIncomingBlock(i);
1824
1825 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1826 NewPhiValues.push_back(NewVal);
1827 continue;
1828 }
1829
1830 // Handle some cases that can't be fully simplified, but where we know that
1831 // the two instructions will fold into one.
1832 auto WillFold = [&]() {
1833 if (!InVal->hasOneUser())
1834 return false;
1835
1836 // icmp of ucmp/scmp with constant will fold to icmp.
1837 const APInt *Ignored;
1838 if (isa<CmpIntrinsic>(InVal) &&
1839 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1840 return true;
1841
1842 // icmp eq zext(bool), 0 will fold to !bool.
1843 if (isa<ZExtInst>(InVal) &&
1844 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1845 match(&I,
1847 return true;
1848
1849 return false;
1850 };
1851
1852 if (WillFold()) {
1853 OpsToMoveUseToIncomingBB.push_back(i);
1854 NewPhiValues.push_back(nullptr);
1855 continue;
1856 }
1857
1858 if (!OneUse && !IdenticalUsers)
1859 return nullptr;
1860
1861 if (SeenNonSimplifiedInVal)
1862 return nullptr; // More than one non-simplified value.
1863 SeenNonSimplifiedInVal = true;
1864
1865 // If there is exactly one non-simplified value, we can insert a copy of the
1866 // operation in that block. However, if this is a critical edge, we would
1867 // be inserting the computation on some other paths (e.g. inside a loop).
1868 // Only do this if the pred block is unconditionally branching into the phi
1869 // block. Also, make sure that the pred block is not dead code.
1870 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1871 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1872 return nullptr;
1873
1874 NewPhiValues.push_back(nullptr);
1875 OpsToMoveUseToIncomingBB.push_back(i);
1876
1877 // If the InVal is an invoke at the end of the pred block, then we can't
1878 // insert a computation after it without breaking the edge.
1879 if (isa<InvokeInst>(InVal))
1880 if (cast<Instruction>(InVal)->getParent() == InBB)
1881 return nullptr;
1882
1883 // Do not push the operation across a loop backedge. This could result in
1884 // an infinite combine loop, and is generally non-profitable (especially
1885 // if the operation was originally outside the loop).
1886 if (isBackEdge(InBB, PN->getParent()))
1887 return nullptr;
1888 }
1889
1890 // Clone the instruction that uses the phi node and move it into the incoming
1891 // BB because we know that the next iteration of InstCombine will simplify it.
1893 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1895 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1896
1897 Instruction *Clone = Clones.lookup(OpBB);
1898 if (!Clone) {
1899 Clone = I.clone();
1900 for (Use &U : Clone->operands()) {
1901 if (U == PN)
1902 U = Op;
1903 else
1904 U = U->DoPHITranslation(PN->getParent(), OpBB);
1905 }
1906 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1907 Clones.insert({OpBB, Clone});
1908 }
1909
1910 NewPhiValues[OpIndex] = Clone;
1911 }
1912
1913 // Okay, we can do the transformation: create the new PHI node.
1914 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1915 InsertNewInstBefore(NewPN, PN->getIterator());
1916 NewPN->takeName(PN);
1917 NewPN->setDebugLoc(PN->getDebugLoc());
1918
1919 for (unsigned i = 0; i != NumPHIValues; ++i)
1920 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1921
1922 if (IdenticalUsers) {
1923 for (User *U : make_early_inc_range(PN->users())) {
1924 Instruction *User = cast<Instruction>(U);
1925 if (User == &I)
1926 continue;
1927 replaceInstUsesWith(*User, NewPN);
1929 }
1930 OneUse = true;
1931 }
1932
1933 if (OneUse) {
1934 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1935 const_cast<PHINode &>(*NewPN),
1936 const_cast<PHINode &>(*PN), DT);
1937 }
1938 return replaceInstUsesWith(I, NewPN);
1939}
1940
// Body of InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO).
// NOTE(review): the opening signature line (embedded line 1941) is missing
// from this rendering; the embedded numbering also jumps 1964 -> 1966 and
// 2027 -> 2029, so two interior statements were dropped by the extraction.
// Verify those spots against the upstream file before editing.
//
// Strategy: the binop's two operands must both be single-use phis, with the
// same number of incoming values, in the same block as the binop. Two folds
// are attempted: (1) a paired identity-constant fold across all incoming
// value pairs, producing a phi of the surviving values; (2) for 2-predecessor
// phis with one constant arm each, constant-fold the constant arm and hoist
// a new binop into the unconditional non-constant predecessor.
1942 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1943 // we are guarding against replicating the binop in >1 predecessor.
1944 // This could miss matching a phi with 2 constant incoming values.
1945 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1946 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1947 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1948 Phi0->getNumOperands() != Phi1->getNumOperands())
1949 return nullptr;
1950
1951 // TODO: Remove the restriction for binop being in the same block as the phis.
1952 if (BO.getParent() != Phi0->getParent() ||
1953 BO.getParent() != Phi1->getParent())
1954 return nullptr;
1955
1956 // Fold if there is at least one specific constant value in phi0 or phi1's
1957 // incoming values that comes from the same block and this specific constant
1958 // value can be used to do optimization for specific binary operator.
1959 // For example:
1960 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1961 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1962 // %add = add i32 %phi0, %phi1
1963 // ==>
1964 // %add = phi i32 [%j, %bb0], [%i, %bb1]
// NOTE(review): embedded line 1965 is missing here — it presumably computes
// the identity constant `C` for BO's opcode (the surviving ");" below is the
// tail of that call). Confirm against upstream before relying on this text.
1966 /*AllowRHSConstant*/ false);
1967 if (C) {
1968 SmallVector<Value *, 4> NewIncomingValues;
// Accepts a (Phi0 use, Phi1 use) pair when both come from the same incoming
// block and exactly one side is the identity constant C; collects the other.
1969 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1970 auto &Phi0Use = std::get<0>(T);
1971 auto &Phi1Use = std::get<1>(T);
1972 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1973 return false;
1974 Value *Phi0UseV = Phi0Use.get();
1975 Value *Phi1UseV = Phi1Use.get();
1976 if (Phi0UseV == C)
1977 NewIncomingValues.push_back(Phi1UseV);
1978 else if (Phi1UseV == C)
1979 NewIncomingValues.push_back(Phi0UseV);
1980 else
1981 return false;
1982 return true;
1983 };
1984
1985 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1986 CanFoldIncomingValuePair)) {
1987 PHINode *NewPhi =
1988 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1989 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1990 "The number of collected incoming values should equal the number "
1991 "of the original PHINode operands!");
1992 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1993 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1994 return NewPhi;
1995 }
1996 }
1997
// The second fold only handles the 2-predecessor case.
1998 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1999 return nullptr;
2000
2001 // Match a pair of incoming constants for one of the predecessor blocks.
2002 BasicBlock *ConstBB, *OtherBB;
2003 Constant *C0, *C1;
2004 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2005 ConstBB = Phi0->getIncomingBlock(0);
2006 OtherBB = Phi0->getIncomingBlock(1);
2007 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2008 ConstBB = Phi0->getIncomingBlock(1);
2009 OtherBB = Phi0->getIncomingBlock(0);
2010 } else {
2011 return nullptr;
2012 }
2013 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2014 return nullptr;
2015
2016 // The block that we are hoisting to must reach here unconditionally.
2017 // Otherwise, we could be speculatively executing an expensive or
2018 // non-speculative op.
2019 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2020 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2021 !DT.isReachableFromEntry(OtherBB))
2022 return nullptr;
2023
2024 // TODO: This check could be tightened to only apply to binops (div/rem) that
2025 // are not safe to speculatively execute. But that could allow hoisting
2026 // potentially expensive instructions (fdiv for example).
2027 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
// NOTE(review): embedded line 2028 (the loop-body condition that bails out
// for certain instructions preceding BO) is missing from this rendering.
2029 return nullptr;
2030
2031 // Fold constants for the predecessor block with constant incoming values.
2032 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2033 if (!NewC)
2034 return nullptr;
2035
2036 // Make a new binop in the predecessor block with the non-constant incoming
2037 // values.
2038 Builder.SetInsertPoint(PredBlockBranch);
2039 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2040 Phi0->getIncomingValueForBlock(OtherBB),
2041 Phi1->getIncomingValueForBlock(OtherBB));
2042 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2043 NotFoldedNewBO->copyIRFlags(&BO);
2044
2045 // Replace the binop with a phi of the new values. The old phis are dead.
2046 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2047 NewPhi->addIncoming(NewBO, OtherBB);
2048 NewPhi->addIncoming(NewC, ConstBB);
2049 return NewPhi;
2050}
2051
// Body of InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I).
// NOTE(review): the opening signature line (embedded line 2052) is missing
// from this rendering.
//
// Requires a constant RHS, then dispatches on the LHS: a select is folded
// via FoldOpIntoSelect, a phi via foldOpIntoPhi; otherwise no fold.
2053 if (!isa<Constant>(I.getOperand(1)))
2054 return nullptr;
2055
2056 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2057 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2058 return NewSel;
2059 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2060 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2061 return NewPhi;
2062 }
2063 return nullptr;
2064}
2065
// Body of the shouldMergeGEPs(GEP, Src) helper used by visitGEPOfGEP.
// NOTE(review): the opening signature line (embedded line 2066) is missing
// from this rendering.
//
// Returns false only for the unprofitable case: this GEP is all-zero-index
// (i.e. the same pointer as Src) while Src is a non-trivial, multiply-used
// GEP — merging would duplicate Src's index arithmetic. Otherwise true.
2067 // If this GEP has only 0 indices, it is the same pointer as
2068 // Src. If Src is not a trivial GEP too, don't combine
2069 // the indices.
2070 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2071 !Src.hasOneUse())
2072 return false;
2073 return true;
2074}
2075
// Body of InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst).
// NOTE(review): the opening signature line (embedded line 2076) is missing
// from this rendering, and the embedded numbering jumps 2115 -> 2117,
// 2142 -> 2144 and 2196 -> 2198, so three interior lines were dropped by
// the extraction (flagged inline below). Verify against upstream.
//
// Vector-only transforms that move shuffles/reverses past a binop:
// concat-shuffle splitting, vector.reverse sinking, same-mask shuffle
// sinking, select-shuffle cancellation, shuffle-vs-constant reordering,
// and splat-shuffle reassociation.
2077 if (!isa<VectorType>(Inst.getType()))
2078 return nullptr;
2079
2080 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2081 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2082 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2083 cast<VectorType>(Inst.getType())->getElementCount());
2084 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2085 cast<VectorType>(Inst.getType())->getElementCount());
2086
2087 // If both operands of the binop are vector concatenations, then perform the
2088 // narrow binop on each pair of the source operands followed by concatenation
2089 // of the results.
2090 Value *L0, *L1, *R0, *R1;
2091 ArrayRef<int> Mask;
2092 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2093 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2094 LHS->hasOneUse() && RHS->hasOneUse() &&
2095 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2096 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2097 // This transform does not have the speculative execution constraint as
2098 // below because the shuffle is a concatenation. The new binops are
2099 // operating on exactly the same elements as the existing binop.
2100 // TODO: We could ease the mask requirement to allow different undef lanes,
2101 // but that requires an analysis of the binop-with-undef output value.
2102 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2103 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2104 BO->copyIRFlags(&Inst);
2105 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2106 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2107 BO->copyIRFlags(&Inst);
2108 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2109 }
2110
// Builds Op(X, Y) followed by a llvm.vector.reverse of the result, copying
// IR flags from the original instruction.
2111 auto createBinOpReverse = [&](Value *X, Value *Y) {
2112 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2113 if (auto *BO = dyn_cast<BinaryOperator>(V))
2114 BO->copyIRFlags(&Inst);
2115 Module *M = Inst.getModule();
// NOTE(review): embedded line 2116 is missing — presumably the declaration
// lookup for the vector.reverse intrinsic that initializes `F` (the two
// surviving argument lines below are its tail). Confirm against upstream.
2117 M, Intrinsic::vector_reverse, V->getType());
2118 return CallInst::Create(F, V);
2119 };
2120
2121 // NOTE: Reverse shuffles don't require the speculative execution protection
2122 // below because they don't affect which lanes take part in the computation.
2123
2124 Value *V1, *V2;
2125 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2126 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2127 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2128 (LHS->hasOneUse() || RHS->hasOneUse() ||
2129 (LHS == RHS && LHS->hasNUses(2))))
2130 return createBinOpReverse(V1, V2);
2131
2132 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2133 if (LHS->hasOneUse() && isSplatValue(RHS))
2134 return createBinOpReverse(V1, RHS);
2135 }
2136 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2137 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2138 return createBinOpReverse(LHS, V2);
2139
2140 // It may not be safe to reorder shuffles and things like div, urem, etc.
2141 // because we may trap when executing those ops on unknown vector elements.
2142 // See PR20059.
// NOTE(review): embedded line 2143 is missing — the guard condition for this
// early return (a safe-to-speculatively-execute check, per the comment
// above). Confirm against upstream.
2144 return nullptr;
2145
// Builds Op(X, Y) followed by a single-source shuffle with mask M, copying
// IR flags from the original instruction.
2146 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2147 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2148 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2149 BO->copyIRFlags(&Inst);
2150 return new ShuffleVectorInst(XY, M);
2151 };
2152
2153 // If both arguments of the binary operation are shuffles that use the same
2154 // mask and shuffle within a single vector, move the shuffle after the binop.
2155 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2156 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2157 V1->getType() == V2->getType() &&
2158 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2159 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2160 return createBinOpShuffle(V1, V2, Mask);
2161 }
2162
2163 // If both arguments of a commutative binop are select-shuffles that use the
2164 // same mask with commuted operands, the shuffles are unnecessary.
2165 if (Inst.isCommutative() &&
2166 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2167 match(RHS,
2168 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2169 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2170 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2171 // TODO: Allow shuffles that contain undefs in the mask?
2172 // That is legal, but it reduces undef knowledge.
2173 // TODO: Allow arbitrary shuffles by shuffling after binop?
2174 // That might be legal, but we have to deal with poison.
2175 if (LShuf->isSelect() &&
2176 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2177 RShuf->isSelect() &&
2178 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2179 // Example:
2180 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2181 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2182 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2183 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2184 NewBO->copyIRFlags(&Inst);
2185 return NewBO;
2186 }
2187 }
2188
2189 // If one argument is a shuffle within one vector and the other is a constant,
2190 // try moving the shuffle after the binary operation. This canonicalization
2191 // intends to move shuffles closer to other shuffles and binops closer to
2192 // other binops, so they can be folded. It may also enable demanded elements
2193 // transforms.
2194 Constant *C;
2195 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2196 if (InstVTy &&
// NOTE(review): embedded line 2197 is missing — the opening of the match()
// pattern (commutative binop of a one-use single-source shuffle and an
// immediate constant; the two surviving lines below are its tail).
2198 m_Mask(Mask))),
2199 m_ImmConstant(C))) &&
2200 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2201 InstVTy->getNumElements()) {
2202 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2203 "Shuffle should not change scalar type");
2204
2205 // Find constant NewC that has property:
2206 // shuffle(NewC, ShMask) = C
2207 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
2208 // reorder is not possible. A 1-to-1 mapping is not required. Example:
2209 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2210 bool ConstOp1 = isa<Constant>(RHS);
2211 ArrayRef<int> ShMask = Mask;
2212 unsigned SrcVecNumElts =
2213 cast<FixedVectorType>(V1->getType())->getNumElements();
2214 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2215 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2216 bool MayChange = true;
2217 unsigned NumElts = InstVTy->getNumElements();
2218 for (unsigned I = 0; I < NumElts; ++I) {
2219 Constant *CElt = C->getAggregateElement(I);
2220 if (ShMask[I] >= 0) {
2221 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2222 Constant *NewCElt = NewVecC[ShMask[I]];
2223 // Bail out if:
2224 // 1. The constant vector contains a constant expression.
2225 // 2. The shuffle needs an element of the constant vector that can't
2226 // be mapped to a new constant vector.
2227 // 3. This is a widening shuffle that copies elements of V1 into the
2228 // extended elements (extending with poison is allowed).
2229 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2230 I >= SrcVecNumElts) {
2231 MayChange = false;
2232 break;
2233 }
2234 NewVecC[ShMask[I]] = CElt;
2235 }
2236 // If this is a widening shuffle, we must be able to extend with poison
2237 // elements. If the original binop does not produce a poison in the high
2238 // lanes, then this transform is not safe.
2239 // Similarly for poison lanes due to the shuffle mask, we can only
2240 // transform binops that preserve poison.
2241 // TODO: We could shuffle those non-poison constant values into the
2242 // result by using a constant vector (rather than an poison vector)
2243 // as operand 1 of the new binop, but that might be too aggressive
2244 // for target-independent shuffle creation.
2245 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2246 Constant *MaybePoison =
2247 ConstOp1
2248 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2249 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2250 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2251 MayChange = false;
2252 break;
2253 }
2254 }
2255 }
2256 if (MayChange) {
2257 Constant *NewC = ConstantVector::get(NewVecC);
2258 // It may not be safe to execute a binop on a vector with poison elements
2259 // because the entire instruction can be folded to undef or create poison
2260 // that did not exist in the original code.
2261 // TODO: The shift case should not be necessary.
2262 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2263 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2264
2265 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2266 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2267 Value *NewLHS = ConstOp1 ? V1 : NewC;
2268 Value *NewRHS = ConstOp1 ? NewC : V1;
2269 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2270 }
2271 }
2272
2273 // Try to reassociate to sink a splat shuffle after a binary operation.
2274 if (Inst.isAssociative() && Inst.isCommutative()) {
2275 // Canonicalize shuffle operand as LHS.
2276 if (isa<ShuffleVectorInst>(RHS))
2277 std::swap(LHS, RHS);
2278
2279 Value *X;
2280 ArrayRef<int> MaskC;
2281 int SplatIndex;
2282 Value *Y, *OtherOp;
2283 if (!match(LHS,
2284 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2285 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2286 X->getType() != Inst.getType() ||
2287 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2288 return nullptr;
2289
2290 // FIXME: This may not be safe if the analysis allows undef elements. By
2291 // moving 'Y' before the splat shuffle, we are implicitly assuming
2292 // that it is not undef/poison at the splat index.
2293 if (isSplatValue(OtherOp, SplatIndex)) {
2294 std::swap(Y, OtherOp);
2295 } else if (!isSplatValue(Y, SplatIndex)) {
2296 return nullptr;
2297 }
2298
2299 // X and Y are splatted values, so perform the binary operation on those
2300 // values followed by a splat followed by the 2nd binary operation:
2301 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2302 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2303 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2304 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2305 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2306
2307 // Intersect FMF on both new binops. Other (poison-generating) flags are
2308 // dropped to be safe.
2309 if (isa<FPMathOperator>(R)) {
2310 R->copyFastMathFlags(&Inst);
2311 R->andIRFlags(RHS);
2312 }
2313 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2314 NewInstBO->copyIRFlags(R);
2315 return R;
2316 }
2317
2318 return nullptr;
2319}
2320
2321/// Try to narrow the width of a binop if at least 1 operand is an extend of
2322/// of a value. This requires a potentially expensive known bits check to make
2323/// sure the narrow op does not overflow.
2324Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2325 // We need at least one extended operand.
2326 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2327
2328 // If this is a sub, we swap the operands since we always want an extension
2329 // on the RHS. The LHS can be an extension or a constant.
2330 if (BO.getOpcode() == Instruction::Sub)
2331 std::swap(Op0, Op1);
2332
2333 Value *X;
2334 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2335 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2336 return nullptr;
2337
2338 // If both operands are the same extension from the same source type and we
2339 // can eliminate at least one (hasOneUse), this might work.
2340 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2341 Value *Y;
2342 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2343 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2344 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2345 // If that did not match, see if we have a suitable constant operand.
2346 // Truncating and extending must produce the same constant.
2347 Constant *WideC;
2348 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2349 return nullptr;
2350 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2351 if (!NarrowC)
2352 return nullptr;
2353 Y = NarrowC;
2354 }
2355
2356 // Swap back now that we found our operands.
2357 if (BO.getOpcode() == Instruction::Sub)
2358 std::swap(X, Y);
2359
2360 // Both operands have narrow versions. Last step: the math must not overflow
2361 // in the narrow width.
2362 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2363 return nullptr;
2364
2365 // bo (ext X), (ext Y) --> ext (bo X, Y)
2366 // bo (ext X), C --> ext (bo X, C')
2367 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2368 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2369 if (IsSext)
2370 NewBinOp->setHasNoSignedWrap();
2371 else
2372 NewBinOp->setHasNoUnsignedWrap();
2373 }
2374 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2375}
2376
2377/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2378/// transform.
// NOTE(review): this helper is badly truncated by the rendering — the first
// signature line (embedded line 2379) and the single return statement
// (embedded line 2381) are both missing; only the second parameter line and
// the closing brace survive. Restore from upstream before editing.
 2380 GEPOperator &GEP2) {
2382}
2383
2384/// Thread a GEP operation with constant indices through the constant true/false
2385/// arms of a select.
// NOTE(review): the opening signature line (embedded line 2386, the
// GetElementPtrInst parameter) is missing from this rendering; only the
// Builder parameter line below survives.
 2387 InstCombiner::BuilderTy &Builder) {
2388 if (!GEP.hasAllConstantIndices())
2389 return nullptr;
2390
2391 Instruction *Sel;
2392 Value *Cond;
2393 Constant *TrueC, *FalseC;
2394 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2395 !match(Sel,
2396 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2397 return nullptr;
2398
2399 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2400 // Propagate 'inbounds' and metadata from existing instructions.
2401 // Note: using IRBuilder to create the constants for efficiency.
2402 SmallVector<Value *, 4> IndexC(GEP.indices());
2403 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2404 Type *Ty = GEP.getSourceElementType();
2405 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2406 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2407 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2408}
2409
2410// Canonicalization:
2411// gep T, (gep i8, base, C1), (Index + C2) into
2412// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
// NOTE(review): the opening signature line (embedded line 2413, the
// GetElementPtrInst parameter) is missing from this rendering; the two
// remaining parameter lines survive below.
 2414 GEPOperator *Src,
 2415 InstCombinerImpl &IC) {
// Only a single-index outer GEP is handled.
2416 if (GEP.getNumIndices() != 1)
2417 return nullptr;
2418 auto &DL = IC.getDataLayout();
2419 Value *Base;
2420 const APInt *C1;
// The inner GEP must be a ptradd of a constant offset C1.
2421 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2422 return nullptr;
2423 Value *VarIndex;
2424 const APInt *C2;
2425 Type *PtrTy = Src->getType()->getScalarType();
2426 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
// The outer index must be (VarIndex + C2) (add or equivalent disjoint or).
2427 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2428 return nullptr;
// Both constants must already be in the pointer's index width.
2429 if (C1->getBitWidth() != IndexSizeInBits ||
2430 C2->getBitWidth() != IndexSizeInBits)
2431 return nullptr;
2432 Type *BaseType = GEP.getSourceElementType();
// Scalable element types have no compile-time alloc size to fold with.
2433 if (isa<ScalableVectorType>(BaseType))
2434 return nullptr;
2435 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2436 APInt NewOffset = TypeSize * *C2 + *C1;
// Profitable when the folded constant vanishes, or when both intermediate
// values die (one-use) so no extra instructions remain.
2437 if (NewOffset.isZero() ||
2438 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2439 Value *GEPConst =
2440 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2441 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2442 }
2443
2444 return nullptr;
2445}
2446
// Body of InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
// GEPOperator *Src) — merges a GEP whose pointer operand is another GEP.
// NOTE(review): this rendering drops several lines — the first signature
// line (embedded line 2447) and interior lines 2463, 2477, 2486, 2497,
// 2512 and 2567 (flagged inline below). Verify against upstream before
// editing; several surviving lines are tails of those dropped statements.
 2448 GEPOperator *Src) {
2449 // Combine Indices - If the source pointer to this getelementptr instruction
2450 // is a getelementptr instruction with matching element type, combine the
2451 // indices of the two getelementptr instructions into a single instruction.
2452 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2453 return nullptr;
2454
2455 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2456 return I;
2457
2458 // For constant GEPs, use a more general offset-based folding approach.
2459 Type *PtrTy = Src->getType()->getScalarType();
2460 if (GEP.hasAllConstantIndices() &&
2461 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2462 // Split Src into a variable part and a constant suffix.
// NOTE(review): embedded line 2463 is missing — presumably the declaration
// and initialization of the gep-type iterator `GTI` used below.
2464 Type *BaseType = GTI.getIndexedType();
2465 bool IsFirstType = true;
2466 unsigned NumVarIndices = 0;
2467 for (auto Pair : enumerate(Src->indices())) {
2468 if (!isa<ConstantInt>(Pair.value())) {
2469 BaseType = GTI.getIndexedType();
2470 IsFirstType = false;
2471 NumVarIndices = Pair.index() + 1;
2472 }
2473 ++GTI;
2474 }
2475
2476 // Determine the offset for the constant suffix of Src.
// NOTE(review): embedded line 2477 is missing — presumably the declaration
// of the accumulator `Offset` (an APInt) used below.
2478 if (NumVarIndices != Src->getNumIndices()) {
2479 // FIXME: getIndexedOffsetInType() does not handled scalable vectors.
2480 if (BaseType->isScalableTy())
2481 return nullptr;
2482
2483 SmallVector<Value *> ConstantIndices;
2484 if (!IsFirstType)
2485 ConstantIndices.push_back(
// NOTE(review): embedded line 2486 is missing — the argument of the
// push_back above (a zero leading index, by the surrounding logic — TODO
// confirm against upstream).
2487 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2488 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2489 }
2490
2491 // Add the offset for GEP (which is fully constant).
2492 if (!GEP.accumulateConstantOffset(DL, Offset))
2493 return nullptr;
2494
2495 // Convert the total offset back into indices.
2496 SmallVector<APInt> ConstIndices =
// NOTE(review): embedded line 2497 is missing — the initializer expression
// computing the index list from Offset (via the DataLayout).
2498 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2499 return nullptr;
2500
2501 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2502 SmallVector<Value *> Indices;
2503 append_range(Indices, drop_end(Src->indices(),
2504 Src->getNumIndices() - NumVarIndices));
2505 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2506 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2507 // Even if the total offset is inbounds, we may end up representing it
2508 // by first performing a larger negative offset, and then a smaller
2509 // positive one. The large negative offset might go out of bounds. Only
2510 // preserve inbounds if all signs are the same.
2511 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
// NOTE(review): embedded line 2512 is missing — the statement executed when
// index signs differ (stripping inbounds-related flags from NW, per the
// comment above — TODO confirm against upstream).
2513 if (!Idx.isNonNegative())
2514 NW = NW.withoutNoUnsignedWrap();
2515 }
2516
2517 return replaceInstUsesWith(
2518 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2519 Indices, "", NW));
2520 }
2521
2522 if (Src->getResultElementType() != GEP.getSourceElementType())
2523 return nullptr;
2524
2525 SmallVector<Value*, 8> Indices;
2526
2527 // Find out whether the last index in the source GEP is a sequential idx.
2528 bool EndsWithSequential = false;
2529 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2530 I != E; ++I)
2531 EndsWithSequential = I.isSequential();
2532
2533 // Can we combine the two pointer arithmetics offsets?
2534 if (EndsWithSequential) {
2535 // Replace: gep (gep %P, long B), long A, ...
2536 // With: T = long A+B; gep %P, T, ...
2537 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2538 Value *GO1 = GEP.getOperand(1);
2539
2540 // If they aren't the same type, then the input hasn't been processed
2541 // by the loop above yet (which canonicalizes sequential index types to
2542 // intptr_t). Just avoid transforming this until the input has been
2543 // normalized.
2544 if (SO1->getType() != GO1->getType())
2545 return nullptr;
2546
2547 Value *Sum =
2548 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2549 // Only do the combine when we are sure the cost after the
2550 // merge is never more than that before the merge.
2551 if (Sum == nullptr)
2552 return nullptr;
2553
2554 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2555 Indices.push_back(Sum);
2556 Indices.append(GEP.op_begin()+2, GEP.op_end());
2557 } else if (isa<Constant>(*GEP.idx_begin()) &&
2558 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2559 Src->getNumOperands() != 1) {
2560 // Otherwise we can do the fold if the first index of the GEP is a zero
2561 Indices.append(Src->op_begin()+1, Src->op_end());
2562 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2563 }
2564
2565 if (!Indices.empty())
2566 return replaceInstUsesWith(
// NOTE(review): embedded line 2567 is missing — the opening of the GEP
// creation call whose argument list survives below (a Builder.CreateGEP,
// matching the call at embedded line 2518 — TODO confirm against upstream).
2568 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2569 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2570
2571 return nullptr;
2572}
2573
2575 BuilderTy *Builder,
2576 bool &DoesConsume, unsigned Depth) {
2577 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2578 // ~(~(X)) -> X.
2579 Value *A, *B;
2580 if (match(V, m_Not(m_Value(A)))) {
2581 DoesConsume = true;
2582 return A;
2583 }
2584
2585 Constant *C;
2586 // Constants can be considered to be not'ed values.
2587 if (match(V, m_ImmConstant(C)))
2588 return ConstantExpr::getNot(C);
2589
2591 return nullptr;
2592
2593 // The rest of the cases require that we invert all uses so don't bother
2594 // doing the analysis if we know we can't use the result.
2595 if (!WillInvertAllUses)
2596 return nullptr;
2597
2598 // Compares can be inverted if all of their uses are being modified to use
2599 // the ~V.
2600 if (auto *I = dyn_cast<CmpInst>(V)) {
2601 if (Builder != nullptr)
2602 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2603 I->getOperand(1));
2604 return NonNull;
2605 }
2606
2607 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2608 // `(-1 - B) - A` if we are willing to invert all of the uses.
2609 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2610 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2611 DoesConsume, Depth))
2612 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2613 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2614 DoesConsume, Depth))
2615 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2616 return nullptr;
2617 }
2618
2619 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2620 // into `A ^ B` if we are willing to invert all of the uses.
2621 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2622 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2623 DoesConsume, Depth))
2624 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2625 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2626 DoesConsume, Depth))
2627 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2628 return nullptr;
2629 }
2630
2631 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2632 // `A + (-1 - B)` if we are willing to invert all of the uses.
2633 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2634 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2635 DoesConsume, Depth))
2636 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2637 return nullptr;
2638 }
2639
2640 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2641 // into `A s>> B` if we are willing to invert all of the uses.
2642 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2643 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2644 DoesConsume, Depth))
2645 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2646 return nullptr;
2647 }
2648
2649 Value *Cond;
2650 // LogicOps are special in that we canonicalize them at the cost of an
2651 // instruction.
2652 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2653 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2654 // Selects/min/max with invertible operands are freely invertible
2655 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2656 bool LocalDoesConsume = DoesConsume;
2657 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2658 LocalDoesConsume, Depth))
2659 return nullptr;
2660 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2661 LocalDoesConsume, Depth)) {
2662 DoesConsume = LocalDoesConsume;
2663 if (Builder != nullptr) {
2664 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2665 DoesConsume, Depth);
2666 assert(NotB != nullptr &&
2667 "Unable to build inverted value for known freely invertable op");
2668 if (auto *II = dyn_cast<IntrinsicInst>(V))
2670 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2671 return Builder->CreateSelect(Cond, NotA, NotB);
2672 }
2673 return NonNull;
2674 }
2675 }
2676
2677 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2678 bool LocalDoesConsume = DoesConsume;
2680 for (Use &U : PN->operands()) {
2681 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2682 Value *NewIncomingVal = getFreelyInvertedImpl(
2683 U.get(), /*WillInvertAllUses=*/false,
2684 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2685 if (NewIncomingVal == nullptr)
2686 return nullptr;
2687 // Make sure that we can safely erase the original PHI node.
2688 if (NewIncomingVal == V)
2689 return nullptr;
2690 if (Builder != nullptr)
2691 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2692 }
2693
2694 DoesConsume = LocalDoesConsume;
2695 if (Builder != nullptr) {
2698 PHINode *NewPN =
2699 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2700 for (auto [Val, Pred] : IncomingValues)
2701 NewPN->addIncoming(Val, Pred);
2702 return NewPN;
2703 }
2704 return NonNull;
2705 }
2706
2707 if (match(V, m_SExtLike(m_Value(A)))) {
2708 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2709 DoesConsume, Depth))
2710 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2711 return nullptr;
2712 }
2713
2714 if (match(V, m_Trunc(m_Value(A)))) {
2715 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2716 DoesConsume, Depth))
2717 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2718 return nullptr;
2719 }
2720
2721 // De Morgan's Laws:
2722 // (~(A | B)) -> (~A & ~B)
2723 // (~(A & B)) -> (~A | ~B)
2724 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2725 bool IsLogical, Value *A,
2726 Value *B) -> Value * {
2727 bool LocalDoesConsume = DoesConsume;
2728 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2729 LocalDoesConsume, Depth))
2730 return nullptr;
2731 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2732 LocalDoesConsume, Depth)) {
2733 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2734 LocalDoesConsume, Depth);
2735 DoesConsume = LocalDoesConsume;
2736 if (IsLogical)
2737 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2738 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2739 }
2740
2741 return nullptr;
2742 };
2743
2744 if (match(V, m_Or(m_Value(A), m_Value(B))))
2745 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2746 B);
2747
2748 if (match(V, m_And(m_Value(A), m_Value(B))))
2749 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2750 B);
2751
2752 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2753 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2754 B);
2755
2756 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2757 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2758 B);
2759
2760 return nullptr;
2761}
2762
/// Return true if we should canonicalize the gep to an i8 ptradd.
  Value *PtrOp = GEP.getOperand(0);
  Type *GEPEltType = GEP.getSourceElementType();
  // Already in ptradd (i8) form; nothing to canonicalize.
  if (GEPEltType->isIntegerTy(8))
    return false;

  // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
  // intrinsic. This has better support in BasicAA.
  if (GEPEltType->isScalableTy())
    return true;

  // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
  // together.
  if (GEP.getNumIndices() == 1 &&
      match(GEP.getOperand(1),
                  m_Shl(m_Value(), m_ConstantInt()))))
    return true;

  // gep (gep %p, C1), %x, C2 is expanded so the two constants can
  // possibly be merged together.
  auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
  // Only worthwhile when the inner GEP is all-constant and this GEP carries at
  // least one non-zero constant index that could combine with it.
  return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
         any_of(GEP.indices(), [](Value *V) {
           const APInt *C;
           return match(V, m_APInt(C)) && !C->isZero();
         });
}
2792
                                 IRBuilderBase &Builder) {
  // All incoming values of the PHI must be GEPs; Op1 (from the first incoming
  // value) serves as the exemplar the others are compared against.
  auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
  if (!Op1)
    return nullptr;

  // Don't fold a GEP into itself through a PHI node. This can only happen
  // through the back-edge of a loop. Folding a GEP into itself means that
  // the value of the previous iteration needs to be stored in the meantime,
  // thus requiring an additional register variable to be live, but not
  // actually achieving anything (the GEP still needs to be executed once per
  // loop iteration).
  if (Op1 == &GEP)
    return nullptr;
  // Accumulate no-wrap flags that hold for *every* incoming GEP; intersected
  // with each Op2 below so the merged GEP only keeps universally valid flags.
  GEPNoWrapFlags NW = Op1->getNoWrapFlags();

  // Operand index at which the incoming GEPs differ; -1 while all operands
  // seen so far are identical. At most one differing index is supported.
  int DI = -1;

  for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
    auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
    if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
        Op1->getSourceElementType() != Op2->getSourceElementType())
      return nullptr;

    // As for Op1 above, don't try to fold a GEP into itself.
    if (Op2 == &GEP)
      return nullptr;

    // Keep track of the type as we walk the GEP.
    Type *CurTy = nullptr;

    for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
      if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
        return nullptr;

      if (Op1->getOperand(J) != Op2->getOperand(J)) {
        if (DI == -1) {
          // We have not seen any differences yet in the GEPs feeding the
          // PHI yet, so we record this one if it is allowed to be a
          // variable.

          // The first two arguments can vary for any GEP, the rest have to be
          // static for struct slots
          if (J > 1) {
            assert(CurTy && "No current type?");
            if (CurTy->isStructTy())
              return nullptr;
          }

          DI = J;
        } else {
          // The GEP is different by more than one input. While this could be
          // extended to support GEPs that vary by more than one variable it
          // doesn't make sense since it greatly increases the complexity and
          // would result in an R+R+R addressing mode which no backend
          // directly supports and would need to be broken into several
          // simpler instructions anyway.
          return nullptr;
        }
      }

      // Sink down a layer of the type for the next iteration.
      if (J > 0) {
        if (J == 1) {
          CurTy = Op1->getSourceElementType();
        } else {
          CurTy =
              GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
        }
      }
    }

    NW &= Op2->getNoWrapFlags();
  }

  // If not all GEPs are identical we'll have to create a new PHI node.
  // Check that the old PHI node has only one use so that it will get
  // removed.
  if (DI != -1 && !PN->hasOneUse())
    return nullptr;

  auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
  NewGEP->setNoWrapFlags(NW);

  if (DI == -1) {
    // All the GEPs feeding the PHI are identical. Clone one down into our
    // BB so that it can be merged with the current GEP.
  } else {
    // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
    // into the current block so it can be merged, and create a new PHI to
    // set that index.
    PHINode *NewPN;
    {
      IRBuilderBase::InsertPointGuard Guard(Builder);
      Builder.SetInsertPoint(PN);
      NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
                                PN->getNumOperands());
    }

    // The new PHI merges the one differing operand from each incoming GEP.
    for (auto &I : PN->operands())
      NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
                         PN->getIncomingBlock(I));

    NewGEP->setOperand(DI, NewPN);
  }

  NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
  return NewGEP;
}
2902
  Value *PtrOp = GEP.getOperand(0);
  // Snapshot the index operands so they can be handed to the simplifier and,
  // later, to a replacement GEP.
  SmallVector<Value *, 8> Indices(GEP.indices());
  Type *GEPType = GEP.getType();
  Type *GEPEltType = GEP.getSourceElementType();
  if (Value *V =
          simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
    return replaceInstUsesWith(GEP, V);

  // For vector geps, use the generic demanded vector support.
  // Skip if GEP return type is scalable. The number of elements is unknown at
  // compile-time.
  if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
    auto VWidth = GEPFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
                                              PoisonElts)) {
      if (V != &GEP)
        return replaceInstUsesWith(GEP, V);
      // The GEP itself was modified in place; report it as changed.
      return &GEP;
    }

    // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
    // possible (decide on canonical form for pointer broadcast), 3) exploit
    // undef elements to decrease demanded bits
  }

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  bool MadeChange = false;

  // Index width may not be the same width as pointer width.
  // Data layout chooses the right type based on supported integer types.
  Type *NewScalarIndexTy =
      DL.getIndexType(GEP.getPointerOperandType()->getScalarType());

  for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
       ++I, ++GTI) {
    // Skip indices into struct types.
    if (GTI.isStruct())
      continue;

    Type *IndexTy = (*I)->getType();
    // Vector indices get a vector of the canonical scalar index type with the
    // same element count.
    Type *NewIndexType =
        IndexTy->isVectorTy()
            ? VectorType::get(NewScalarIndexTy,
                              cast<VectorType>(IndexTy)->getElementCount())
            : NewScalarIndexTy;

    // If the element type has zero size then any index over it is equivalent
    // to an index of zero, so replace it with zero if it is not zero already.
    Type *EltTy = GTI.getIndexedType();
    if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
      if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
        *I = Constant::getNullValue(NewIndexType);
        MadeChange = true;
      }

    if (IndexTy != NewIndexType) {
      // If we are using a wider index than needed for this platform, shrink
      // it to what we need. If narrower, sign-extend it to what we need.
      // This explicit cast can make subsequent optimizations more obvious.
      *I = Builder.CreateIntCast(*I, NewIndexType, true);
      MadeChange = true;
    }
  }
  if (MadeChange)
    return &GEP;

  // Canonicalize constant GEPs to i8 type.
  if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
    if (GEP.accumulateConstantOffset(DL, Offset))
      return replaceInstUsesWith(
          GEP.getNoWrapFlags()));
  }

    Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
    Value *NewGEP =
        Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
    return replaceInstUsesWith(GEP, NewGEP);
  }

  // Check to see if the inputs to the PHI node are getelementptr instructions.
  if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
    if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
      return replaceOperand(GEP, 0, NewPtrOp);
  }

  if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
    if (Instruction *I = visitGEPOfGEP(GEP, Src))
      return I;

  if (GEP.getNumIndices() == 1) {
    unsigned AS = GEP.getPointerAddressSpace();
    // Only act when the index is already exactly index-sized for this address
    // space, so no implicit extension/truncation semantics are in play.
    if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
        DL.getIndexSizeInBits(AS)) {
      uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();

      if (TyAllocSize == 1) {
        // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
        // but only if the result pointer is only used as if it were an integer,
        // or both point to the same underlying object (otherwise provenance is
        // not necessarily retained).
        Value *X = GEP.getPointerOperand();
        Value *Y;
        if (match(GEP.getOperand(1),
            GEPType == Y->getType()) {
          bool HasSameUnderlyingObject =
          bool Changed = false;
          GEP.replaceUsesWithIf(Y, [&](Use &U) {
            bool ShouldReplace = HasSameUnderlyingObject ||
                                 isa<ICmpInst>(U.getUser()) ||
                                 isa<PtrToIntInst>(U.getUser());
            Changed |= ShouldReplace;
            return ShouldReplace;
          });
          return Changed ? &GEP : nullptr;
        }
      } else if (auto *ExactIns =
                     dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
        // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
        Value *V;
        if (ExactIns->isExact()) {
          if ((has_single_bit(TyAllocSize) &&
               match(GEP.getOperand(1),
                     m_Shr(m_Value(V),
                           m_SpecificInt(countr_zero(TyAllocSize))))) ||
              match(GEP.getOperand(1),
                    m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
                GEP.getPointerOperand(), V,
                GEP.getNoWrapFlags());
          }
        }
        if (ExactIns->isExact() && ExactIns->hasOneUse()) {
          // Try to canonicalize non-i8 element type to i8 if the index is an
          // exact instruction. If the index is an exact instruction (div/shr)
          // with a constant RHS, we can fold the non-i8 element scale into the
          // div/shr (similiar to the mul case, just inverted).
          const APInt *C;
          std::optional<APInt> NewC;
          if (has_single_bit(TyAllocSize) &&
              match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
              C->uge(countr_zero(TyAllocSize)))
            NewC = *C - countr_zero(TyAllocSize);
          else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
            APInt Quot;
            uint64_t Rem;
            APInt::udivrem(*C, TyAllocSize, Quot, Rem);
            // The fold is only exact when the divisor divides evenly.
            if (Rem == 0)
              NewC = Quot;
          } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
            APInt Quot;
            int64_t Rem;
            APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
            // For sdiv we need to make sure we arent creating INT_MIN / -1.
            if (!Quot.isAllOnes() && Rem == 0)
              NewC = Quot;
          }

          if (NewC.has_value()) {
            Value *NewOp = Builder.CreateBinOp(
                static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
                ConstantInt::get(V->getType(), *NewC));
            // The rescaled div/shr remains exact by construction.
            cast<BinaryOperator>(NewOp)->setIsExact();
                GEP.getPointerOperand(), NewOp,
                GEP.getNoWrapFlags());
          }
        }
      }
    }
  }
  // We do not handle pointer-vector geps here.
  if (GEPType->isVectorTy())
    return nullptr;

  if (GEP.getNumIndices() == 1) {
    // We can only preserve inbounds if the original gep is inbounds, the add
    // is nsw, and the add operands are non-negative.
    auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
      return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
             isKnownNonNegative(Idx2, Q);
    };

    // Try to replace ADD + GEP with GEP + GEP.
    Value *Idx1, *Idx2;
    if (match(GEP.getOperand(1),
              m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
      // %idx = add i64 %idx1, %idx2
      // %gep = getelementptr i32, ptr %ptr, i64 %idx
      // as:
      // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
      // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
      bool IsInBounds = CanPreserveInBounds(
          cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
          Idx1, Idx2);
      auto *NewPtr =
          Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
                            Idx1, "", IsInBounds);
      return replaceInstUsesWith(
          GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
                                 IsInBounds));
    }
    ConstantInt *C;
    if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
                                     m_Value(Idx1), m_ConstantInt(C))))))) {
      // %add = add nsw i32 %idx1, idx2
      // %sidx = sext i32 %add to i64
      // %gep = getelementptr i32, ptr %ptr, i64 %sidx
      // as:
      // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
      // %newgep = getelementptr i32, ptr %newptr, i32 idx2
      bool IsInBounds = CanPreserveInBounds(
          /*IsNSW=*/true, Idx1, C);
      auto *NewPtr = Builder.CreateGEP(
          GEP.getSourceElementType(), GEP.getPointerOperand(),
          Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
          IsInBounds);
      return replaceInstUsesWith(
          GEP,
          Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
                            Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
                            "", IsInBounds));
    }
  }

  if (!GEP.isInBounds()) {
    unsigned IdxWidth =
    APInt BasePtrOffset(IdxWidth, 0);
    Value *UnderlyingPtrOp =
                                                      BasePtrOffset);
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
        DL, CanBeNull, CanBeFreed);
    // If the whole constant-offset access provably stays inside a
    // dereferenceable (non-null, non-freeable) region, the GEP can be
    // upgraded to inbounds.
    if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
      if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
          BasePtrOffset.isNonNegative()) {
        APInt AllocSize(IdxWidth, DerefBytes);
        if (BasePtrOffset.ule(AllocSize)) {
              GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
        }
      }
    }
  }

  // nusw + nneg -> nuw
  if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
      all_of(GEP.indices(), [&](Value *Idx) {
        return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
      })) {
    GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
    return &GEP;
  }

    return R;

  return nullptr;
}
3175
3177 Instruction *AI) {
3178 if (isa<ConstantPointerNull>(V))
3179 return true;
3180 if (auto *LI = dyn_cast<LoadInst>(V))
3181 return isa<GlobalVariable>(LI->getPointerOperand());
3182 // Two distinct allocations will never be equal.
3183 return isAllocLikeFn(V, &TLI) && V != AI;
3184}
3185
3186/// Given a call CB which uses an address UsedV, return true if we can prove the
3187/// call's only possible effect is storing to V.
3188static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3189 const TargetLibraryInfo &TLI) {
3190 if (!CB.use_empty())
3191 // TODO: add recursion if returned attribute is present
3192 return false;
3193
3194 if (CB.isTerminator())
3195 // TODO: remove implementation restriction
3196 return false;
3197
3198 if (!CB.willReturn() || !CB.doesNotThrow())
3199 return false;
3200
3201 // If the only possible side effect of the call is writing to the alloca,
3202 // and the result isn't used, we can safely remove any reads implied by the
3203 // call including those which might read the alloca itself.
3204 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3205 return Dest && Dest->Ptr == UsedV;
3206}
3207
                                const TargetLibraryInfo &TLI) {
  const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
  Worklist.push_back(AI);

  // Worklist walk over everything that (transitively) uses the allocation.
  // Every acceptable user is appended to Users so the caller can erase them;
  // any user we cannot account for makes the whole site non-removable.
  do {
    Instruction *PI = Worklist.pop_back_val();
    for (User *U : PI->users()) {
      Instruction *I = cast<Instruction>(U);
      switch (I->getOpcode()) {
      default:
        // Give up the moment we see something we can't handle.
        return false;

      case Instruction::AddrSpaceCast:
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
        // Pointer-deriving instructions: record them and chase their users too.
        Users.emplace_back(I);
        Worklist.push_back(I);
        continue;

      case Instruction::ICmp: {
        ICmpInst *ICI = cast<ICmpInst>(I);
        // We can fold eq/ne comparisons with null to false/true, respectively.
        // We also fold comparisons in some conditions provided the alloc has
        // not escaped (see isNeverEqualToUnescapedAlloc).
        if (!ICI->isEquality())
          return false;
        // The operand on the other side of the compare from the allocation.
        unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
          return false;

        // Do not fold compares to aligned_alloc calls, as they may have to
        // return null in case the required alignment cannot be satisfied,
        // unless we can prove that both alignment and size are valid.
        auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
          // Check if alignment and size of a call to aligned_alloc is valid,
          // that is alignment is a power-of-2 and the size is a multiple of the
          // alignment.
          const APInt *Alignment;
          const APInt *Size;
          return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
                 match(CB->getArgOperand(1), m_APInt(Size)) &&
                 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
        };
        auto *CB = dyn_cast<CallBase>(AI);
        LibFunc TheLibFunc;
        if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
            TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
            !AlignmentAndSizeKnownValid(CB))
          return false;
        Users.emplace_back(I);
        continue;
      }

      case Instruction::Call:
        // Ignore no-op and store intrinsics.
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
          switch (II->getIntrinsicID()) {
          default:
            return false;

          case Intrinsic::memmove:
          case Intrinsic::memcpy:
          case Intrinsic::memset: {
            // Only removable when the allocation is the *destination*; a
            // volatile access or a read from the allocation must be kept.
            MemIntrinsic *MI = cast<MemIntrinsic>(II);
            if (MI->isVolatile() || MI->getRawDest() != PI)
              return false;
            [[fallthrough]];
          }
          case Intrinsic::assume:
          case Intrinsic::invariant_start:
          case Intrinsic::invariant_end:
          case Intrinsic::lifetime_start:
          case Intrinsic::lifetime_end:
          case Intrinsic::objectsize:
            Users.emplace_back(I);
            continue;
          case Intrinsic::launder_invariant_group:
          case Intrinsic::strip_invariant_group:
            // These return a pointer derived from the allocation, so their
            // users must be examined as well.
            Users.emplace_back(I);
            Worklist.push_back(I);
            continue;
          }
        }

        if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
          Users.emplace_back(I);
          continue;
        }

        // A matching-family free of this allocation is removable with it.
        if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
            getAllocationFamily(I, &TLI) == Family) {
          assert(Family);
          Users.emplace_back(I);
          continue;
        }

        // Likewise a matching-family realloc; its result is another alias of
        // the allocation, so keep walking its users.
        if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
            getAllocationFamily(I, &TLI) == Family) {
          assert(Family);
          Users.emplace_back(I);
          Worklist.push_back(I);
          continue;
        }

        return false;

      case Instruction::Store: {
        StoreInst *SI = cast<StoreInst>(I);
        if (SI->isVolatile() || SI->getPointerOperand() != PI)
          return false;
        Users.emplace_back(I);
        continue;
      }
      }
      llvm_unreachable("missing a return?");
    }
  } while (!Worklist.empty());
  return true;
}
3331
  assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));

  // If we have a malloc call which is only used in any amount of comparisons to
  // null and free calls, delete the calls and replace the comparisons with true
  // or false as appropriate.

  // This is based on the principle that we can substitute our own allocation
  // function (which will never return null) rather than knowledge of the
  // specific function being called. In some sense this can change the permitted
  // outputs of a program (when we convert a malloc to an alloca, the fact that
  // the allocation is now on the stack is potentially visible, for example),
  // but we believe in a permissible manner.

  // If we are removing an alloca with a dbg.declare, insert dbg.value calls
  // before each store.
  std::unique_ptr<DIBuilder> DIB;
  if (isa<AllocaInst>(MI)) {
    findDbgUsers(DVIs, &MI, &DVRs);
    DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
  }

  if (isAllocSiteRemovable(&MI, Users, TLI)) {
    // First pass over the collected users.
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      // Lowering all @llvm.objectsize calls first because they may
      // use a bitcast/GEP of the alloca we are removing.
      if (!Users[i])
        continue;

      Instruction *I = cast<Instruction>(&*Users[i]);

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::objectsize) {
          SmallVector<Instruction *> InsertedInstructions;
          Value *Result = lowerObjectSizeCall(
              II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
          for (Instruction *Inserted : InsertedInstructions)
            Worklist.add(Inserted);
          replaceInstUsesWith(*I, Result);
          Users[i] = nullptr; // Skip examining in the next loop.
        }
      }
    }
    // Second pass: rewrite the remaining users now that the objectsize calls
    // have been lowered.
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      if (!Users[i])
        continue;

      Instruction *I = cast<Instruction>(&*Users[i]);

      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        // Preserve debug info for the value being stored before the store
        // (and the alloca it targets) disappear.
        for (auto *DVI : DVIs)
          if (DVI->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
        for (auto *DVR : DVRs)
          if (DVR->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
      } else {
        // Casts, GEP, or anything else: we're about to delete this instruction,
        // so it can not have any valid uses.
        replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
      }
    }

    if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
      // Replace invoke with a NOP intrinsic to maintain the original CFG
      Module *M = II->getModule();
      Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
      InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
                         II->getParent());
    }

    // Remove debug intrinsics which describe the value contained within the
    // alloca. In addition to removing dbg.{declare,addr} which simply point to
    // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
    //
    // ```
    // define void @foo(i32 %0) {
    //   %a = alloca i32                              ; Deleted.
    //   store i32 %0, i32* %a
    //   dbg.value(i32 %0, "arg0")                    ; Not deleted.
    //   dbg.value(i32* %a, "arg0", DW_OP_deref)      ; Deleted.
    //   call void @trivially_inlinable_no_op(i32* %a)
    //   ret void
    // }
    // ```
    //
    // This may not be required if we stop describing the contents of allocas
    // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
    // the LowerDbgDeclare utility.
    //
    // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
    // "arg0" dbg.value may be stale after the call. However, failing to remove
    // the DW_OP_deref dbg.value causes large gaps in location coverage.
    //
    // FIXME: the Assignment Tracking project has now likely made this
    // redundant (and it's sometimes harmful).
    for (auto *DVI : DVIs)
      if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
        DVI->eraseFromParent();
    for (auto *DVR : DVRs)
      if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
        DVR->eraseFromParent();

    return eraseInstFromFunction(MI);
  }
  return nullptr;
}
3448
/// Move the call to free before a NULL test.
///
/// Check if this free is accessed after its argument has been test
/// against NULL (property 0).
/// If yes, it is legal to move this call in its predecessor block.
///
/// The move is performed only if the block containing the call to free
/// will be removed, i.e.:
/// 1. it has only one predecessor P, and P has two successors
/// 2. it contains the call, noops, and an unconditional branch
/// 3. its successor is the same as its predecessor's successor
///
/// The profitability is out-of concern here and this function should
/// be called only if the caller knows this transformation would be
/// profitable (e.g., for code size).
                                           const DataLayout &DL) {
  Value *Op = FI.getArgOperand(0);
  BasicBlock *FreeInstrBB = FI.getParent();
  BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();

  // Validate part of constraint #1: Only one predecessor
  // FIXME: We can extend the number of predecessor, but in that case, we
  //        would duplicate the call to free in each predecessor and it may
  //        not be profitable even for code size.
  if (!PredBB)
    return nullptr;

  // Validate constraint #2: Does this block contains only the call to
  //                         free, noops, and an unconditional branch?
  BasicBlock *SuccBB;
  Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
  if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
    return nullptr;

  // If there are only 2 instructions in the block, at this point,
  // this is the call to free and unconditional.
  // If there are more than 2 instructions, check that they are noops
  // i.e., they won't hurt the performance of the generated code.
  if (FreeInstrBB->size() != 2) {
    for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
      if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
        continue;
      // No-op casts are the only other instructions tolerated in the block.
      auto *Cast = dyn_cast<CastInst>(&Inst);
      if (!Cast || !Cast->isNoopCast(DL))
        return nullptr;
    }
  }
  // Validate the rest of constraint #1 by matching on the pred branch.
  Instruction *TI = PredBB->getTerminator();
  BasicBlock *TrueBB, *FalseBB;
  CmpPredicate Pred;
  if (!match(TI, m_Br(m_ICmp(Pred,
                             m_Specific(Op->stripPointerCasts())),
                      m_Zero()),
                 TrueBB, FalseBB)))
    return nullptr;
  if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
    return nullptr;

  // Validate constraint #3: Ensure the null case just falls through.
  if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
    return nullptr;
  assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
         "Broken CFG: missing edge from predecessor to successor");

  // At this point, we know that everything in FreeInstrBB can be moved
  // before TI.
  for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
    if (&Instr == FreeInstrBBTerminator)
      break;
    Instr.moveBeforePreserving(TI->getIterator());
  }
  assert(FreeInstrBB->size() == 1 &&
         "Only the branch instruction should remain");

  // Now that we've moved the call to free before the NULL check, we have to
  // remove any attributes on its parameter that imply it's non-null, because
  // those attributes might have only been valid because of the NULL check, and
  // we can get miscompiles if we keep them. This is conservative if non-null is
  // also implied by something other than the NULL check, but it's guaranteed to
  // be correct, and the conservativeness won't matter in practice, since the
  // attributes are irrelevant for the call to free itself and the pointer
  // shouldn't be used after the call.
  AttributeList Attrs = FI.getAttributes();
  Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
  // Downgrade dereferenceable to dereferenceable_or_null, since the pointer
  // may now be null when the (hoisted) call executes.
  Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
  if (Dereferenceable.isValid()) {
    uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
    Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
                                       Attribute::Dereferenceable);
    Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
  }
  FI.setAttributes(Attrs);

  return &FI;
}
3547
  // Op is the pointer operand being freed (presumably FI's argument —
  // the defining line is outside this view).
  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Leave a marker since we can't modify the CFG here.
    return eraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl code
  // when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return eraseInstFromFunction(FI);

  // If we had free(realloc(...)) with no intervening uses, then eliminate the
  // realloc() entirely.
  CallInst *CI = dyn_cast<CallInst>(Op);
  if (CI && CI->hasOneUse())
    if (Value *ReallocatedOp = getReallocatedOperand(CI))
      return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));

  // If we optimize for code size, try to move the call to free before the null
  // test so that simplify cfg can remove the empty block and dead code
  // elimination the branch. I.e., helps to turn something like:
  // if (foo) free(foo);
  // into
  // free(foo);
  //
  // Note that we can only do this for 'free' and not for any flavor of
  // 'operator delete'; there is no 'operator delete' symbol for which we are
  // permitted to invent a call, even if we're passing in a null pointer.
  if (MinimizeSize) {
    LibFunc Func;
    if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
        return I;
  }

  return nullptr;
}
3587
// Body of the return-instruction visitor: uses the function's `nofpclass`
// return attribute to simplify the returned floating-point value via
// demanded-FP-class analysis. Returns a replacement `ret` or nullptr.
// NOTE(review): the signature line is elided from this rendering.
3589 Value *RetVal = RI.getReturnValue();
3590 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3591 return nullptr;
3592
3593 Function *F = RI.getFunction();
3594 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3595 if (ReturnClass == fcNone)
3596 return nullptr;
3597
// The demanded classes are the complement of the excluded (nofpclass) set.
3598 KnownFPClass KnownClass;
3599 Value *Simplified =
3600 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3601 if (!Simplified)
3602 return nullptr;
3603
3604 return ReturnInst::Create(RI.getContext(), Simplified);
3605}
3606
3607// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
// Walks backwards from an unreachable point, erasing preceding instructions
// whose effects are unobservable once control provably reaches `I`.
// Returns true if any IR change was made.
// NOTE(review): the signature line is elided from this rendering.
3609 // Try to remove the previous instruction if it must lead to unreachable.
3610 // This includes instructions like stores and "llvm.assume" that may not get
3611 // removed by simple dead code elimination.
3612 bool Changed = false;
3613 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3614 // While we theoretically can erase EH, that would result in a block that
3615 // used to start with an EH no longer starting with EH, which is invalid.
3616 // To make it valid, we'd need to fixup predecessors to no longer refer to
3617 // this block, but that changes CFG, which is not allowed in InstCombine.
3618 if (Prev->isEHPad())
3619 break; // Can not drop any more instructions. We're done here.
3620
// (Elided line 3621 holds the other bail-out condition guarding this break.)
3622 break; // Can not drop any more instructions. We're done here.
3623 // Otherwise, this instruction can be freely erased,
3624 // even if it is not side-effect free.
3625
3626 // A value may still have uses before we process it here (for example, in
3627 // another unreachable block), so convert those to poison.
3628 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3629 eraseInstFromFunction(*Prev);
3630 Changed = true;
3631 }
3632 return Changed;
3633}
3634
// NOTE(review): the signature and body of this function are on lines elided
// from this rendering; only its trailing `return nullptr;` is visible.
// Confirm against the original file before editing.
3637 return nullptr;
3638}
3639
// Body of the unconditional-branch visitor: tries to sink the last store of
// the block into the successor (store merging). Returns &BI if a store was
// merged, else nullptr.
// NOTE(review): the signature line is elided from this rendering.
3641 assert(BI.isUnconditional() && "Only for unconditional branches.");
3642
3643 // If this store is the second-to-last instruction in the basic block
3644 // (excluding debug info and bitcasts of pointers) and if the block ends with
3645 // an unconditional branch, try to move the store to the successor block.
3646
// Scan backwards from BBI, skipping debug/pseudo instructions and pointer
// bitcasts, and return the store found there (or null).
3647 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3648 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3649 return BBI->isDebugOrPseudoInst() ||
3650 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3651 };
3652
3653 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3654 do {
3655 if (BBI != FirstInstr)
3656 --BBI;
3657 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3658
3659 return dyn_cast<StoreInst>(BBI);
3660 };
3661
3662 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3663 if (mergeStoreIntoSuccessor(*SI))
3664 return &BI;
3665
3666 return nullptr;
3667}
3668
// Body of addDeadEdge(From, To, Worklist): records the CFG edge From->To as
// dead. Phi inputs in To coming from From are replaced with poison, and To
// is queued so the caller can later check whether the whole block is dead.
// NOTE(review): the signature line is elided from this rendering.
3671 if (!DeadEdges.insert({From, To}).second)
3672 return; // Edge already known dead — nothing more to do.
3673
3674 // Replace phi node operands in successor with poison.
3675 for (PHINode &PN : To->phis())
3676 for (Use &U : PN.incoming_values())
3677 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3678 replaceUse(U, PoisonValue::get(PN.getType()));
3679 addToWorklist(&PN);
3680 MadeIRChange = true;
3681 }
3682
3683 Worklist.push_back(To);
3684}
3685
3686// Under the assumption that I is unreachable, remove it and following
3687// instructions. Changes are reported directly to MadeIRChange.
// NOTE(review): the signature line is elided from this rendering.
3690 BasicBlock *BB = I->getParent();
// Walk from the instruction before the terminator back down to I,
// using reverse iterators so erasure during iteration is safe.
3691 for (Instruction &Inst : make_early_inc_range(
3692 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3693 std::next(I->getReverseIterator())))) {
3694 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3695 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3696 MadeIRChange = true;
3697 }
// EH pads and token-producing instructions must stay to keep the IR valid.
3698 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3699 continue;
3700 // RemoveDIs: erase debug-info on this instruction manually.
3701 Inst.dropDbgRecords();
// (Elided line 3702 erases the instruction itself.)
3703 MadeIRChange = true;
3704 }
3705
3706 SmallVector<Value *> Changed;
3707 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3708 MadeIRChange = true;
3709 for (Value *V : Changed)
3710 addToWorklist(cast<Instruction>(V));
3711 }
3712
3713 // Handle potentially dead successors.
3714 for (BasicBlock *Succ : successors(BB))
3715 addDeadEdge(BB, Succ, Worklist);
3716}
3717
// Body of the dead-block worklist loop: a block is dead once every incoming
// edge is either a recorded dead edge or comes from a block the candidate
// itself dominates (an unreachable loop).
// NOTE(review): the signature line is elided from this rendering.
3720 while (!Worklist.empty()) {
3721 BasicBlock *BB = Worklist.pop_back_val();
3722 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3723 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3724 }))
3725 continue; // Some live predecessor remains — BB is not dead.
3726
// (Elided line 3727 recursively removes the dead block's instructions.)
3728 }
3729}
3730
// Tail of the signature plus body of handlePotentiallyDeadSuccessors(BB,
// LiveSucc): every successor edge except the one to LiveSucc is marked dead.
// NOTE(review): the first signature line and the local worklist declaration
// (elided line 3733) are not visible in this rendering.
3732 BasicBlock *LiveSucc) {
3734 for (BasicBlock *Succ : successors(BB)) {
3735 // The live successor isn't dead.
3736 if (Succ == LiveSucc)
3737 continue;
3738
3739 addDeadEdge(BB, Succ, Worklist);
3740 }
3741
// (Elided line 3742 processes the accumulated worklist of dead blocks.)
3743}
3744
// Body of the branch-instruction visitor: canonicalizes the condition
// (strip `not`, logical-and-with-invert, non-canonical fcmp predicates),
// handles constant/undef conditions, and replaces dominated uses of the
// condition with true/false.
// NOTE(review): the signature line and several interior lines (3747, 3756,
// 3766, 3771, 3789, 3799) are elided from this rendering.
3746 if (BI.isUnconditional())
3748
3749 // Change br (not X), label True, label False to: br X, label False, True
3750 Value *Cond = BI.getCondition();
3751 Value *X;
3752 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3753 // Swap Destinations and condition...
3754 BI.swapSuccessors();
3755 if (BPI)
// (Elided line 3756 swaps branch probabilities to match.)
3757 return replaceOperand(BI, 0, X);
3758 }
3759
3760 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3761 // This is done by inverting the condition and swapping successors:
3762 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3763 Value *Y;
3764 if (isa<SelectInst>(Cond) &&
3765 match(Cond,
// (Elided line 3766 holds the logical-and-with-not matcher.)
3767 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3768 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3769 BI.swapSuccessors();
3770 if (BPI)
// (Elided line 3771 swaps branch probabilities to match.)
3772 return replaceOperand(BI, 0, Or);
3773 }
3774
3775 // If the condition is irrelevant, remove the use so that other
3776 // transforms on the condition become more effective.
3777 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3778 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3779
3780 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3781 CmpPredicate Pred;
3782 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3783 !isCanonicalPredicate(Pred)) {
3784 // Swap destinations and condition.
3785 auto *Cmp = cast<CmpInst>(Cond);
3786 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3787 BI.swapSuccessors();
3788 if (BPI)
// (Elided line 3789 swaps branch probabilities to match.)
3790 Worklist.push(Cmp);
3791 return &BI;
3792 }
3793
3794 if (isa<UndefValue>(Cond)) {
3795 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3796 return nullptr;
3797 }
3798 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
// (Elided line 3799 starts the handlePotentiallyDeadSuccessors call; the
// taken successor is the one selected by the constant condition.)
3800 BI.getSuccessor(!CI->getZExtValue()));
3801 return nullptr;
3802 }
3803
3804 // Replace all dominated uses of the condition with true/false
3805 // Ignore constant expressions to avoid iterating over uses on other
3806 // functions.
3807 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
3808 for (auto &U : make_early_inc_range(Cond->uses())) {
3809 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
3810 if (DT.dominates(Edge0, U)) {
3811 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
3812 addToWorklist(cast<Instruction>(U.getUser()));
3813 continue;
3814 }
3815 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
3816 if (DT.dominates(Edge1, U)) {
3817 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
3818 addToWorklist(cast<Instruction>(U.getUser()));
3819 }
3820 }
3821 }
3822
3823 DC.registerBranch(&BI);
3824 return nullptr;
3825}
3826
3827// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3828// we can prove that both (switch C) and (switch X) go to the default when cond
3829// is false/true.
// NOTE(review): the first signature lines are elided; only the trailing
// `bool IsTrueArm)` parameter is visible here.
3832 bool IsTrueArm) {
// Operand 1 of a select is the true arm, operand 2 the false arm.
3833 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3834 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3835 if (!C)
3836 return nullptr;
3837
// The constant arm must already branch to the default destination.
3838 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3839 if (CstBB != SI.getDefaultDest())
3840 return nullptr;
3841 Value *X = Select->getOperand(3 - CstOpIdx);
3842 CmpPredicate Pred;
3843 const APInt *RHSC;
3844 if (!match(Select->getCondition(),
3845 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3846 return nullptr;
// When looking at the arm taken while the condition holds, reason about
// the inverse predicate (the arm is selected when the icmp is true).
3847 if (IsTrueArm)
3848 Pred = ICmpInst::getInversePredicate(Pred);
3849
3850 // See whether we can replace the select with X
// (Elided line 3851 builds the exact ConstantRange CR for Pred/RHSC.)
3852 for (auto Case : SI.cases())
3853 if (!CR.contains(Case.getCaseValue()->getValue()))
3854 return nullptr;
3855
3856 return X;
3857}
3858
// Body of the switch visitor: folds add/sub/shl/ext through the condition,
// folds switch-on-select via case ranges, narrows the condition type using
// known bits, and marks dead successors for constant/undef conditions.
// NOTE(review): the signature line and two interior lines (3964, part of the
// narrowing transform) are elided from this rendering.
3860 Value *Cond = SI.getCondition();
3861 Value *Op0;
3862 ConstantInt *AddRHS;
3863 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3864 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3865 for (auto Case : SI.cases()) {
3866 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3867 assert(isa<ConstantInt>(NewCase) &&
3868 "Result of expression should be constant");
3869 Case.setValue(cast<ConstantInt>(NewCase));
3870 }
3871 return replaceOperand(SI, 0, Op0);
3872 }
3873
3874 ConstantInt *SubLHS;
3875 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3876 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3877 for (auto Case : SI.cases()) {
3878 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3879 assert(isa<ConstantInt>(NewCase) &&
3880 "Result of expression should be constant");
3881 Case.setValue(cast<ConstantInt>(NewCase));
3882 }
3883 return replaceOperand(SI, 0, Op0);
3884 }
3885
// Only legal when every case value has at least ShiftAmt trailing zeros,
// otherwise no input could produce that case anyway.
3886 uint64_t ShiftAmt;
3887 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3888 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3889 all_of(SI.cases(), [&](const auto &Case) {
3890 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3891 })) {
3892 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3893 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3894 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3895 Shl->hasOneUse()) {
3896 Value *NewCond = Op0;
3897 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3898 // If the shift may wrap, we need to mask off the shifted bits.
3899 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3900 NewCond = Builder.CreateAnd(
3901 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3902 }
3903 for (auto Case : SI.cases()) {
3904 const APInt &CaseVal = Case.getCaseValue()->getValue();
3905 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3906 : CaseVal.lshr(ShiftAmt);
3907 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3908 }
3909 return replaceOperand(SI, 0, NewCond);
3910 }
3911 }
3912
3913 // Fold switch(zext/sext(X)) into switch(X) if possible.
3914 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3915 bool IsZExt = isa<ZExtInst>(Cond);
3916 Type *SrcTy = Op0->getType();
3917 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3918
// Every case value must be representable in the narrower source type.
3919 if (all_of(SI.cases(), [&](const auto &Case) {
3920 const APInt &CaseVal = Case.getCaseValue()->getValue();
3921 return IsZExt ? CaseVal.isIntN(NewWidth)
3922 : CaseVal.isSignedIntN(NewWidth);
3923 })) {
3924 for (auto &Case : SI.cases()) {
3925 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3926 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3927 }
3928 return replaceOperand(SI, 0, Op0);
3929 }
3930 }
3931
3932 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3933 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3934 if (Value *V =
3935 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3936 return replaceOperand(SI, 0, V);
3937 if (Value *V =
3938 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3939 return replaceOperand(SI, 0, V);
3940 }
3941
3942 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3943 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3944 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3945
3946 // Compute the number of leading bits we can ignore.
3947 // TODO: A better way to determine this would use ComputeNumSignBits().
3948 for (const auto &C : SI.cases()) {
3949 LeadingKnownZeros =
3950 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3951 LeadingKnownOnes =
3952 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3953 }
3954
3955 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3956
3957 // Shrink the condition operand if the new type is smaller than the old type.
3958 // But do not shrink to a non-standard type, because backend can't generate
3959 // good code for that yet.
3960 // TODO: We can make it aggressive again after fixing PR39569.
3961 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3962 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3963 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
// (Elided line 3964 sets the builder insert point before the switch.)
3965 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3966
3967 for (auto Case : SI.cases()) {
3968 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3969 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3970 }
3971 return replaceOperand(SI, 0, NewCond);
3972 }
3973
3974 if (isa<UndefValue>(Cond)) {
3975 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3976 return nullptr;
3977 }
3978 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3979 handlePotentiallyDeadSuccessors(SI.getParent(),
3980 SI.findCaseValue(CI)->getCaseSuccessor());
3981 return nullptr;
3982 }
3983
3984 return nullptr;
3985}
3986
// Simplifies extractvalue of a *.with.overflow intrinsic: the value result
// (index 0) can become a plain binop/neg/shl; the overflow bit (index 1) can
// become an icmp when the intrinsic has one use.
// NOTE(review): the return-type line preceding the definition and interior
// lines 4023, 4047 and 4056 are elided from this rendering.
3988InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3989 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3990 if (!WO)
3991 return nullptr;
3992
3993 Intrinsic::ID OvID = WO->getIntrinsicID();
3994 const APInt *C = nullptr;
3995 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3996 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3997 OvID == Intrinsic::umul_with_overflow)) {
3998 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3999 if (C->isAllOnes())
4000 return BinaryOperator::CreateNeg(WO->getLHS());
4001 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4002 if (C->isPowerOf2()) {
4003 return BinaryOperator::CreateShl(
4004 WO->getLHS(),
4005 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4006 }
4007 }
4008 }
4009
4010 // We're extracting from an overflow intrinsic. See if we're the only user.
4011 // That allows us to simplify multiple result intrinsics to simpler things
4012 // that just get one value.
4013 if (!WO->hasOneUse())
4014 return nullptr;
4015
4016 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4017 // and replace it with a traditional binary instruction.
4018 if (*EV.idx_begin() == 0) {
4019 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4020 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4021 // Replace the old instruction's uses with poison.
4022 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
// (Elided line 4023 erases the now-dead intrinsic call.)
4024 return BinaryOperator::Create(BinOp, LHS, RHS);
4025 }
4026
4027 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4028
4029 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4030 if (OvID == Intrinsic::usub_with_overflow)
4031 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4032
4033 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4034 // +1 is not possible because we assume signed values.
4035 if (OvID == Intrinsic::smul_with_overflow &&
4036 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4037 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4038
4039 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4040 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4041 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4042 // Only handle even bitwidths for performance reasons.
4043 if (BitWidth % 2 == 0)
4044 return new ICmpInst(
4045 ICmpInst::ICMP_UGT, WO->getLHS(),
4046 ConstantInt::get(WO->getLHS()->getType(),
// (Elided line 4047 supplies the 2^(N/2)-1 constant expression.)
4048 }
4049
4050 // If only the overflow result is used, and the right hand side is a
4051 // constant (or constant splat), we can remove the intrinsic by directly
4052 // checking for overflow.
4053 if (C) {
4054 // Compute the no-wrap range for LHS given RHS=C, then construct an
4055 // equivalent icmp, potentially using an offset.
// (Elided line 4056 builds the exact no-wrap ConstantRange NWR.)
4057 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4058
4059 CmpInst::Predicate Pred;
4060 APInt NewRHSC, Offset;
4061 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4062 auto *OpTy = WO->getRHS()->getType();
4063 auto *NewLHS = WO->getLHS();
4064 if (Offset != 0)
4065 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
// NWR describes the *non-overflowing* inputs, so overflow is the inverse.
4066 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4067 ConstantInt::get(OpTy, NewRHSC));
4068 }
4069
4070 return nullptr;
4071}
4072
// Tail of the signature plus body of foldFrexpOfSelect(EV, FrexpCall,
// SelectInst, Builder): rewrites extractvalue(frexp(select(c, K, v)), 0) as
// select(c, frexp-mantissa-of-K, extractvalue(frexp(v), 0)), folding the
// constant arm at compile time. Note the parameter named `SelectInst`
// shadows the LLVM type of the same name within this function.
// NOTE(review): the first signature lines are elided from this rendering.
4075 InstCombiner::BuilderTy &Builder) {
4076 // Helper to fold frexp of select to select of frexp.
4077
4078 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4079 return nullptr;
4081 Value *TrueVal = SelectInst->getTrueValue();
4082 Value *FalseVal = SelectInst->getFalseValue();
4083
// Exactly one arm must be a floating-point constant; the other is the
// variable operand that keeps the runtime frexp call.
4084 const APFloat *ConstVal = nullptr;
4085 Value *VarOp = nullptr;
4086 bool ConstIsTrue = false;
4087
4088 if (match(TrueVal, m_APFloat(ConstVal))) {
4089 VarOp = FalseVal;
4090 ConstIsTrue = true;
4091 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4092 VarOp = TrueVal;
4093 ConstIsTrue = false;
4094 } else {
4095 return nullptr;
4096 }
4097
4098 Builder.SetInsertPoint(&EV);
4099
4100 CallInst *NewFrexp =
4101 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4102 NewFrexp->copyIRFlags(FrexpCall);
4103
4104 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4105
// Fold the constant arm: compute its mantissa directly with APFloat.
4106 int Exp;
4107 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4108
4109 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4110
4111 Value *NewSel = Builder.CreateSelectFMF(
4112 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4113 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4114 return NewSel;
4115}
// Body of the extractvalue visitor: simplifies extracts through insertvalue
// chains, overflow intrinsics, frexp-of-select, single-use loads (rewritten
// as a GEP + narrower load), phis, and selects.
// NOTE(review): the signature line and two lines of the load rewrite (4211,
// 4214 — insert-point setup and the new load creation) are elided from this
// rendering.
4117 Value *Agg = EV.getAggregateOperand();
4118
4119 if (!EV.hasIndices())
4120 return replaceInstUsesWith(EV, Agg);
4121
4122 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4123 SQ.getWithInstruction(&EV)))
4124 return replaceInstUsesWith(EV, V);
4125
4126 Value *Cond, *TrueVal, *FalseVal;
4127 if (match(&EV, m_ExtractValue<0>(m_Intrinsic<Intrinsic::frexp>(m_Select(
4128 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4129 auto *SelInst =
4130 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4131 if (Value *Result =
4132 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4133 return replaceInstUsesWith(EV, Result);
4134 }
4135 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4136 // We're extracting from an insertvalue instruction, compare the indices
4137 const unsigned *exti, *exte, *insi, *inse;
4138 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4139 exte = EV.idx_end(), inse = IV->idx_end();
4140 exti != exte && insi != inse;
4141 ++exti, ++insi) {
4142 if (*insi != *exti)
4143 // The insert and extract both reference distinctly different elements.
4144 // This means the extract is not influenced by the insert, and we can
4145 // replace the aggregate operand of the extract with the aggregate
4146 // operand of the insert. i.e., replace
4147 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4148 // %E = extractvalue { i32, { i32 } } %I, 0
4149 // with
4150 // %E = extractvalue { i32, { i32 } } %A, 0
4151 return ExtractValueInst::Create(IV->getAggregateOperand(),
4152 EV.getIndices());
4153 }
4154 if (exti == exte && insi == inse)
4155 // Both iterators are at the end: Index lists are identical. Replace
4156 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4157 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4158 // with "i32 42"
4159 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4160 if (exti == exte) {
4161 // The extract list is a prefix of the insert list. i.e. replace
4162 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4163 // %E = extractvalue { i32, { i32 } } %I, 1
4164 // with
4165 // %X = extractvalue { i32, { i32 } } %A, 1
4166 // %E = insertvalue { i32 } %X, i32 42, 0
4167 // by switching the order of the insert and extract (though the
4168 // insertvalue should be left in, since it may have other uses).
4169 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4170 EV.getIndices());
4171 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4172 ArrayRef(insi, inse));
4173 }
4174 if (insi == inse)
4175 // The insert list is a prefix of the extract list
4176 // We can simply remove the common indices from the extract and make it
4177 // operate on the inserted value instead of the insertvalue result.
4178 // i.e., replace
4179 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4180 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4181 // with
4182 // %E extractvalue { i32 } { i32 42 }, 0
4183 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4184 ArrayRef(exti, exte));
4185 }
4186
4187 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4188 return R;
4189
4190 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4191 // Bail out if the aggregate contains scalable vector type
4192 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4193 STy && STy->isScalableTy())
4194 return nullptr;
4195
4196 // If the (non-volatile) load only has one use, we can rewrite this to a
4197 // load from a GEP. This reduces the size of the load. If a load is used
4198 // only by extractvalue instructions then this either must have been
4199 // optimized before, or it is a struct with padding, in which case we
4200 // don't want to do the transformation as it loses padding knowledge.
4201 if (L->isSimple() && L->hasOneUse()) {
4202 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4203 SmallVector<Value*, 4> Indices;
4204 // Prefix an i32 0 since we need the first element.
4205 Indices.push_back(Builder.getInt32(0));
4206 for (unsigned Idx : EV.indices())
4207 Indices.push_back(Builder.getInt32(Idx));
4208
4209 // We need to insert these at the location of the old load, not at that of
4210 // the extractvalue.
// (Elided line 4211 moves the builder's insert point to the old load.)
4212 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4213 L->getPointerOperand(), Indices);
// (Elided line 4214 creates the narrower replacement load NL.)
4215 // Whatever aliasing information we had for the orignal load must also
4216 // hold for the smaller load, so propagate the annotations.
4217 NL->setAAMetadata(L->getAAMetadata());
4218 // Returning the load directly will cause the main loop to insert it in
4219 // the wrong spot, so use replaceInstUsesWith().
4220 return replaceInstUsesWith(EV, NL);
4221 }
4222 }
4223
4224 if (auto *PN = dyn_cast<PHINode>(Agg))
4225 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4226 return Res;
4227
4228 // Canonicalize extract (select Cond, TV, FV)
4229 // -> select cond, (extract TV), (extract FV)
4230 if (auto *SI = dyn_cast<SelectInst>(Agg))
4231 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4232 return R;
4233
4234 // We could simplify extracts from other values. Note that nested extracts may
4235 // already be simplified implicitly by the above: extract (extract (insert) )
4236 // will be translated into extract ( insert ( extract ) ) first and then just
4237 // the value inserted, if appropriate. Similarly for extracts from single-use
4238 // loads: extract (extract (load)) will be translated to extract (load (gep))
4239 // and if again single-use then via load (gep (gep)) to load (gep).
4240 // However, double extracts from e.g. function arguments or return values
4241 // aren't handled yet.
4242 return nullptr;
4243}
4244
4245/// Return 'true' if the given typeinfo will match anything.
// NOTE(review): the `case EHPersonality::...:` labels of this switch were on
// hyperlinked lines elided from this rendering — each `return` below belongs
// to one or more invisible case labels. Consult the original file for the
// exact personality-to-result mapping.
4246static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4247 switch (Personality) {
4251 // The GCC C EH and Rust personality only exists to support cleanups, so
4252 // it's not clear what the semantics of catch clauses are.
4253 return false;
4255 return false;
4257 // While __gnat_all_others_value will match any Ada exception, it doesn't
4258 // match foreign exceptions (or didn't, before gcc-4.7).
4259 return false;
// For the remaining personalities a null typeinfo is the catch-all.
4270 return TypeInfo->isNullValue();
4271 }
4272 llvm_unreachable("invalid enum");
}
4274
4275static bool shorter_filter(const Value *LHS, const Value *RHS) {
4276 return
4277 cast<ArrayType>(LHS->getType())->getNumElements()
4278 <
4279 cast<ArrayType>(RHS->getType())->getNumElements();
4280}
4281
4283 // The logic here should be correct for any real-world personality function.
4284 // However if that turns out not to be true, the offending logic can always
4285 // be conditioned on the personality function, like the catch-all logic is.
4286 EHPersonality Personality =
4287 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4288
4289 // Simplify the list of clauses, eg by removing repeated catch clauses
4290 // (these are often created by inlining).
4291 bool MakeNewInstruction = false; // If true, recreate using the following:
4292 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4293 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4294
4295 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4296 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4297 bool isLastClause = i + 1 == e;
4298 if (LI.isCatch(i)) {
4299 // A catch clause.
4300 Constant *CatchClause = LI.getClause(i);
4301 Constant *TypeInfo = CatchClause->stripPointerCasts();
4302
4303 // If we already saw this clause, there is no point in having a second
4304 // copy of it.
4305 if (AlreadyCaught.insert(TypeInfo).second) {
4306 // This catch clause was not already seen.
4307 NewClauses.push_back(CatchClause);
4308 } else {
4309 // Repeated catch clause - drop the redundant copy.
4310 MakeNewInstruction = true;
4311 }
4312
4313 // If this is a catch-all then there is no point in keeping any following
4314 // clauses or marking the landingpad as having a cleanup.
4315 if (isCatchAll(Personality, TypeInfo)) {
4316 if (!isLastClause)
4317 MakeNewInstruction = true;
4318 CleanupFlag = false;
4319 break;
4320 }
4321 } else {
4322 // A filter clause. If any of the filter elements were already caught
4323 // then they can be dropped from the filter. It is tempting to try to
4324 // exploit the filter further by saying that any typeinfo that does not
4325 // occur in the filter can't be caught later (and thus can be dropped).
4326 // However this would be wrong, since typeinfos can match without being
4327 // equal (for example if one represents a C++ class, and the other some
4328 // class derived from it).
4329 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4330 Constant *FilterClause = LI.getClause(i);
4331 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4332 unsigned NumTypeInfos = FilterType->getNumElements();
4333
4334 // An empty filter catches everything, so there is no point in keeping any
4335 // following clauses or marking the landingpad as having a cleanup. By
4336 // dealing with this case here the following code is made a bit simpler.
4337 if (!NumTypeInfos) {
4338 NewClauses.push_back(FilterClause);
4339 if (!isLastClause)
4340 MakeNewInstruction = true;
4341 CleanupFlag = false;
4342 break;
4343 }
4344
4345 bool MakeNewFilter = false; // If true, make a new filter.
4346 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4347 if (isa<ConstantAggregateZero>(FilterClause)) {
4348 // Not an empty filter - it contains at least one null typeinfo.
4349 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4350 Constant *TypeInfo =
4352 // If this typeinfo is a catch-all then the filter can never match.
4353 if (isCatchAll(Personality, TypeInfo)) {
4354 // Throw the filter away.
4355 MakeNewInstruction = true;
4356 continue;
4357 }
4358
4359 // There is no point in having multiple copies of this typeinfo, so
4360 // discard all but the first copy if there is more than one.
4361 NewFilterElts.push_back(TypeInfo);
4362 if (NumTypeInfos > 1)
4363 MakeNewFilter = true;
4364 } else {
4365 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4366 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4367 NewFilterElts.reserve(NumTypeInfos);
4368
4369 // Remove any filter elements that were already caught or that already
4370 // occurred in the filter. While there, see if any of the elements are
4371 // catch-alls. If so, the filter can be discarded.
4372 bool SawCatchAll = false;
4373 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4374 Constant *Elt = Filter->getOperand(j);
4375 Constant *TypeInfo = Elt->stripPointerCasts();
4376 if (isCatchAll(Personality, TypeInfo)) {
4377 // This element is a catch-all. Bail out, noting this fact.
4378 SawCatchAll = true;
4379 break;
4380 }
4381
4382 // Even if we've seen a type in a catch clause, we don't want to
4383 // remove it from the filter. An unexpected type handler may be
4384 // set up for a call site which throws an exception of the same
4385 // type caught. In order for the exception thrown by the unexpected
4386 // handler to propagate correctly, the filter must be correctly
4387 // described for the call site.
4388 //
4389 // Example:
4390 //
4391 // void unexpected() { throw 1;}
4392 // void foo() throw (int) {
4393 // std::set_unexpected(unexpected);
4394 // try {
4395 // throw 2.0;
4396 // } catch (int i) {}
4397 // }
4398
4399 // There is no point in having multiple copies of the same typeinfo in
4400 // a filter, so only add it if we didn't already.
4401 if (SeenInFilter.insert(TypeInfo).second)
4402 NewFilterElts.push_back(cast<Constant>(Elt));
4403 }
4404 // A filter containing a catch-all cannot match anything by definition.
4405 if (SawCatchAll) {
4406 // Throw the filter away.
4407 MakeNewInstruction = true;
4408 continue;
4409 }
4410
4411 // If we dropped something from the filter, make a new one.
4412 if (NewFilterElts.size() < NumTypeInfos)
4413 MakeNewFilter = true;
4414 }
4415 if (MakeNewFilter) {
4416 FilterType = ArrayType::get(FilterType->getElementType(),
4417 NewFilterElts.size());
4418 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4419 MakeNewInstruction = true;
4420 }
4421
4422 NewClauses.push_back(FilterClause);
4423
4424 // If the new filter is empty then it will catch everything so there is
4425 // no point in keeping any following clauses or marking the landingpad
4426 // as having a cleanup. The case of the original filter being empty was
4427 // already handled above.
4428 if (MakeNewFilter && !NewFilterElts.size()) {
4429 assert(MakeNewInstruction && "New filter but not a new instruction!");
4430 CleanupFlag = false;
4431 break;
4432 }
4433 }
4434 }
4435
4436 // If several filters occur in a row then reorder them so that the shortest
4437 // filters come first (those with the smallest number of elements). This is
4438 // advantageous because shorter filters are more likely to match, speeding up
4439 // unwinding, but mostly because it increases the effectiveness of the other
4440 // filter optimizations below.
4441 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4442 unsigned j;
4443 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4444 for (j = i; j != e; ++j)
4445 if (!isa<ArrayType>(NewClauses[j]->getType()))
4446 break;
4447
4448 // Check whether the filters are already sorted by length. We need to know
4449 // if sorting them is actually going to do anything so that we only make a
4450 // new landingpad instruction if it does.
4451 for (unsigned k = i; k + 1 < j; ++k)
4452 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4453 // Not sorted, so sort the filters now. Doing an unstable sort would be
4454 // correct too but reordering filters pointlessly might confuse users.
4455 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4457 MakeNewInstruction = true;
4458 break;
4459 }
4460
4461 // Look for the next batch of filters.
4462 i = j + 1;
4463 }
4464
4465 // If typeinfos matched if and only if equal, then the elements of a filter L
4466 // that occurs later than a filter F could be replaced by the intersection of
4467 // the elements of F and L. In reality two typeinfos can match without being
4468 // equal (for example if one represents a C++ class, and the other some class
4469 // derived from it) so it would be wrong to perform this transform in general.
4470 // However the transform is correct and useful if F is a subset of L. In that
4471 // case L can be replaced by F, and thus removed altogether since repeating a
4472 // filter is pointless. So here we look at all pairs of filters F and L where
4473 // L follows F in the list of clauses, and remove L if every element of F is
4474 // an element of L. This can occur when inlining C++ functions with exception
4475 // specifications.
4476 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4477 // Examine each filter in turn.
4478 Value *Filter = NewClauses[i];
4479 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4480 if (!FTy)
4481 // Not a filter - skip it.
4482 continue;
4483 unsigned FElts = FTy->getNumElements();
4484 // Examine each filter following this one. Doing this backwards means that
4485 // we don't have to worry about filters disappearing under us when removed.
4486 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4487 Value *LFilter = NewClauses[j];
4488 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4489 if (!LTy)
4490 // Not a filter - skip it.
4491 continue;
4492 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4493 // an element of LFilter, then discard LFilter.
4494 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4495 // If Filter is empty then it is a subset of LFilter.
4496 if (!FElts) {
4497 // Discard LFilter.
4498 NewClauses.erase(J);
4499 MakeNewInstruction = true;
4500 // Move on to the next filter.
4501 continue;
4502 }
4503 unsigned LElts = LTy->getNumElements();
4504 // If Filter is longer than LFilter then it cannot be a subset of it.
4505 if (FElts > LElts)
4506 // Move on to the next filter.
4507 continue;
4508 // At this point we know that LFilter has at least one element.
4509 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4510 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4511 // already know that Filter is not longer than LFilter).
4512 if (isa<ConstantAggregateZero>(Filter)) {
4513 assert(FElts <= LElts && "Should have handled this case earlier!");
4514 // Discard LFilter.
4515 NewClauses.erase(J);
4516 MakeNewInstruction = true;
4517 }
4518 // Move on to the next filter.
4519 continue;
4520 }
4521 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4522 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4523 // Since Filter is non-empty and contains only zeros, it is a subset of
4524 // LFilter iff LFilter contains a zero.
4525 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4526 for (unsigned l = 0; l != LElts; ++l)
4527 if (LArray->getOperand(l)->isNullValue()) {
4528 // LFilter contains a zero - discard it.
4529 NewClauses.erase(J);
4530 MakeNewInstruction = true;
4531 break;
4532 }
4533 // Move on to the next filter.
4534 continue;
4535 }
4536 // At this point we know that both filters are ConstantArrays. Loop over
4537 // operands to see whether every element of Filter is also an element of
4538 // LFilter. Since filters tend to be short this is probably faster than
4539 // using a method that scales nicely.
4540 ConstantArray *FArray = cast<ConstantArray>(Filter);
4541 bool AllFound = true;
4542 for (unsigned f = 0; f != FElts; ++f) {
4543 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4544 AllFound = false;
4545 for (unsigned l = 0; l != LElts; ++l) {
4546 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4547 if (LTypeInfo == FTypeInfo) {
4548 AllFound = true;
4549 break;
4550 }
4551 }
4552 if (!AllFound)
4553 break;
4554 }
4555 if (AllFound) {
4556 // Discard LFilter.
4557 NewClauses.erase(J);
4558 MakeNewInstruction = true;
4559 }
4560 // Move on to the next filter.
4561 }
4562 }
4563
4564 // If we changed any of the clauses, replace the old landingpad instruction
4565 // with a new one.
4566 if (MakeNewInstruction) {
4568 NewClauses.size());
4569 for (Constant *C : NewClauses)
4570 NLI->addClause(C);
4571 // A landing pad with no clauses must have the cleanup flag set. It is
4572 // theoretically possible, though highly unlikely, that we eliminated all
4573 // clauses. If so, force the cleanup flag to true.
4574 if (NewClauses.empty())
4575 CleanupFlag = true;
4576 NLI->setCleanup(CleanupFlag);
4577 return NLI;
4578 }
4579
4580 // Even if none of the clauses changed, we may nonetheless have understood
4581 // that the cleanup flag is pointless. Clear it if so.
4582 if (LI.isCleanup() != CleanupFlag) {
4583 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4584 LI.setCleanup(CleanupFlag);
4585 return &LI;
4586 }
4587
4588 return nullptr;
4589}
4590
4591Value *
// Push a freeze through its operand to stop poison from propagating: if the
// frozen value is a one-use, non-PHI instruction that cannot itself create
// undef/poison (ignoring flags, which are dropped), move the freeze onto the
// single operand that is not guaranteed non-poison. Returns the original
// operand (now safe to use unfrozen) on success, nullptr otherwise.
// NOTE(review): the signature line (orig. 4592) is elided in this rendering;
// per the uses below it takes the visited freeze as OrigFI.
4593 // Try to push freeze through instructions that propagate but don't produce
4594 // poison as far as possible. If an operand of freeze follows three
4595 // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
4596 // guaranteed-non-poison operands then push the freeze through to the one
4597 // operand that is not guaranteed non-poison. The actual transform is as
4598 // follows.
4599 // Op1 = ... ; Op1 can be poison
4600 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and all its
4601 // ; other operands are guaranteed non-poison
4602 // ... = Freeze(Op0)
4603 // =>
4604 // Op1 = ...
4605 // Op1.fr = Freeze(Op1)
4606 // ... = Inst(Op1.fr, NonPoisonOps...)
4607 auto *OrigOp = OrigFI.getOperand(0);
4608 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4609
4610 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4611 // potentially reduces their optimization potential, so let's only do this iff
4612 // the OrigOp is only used by the freeze.
4613 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4614 return nullptr;
4615
4616 // We can't push the freeze through an instruction which can itself create
4617 // poison. If the only source of new poison is flags, we can simply
4618 // strip them (since we know the only use is the freeze and nothing can
4619 // benefit from them.)
4620 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4621 /*ConsiderFlagsAndMetadata*/ false))
4622 return nullptr;
4623
4624 // If operand is guaranteed not to be poison, there is no need to add freeze
4625 // to the operand. So we first find the operand that is not guaranteed to be
4626 // poison.
4627 Use *MaybePoisonOperand = nullptr;
// Bail out (return nullptr) if more than one operand may be poison: the
// transform only works when a single operand needs freezing.
// NOTE(review): the second half of this condition (orig. line 4630,
// presumably a guaranteed-non-poison check on U.get()) is elided in this
// rendering — confirm against upstream.
4628 for (Use &U : OrigOpInst->operands()) {
4629 if (isa<MetadataAsValue>(U.get()) ||
4631 continue;
4632 if (!MaybePoisonOperand)
4633 MaybePoisonOperand = &U;
4634 else
4635 return nullptr;
4636 }
4637
// Flags/metadata are the only remaining poison source; strip them (their
// sole consumer was the freeze, so nothing can benefit from keeping them).
4638 OrigOpInst->dropPoisonGeneratingAnnotations();
4639
4640 // If all operands are guaranteed to be non-poison, we can drop freeze.
4641 if (!MaybePoisonOperand)
4642 return OrigOp;
4643
// Insert the new freeze immediately before the instruction so it dominates
// the (single) use of the maybe-poison operand.
4644 Builder.SetInsertPoint(OrigOpInst);
4645 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4646 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4647
4648 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4649 return OrigOp;
4650}
4651
4653 PHINode *PN) {
4654 // Detect whether this is a recurrence with a start value and some number of
4655 // backedge values. We'll check whether we can push the freeze through the
4656 // backedge values (possibly dropping poison flags along the way) until we
4657 // reach the phi again. In that case, we can move the freeze to the start
4658 // value.
4659 Use *StartU = nullptr;
4661 for (Use &U : PN->incoming_values()) {
4662 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4663 // Add backedge value to worklist.
4664 Worklist.push_back(U.get());
4665 continue;
4666 }
4667
4668 // Don't bother handling multiple start values.
4669 if (StartU)
4670 return nullptr;
4671 StartU = &U;
4672 }
4673
4674 if (!StartU || Worklist.empty())
4675 return nullptr; // Not a recurrence.
4676
4677 Value *StartV = StartU->get();
4678 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4679 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4680 // We can't insert freeze if the start value is the result of the
4681 // terminator (e.g. an invoke).
4682 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4683 return nullptr;
4684
4687 while (!Worklist.empty()) {
4688 Value *V = Worklist.pop_back_val();
4689 if (!Visited.insert(V).second)
4690 continue;
4691
4692 if (Visited.size() > 32)
4693 return nullptr; // Limit the total number of values we inspect.
4694
4695 // Assume that PN is non-poison, because it will be after the transform.
4696 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4697 continue;
4698
4699 Instruction *I = dyn_cast<Instruction>(V);
4700 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4701 /*ConsiderFlagsAndMetadata*/ false))
4702 return nullptr;
4703
4704 DropFlags.push_back(I);
4705 append_range(Worklist, I->operands());
4706 }
4707
4708 for (Instruction *I : DropFlags)
4709 I->dropPoisonGeneratingAnnotations();
4710
4711 if (StartNeedsFreeze) {
4713 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4714 StartV->getName() + ".fr");
4715 replaceUse(*StartU, FrozenStartV);
4716 }
4717 return replaceInstUsesWith(FI, PN);
4718}
4719
// Given freeze(Op) where Op has additional users, hoist the freeze to just
// after Op's definition and rewrite every use of Op that the freeze dominates
// to use the frozen value. Returns true if the IR changed.
// NOTE(review): the signature line (orig. 4720) is elided in this rendering;
// FI is the freeze instruction being processed.
4721 Value *Op = FI.getOperand(0);
4722
// Nothing to do for constants, or when the freeze is already the only user.
4723 if (isa<Constant>(Op) || Op->hasOneUse())
4724 return false;
4725
4726 // Move the freeze directly after the definition of its operand, so that
4727 // it dominates the maximum number of uses. Note that it may not dominate
4728 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4729 // the normal/default destination. This is why the domination check in the
4730 // replacement below is still necessary.
4731 BasicBlock::iterator MoveBefore;
4732 if (isa<Argument>(Op)) {
// NOTE(review): the insertion-point expression for the argument case
// (orig. 4734, presumably the entry block's first non-alloca position) is
// elided in this rendering.
4733 MoveBefore =
4735 } else {
4736 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4737 if (!MoveBeforeOpt)
4738 return false;
4739 MoveBefore = *MoveBeforeOpt;
4740 }
4741
4742 // Don't move to the position of a debug intrinsic.
4743 if (isa<DbgInfoIntrinsic>(MoveBefore))
4744 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4745 // Re-point iterator to come after any debug-info records, if we're
4746 // running in "RemoveDIs" mode
4747 MoveBefore.setHeadBit(false);
4748
4749 bool Changed = false;
4750 if (&FI != &*MoveBefore) {
4751 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4752 Changed = true;
4753 }
4754
// Redirect only the uses the (moved) freeze dominates; others keep the
// unfrozen value (see the invoke/callbr caveat above).
4755 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4756 bool Dominates = DT.dominates(&FI, U);
4757 Changed |= Dominates;
4758 return Dominates;
4759 });
4760
4761 return Changed;
4762}
4763
4764// Check if any direct or bitcast user of this value is a shuffle instruction.
// Recurses through bitcast users so a value feeding a shuffle via a bitcast
// is detected as well.
// NOTE(review): the signature line (orig. 4765) is elided in this rendering;
// the body reads a single Value *V parameter.
4766 for (auto *U : V->users()) {
4767 if (isa<ShuffleVectorInst>(U))
4768 return true;
4769 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4770 return true;
4771 }
4772 return false;
4773}
4774
// InstCombine visitor for freeze instructions: try simplification, push the
// freeze through phis/recurrences/one-use instructions, fold freeze of
// undef/poison (or constants containing undef/poison elements) to a fixed
// replacement constant, and finally redirect other uses of the operand at
// the freeze. Returns the replacement instruction or nullptr if unchanged.
// NOTE(review): the signature line (orig. 4775) and a few call lines (orig.
// 4778, 4789, 4811, 4828, 4836) are elided in this rendering.
4776 Value *Op0 = I.getOperand(0);
4777
// (Elided line 4778: a simplification attempt producing V.)
4779 return replaceInstUsesWith(I, V);
4780
4781 // freeze (phi const, x) --> phi const, (freeze x)
4782 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4783 if (Instruction *NV = foldOpIntoPhi(I, PN))
4784 return NV;
4785 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4786 return NV;
4787 }
4788
// (Elided line 4789: a push-freeze attempt producing NI.)
4790 return replaceInstUsesWith(I, NI);
4791
4792 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4793 // - or: pick -1
4794 // - select's condition: if the true value is constant, choose it by making
4795 // the condition true.
4796 // - default: pick 0
4797 //
4798 // Note that this transform is intentionally done here rather than
4799 // via an analysis in InstSimplify or at individual user sites. That is
4800 // because we must produce the same value for all uses of the freeze -
4801 // it's the reason "freeze" exists!
4802 //
4803 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4804 // duplicating logic for binops at least.
4805 auto getUndefReplacement = [&I](Type *Ty) {
// Pick one constant that satisfies every user; if users disagree, fall
// back to the null value.
4806 Constant *BestValue = nullptr;
4807 Constant *NullValue = Constant::getNullValue(Ty);
4808 for (const auto *U : I.users()) {
4809 Constant *C = NullValue;
4810 if (match(U, m_Or(m_Value(), m_Value())))
// (Elided line 4811: the constant chosen for 'or' users — per the
// comment above, all-ones.)
4812 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4813 C = ConstantInt::getTrue(Ty);
4814
4815 if (!BestValue)
4816 BestValue = C;
4817 else if (BestValue != C)
4818 BestValue = NullValue;
4819 }
4820 assert(BestValue && "Must have at least one use");
4821 return BestValue;
4822 };
4823
4824 if (match(Op0, m_Undef())) {
4825 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4826 // a shuffle. This may improve codegen for shuffles that allow
4827 // unspecified inputs.
// (Elided line 4828: the shuffle-use check guarding this bail-out.)
4829 return nullptr;
4830 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4831 }
4832
4833 Constant *C;
4834 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4835 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
// (Elided line 4836: the replacement of undef elements with ReplaceC.)
4837 }
4838
4839 // Replace uses of Op with freeze(Op).
4840 if (freezeOtherUses(I))
4841 return &I;
4842
4843 return nullptr;
4844}
4845
4846/// Check for case where the call writes to an otherwise dead alloca. This
4847/// shows up for unused out-params in idiomatic C/C++ code. Note that this
4848/// helper *only* analyzes the write; doesn't check any other legality aspect.
/// Returns true iff I is a call whose (sole) write destination is an alloca
/// whose only users are GEP/addrspacecast chains and the call itself.
/// NOTE(review): the signature line (orig. 4849) and the Visited set
/// declaration (orig. 4867) are elided in this rendering.
4850 auto *CB = dyn_cast<CallBase>(I);
4851 if (!CB)
4852 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4853 // to allow reload along used path as described below. Otherwise, this
4854 // is simply a store to a dead allocation which will be removed.
4855 return false;
4856 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4857 if (!Dest)
4858 return false;
4859 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4860 if (!AI)
4861 // TODO: allow malloc?
4862 return false;
4863 // TODO: allow memory access dominated by move point? Note that since AI
4864 // could have a reference to itself captured by the call, we would need to
4865 // account for cycles in doing so.
4866 SmallVector<const User *> AllocaUsers;
4868 auto pushUsers = [&](const Instruction &I) {
4869 for (const User *U : I.users()) {
4870 if (Visited.insert(U).second)
4871 AllocaUsers.push_back(U);
4872 }
4873 };
4874 pushUsers(*AI);
// Worklist walk over the alloca's transitive users: only address
// computations (GEP/addrspacecast) and the candidate call are permitted.
4875 while (!AllocaUsers.empty()) {
4876 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4877 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4878 pushUsers(*UserI);
4879 continue;
4880 }
4881 if (UserI == CB)
4882 continue;
4883 // TODO: support lifetime.start/end here
4884 return false;
4885 }
4886 return true;
4887}
4888
4889/// Try to move the specified instruction from its current block into the
4890/// beginning of DestBlock, which can only happen if it's safe to move the
4891/// instruction past all of the instructions between it and the end of its
4892/// block.
/// Returns true if the instruction was sunk (debug users are salvaged or
/// sunk along with it).
/// NOTE(review): the first signature line (orig. 4893, carrying the
/// Instruction *I parameter) is elided in this rendering.
4894 BasicBlock *DestBlock) {
4895 BasicBlock *SrcBlock = I->getParent();
4896
4897 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
4898 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4899 I->isTerminator())
4900 return false;
4901
4902 // Do not sink static or dynamic alloca instructions. Static allocas must
4903 // remain in the entry block, and dynamic allocas must not be sunk in between
4904 // a stacksave / stackrestore pair, which would incorrectly shorten its
4905 // lifetime.
4906 if (isa<AllocaInst>(I))
4907 return false;
4908
4909 // Do not sink into catchswitch blocks.
4910 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4911 return false;
4912
4913 // Do not sink convergent call instructions.
4914 if (auto *CI = dyn_cast<CallInst>(I)) {
4915 if (CI->isConvergent())
4916 return false;
4917 }
4918
4919 // Unless we can prove that the memory write isn't visible except on the
4920 // path we're sinking to, we must bail.
4921 if (I->mayWriteToMemory()) {
4922 if (!SoleWriteToDeadLocal(I, TLI))
4923 return false;
4924 }
4925
4926 // We can only sink load instructions if there is nothing between the load and
4927 // the end of block that could change the value.
4928 if (I->mayReadFromMemory() &&
4929 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
4930 // We don't want to do any sophisticated alias analysis, so we only check
4931 // the instructions after I in I's parent block if we try to sink to its
4932 // successor block.
4933 if (DestBlock->getUniquePredecessor() != I->getParent())
4934 return false;
4935 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4936 E = I->getParent()->end();
4937 Scan != E; ++Scan)
4938 if (Scan->mayWriteToMemory())
4939 return false;
4940 }
4941
// Drop droppable uses (e.g. assumes) in other blocks rather than blocking
// the sink; their users are re-queued for revisiting.
4942 I->dropDroppableUses([&](const Use *U) {
4943 auto *I = dyn_cast<Instruction>(U->getUser());
4944 if (I && I->getParent() != DestBlock) {
4945 Worklist.add(I);
4946 return true;
4947 }
4948 return false;
4949 });
4950 /// FIXME: We could remove droppable uses that are not dominated by
4951 /// the new position.
4952
4953 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4954 I->moveBefore(*DestBlock, InsertPos);
4955 ++NumSunkInst;
4956
4957 // Also sink all related debug uses from the source basic block. Otherwise we
4958 // get debug use before the def. Attempt to salvage debug uses first, to
4959 // maximise the range variables have location for. If we cannot salvage, then
4960 // mark the location undef: we know it was supposed to receive a new location
4961 // here, but that computation has been sunk.
// NOTE(review): the DbgUsers vector declaration (orig. 4962) is elided in
// this rendering.
4963 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4964 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4965 if (!DbgUsers.empty())
4966 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4967 if (!DbgVariableRecords.empty())
4968 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4969 DbgVariableRecords);
4970
4971 // PS: there are numerous flaws with this behaviour, not least that right now
4972 // assignments can be re-ordered past other assignments to the same variable
4973 // if they use different Values. Creating more undef assignments can never be
4974 // undone. And salvaging all users outside of this block can un-necessarily
4975 // alter the lifetime of the live-value that the variable refers to.
4976 // Some of these things can be resolved by tolerating debug use-before-defs in
4977 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
4978 // being used for more architectures.
4979
4980 return true;
4981}
4982
// Sink the dbg.value intrinsics that use I alongside the sunk instruction:
// clone the last assignment per variable from the source block to InsertPos,
// then salvage the remaining (non-cloned) debug users so their locations
// survive the move. dbg.declare and dbg.assign users are deliberately left
// in place.
// NOTE(review): the function-name line (orig. 4983), the tail of the
// signature (orig. 4985) and the DbgUsersToSalvage/DbgUsersToSink/DIIClones
// declarations (orig. 4988, 4995, 5002) are elided in this rendering.
4984 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4986 // For all debug values in the destination block, the sunk instruction
4987 // will still be available, so they do not need to be dropped.
4989 for (auto &DbgUser : DbgUsers)
4990 if (DbgUser->getParent() != DestBlock)
4991 DbgUsersToSalvage.push_back(DbgUser);
4992
4993 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4994 // to clone the last appearing debug intrinsic for each given variable.
4996 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4997 if (DVI->getParent() == SrcBlock)
4998 DbgUsersToSink.push_back(DVI);
4999 llvm::sort(DbgUsersToSink,
5000 [](auto *A, auto *B) { return B->comesBefore(A); });
5001
5003 SmallSet<DebugVariable, 4> SunkVariables;
5004 for (auto *User : DbgUsersToSink) {
5005 // A dbg.declare instruction should not be cloned, since there can only be
5006 // one per variable fragment. It should be left in the original place
5007 // because the sunk instruction is not an alloca (otherwise we could not be
5008 // here).
5009 if (isa<DbgDeclareInst>(User))
5010 continue;
5011
5012 DebugVariable DbgUserVariable =
5013 DebugVariable(User->getVariable(), User->getExpression(),
5014 User->getDebugLoc()->getInlinedAt());
5015
// Only the last (first-seen in this reversed walk) assignment per
// variable is cloned.
5016 if (!SunkVariables.insert(DbgUserVariable).second)
5017 continue;
5018
5019 // Leave dbg.assign intrinsics in their original positions and there should
5020 // be no need to insert a clone.
5021 if (isa<DbgAssignIntrinsic>(User))
5022 continue;
5023
5024 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
5025 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
5026 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
5027 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
5028 }
5029
5030 // Perform salvaging without the clones, then sink the clones.
5031 if (!DIIClones.empty()) {
5032 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
5033 // The clones are in reverse order of original appearance, reverse again to
5034 // maintain the original order.
5035 for (auto &DIIClone : llvm::reverse(DIIClones)) {
5036 DIIClone->insertBefore(InsertPos);
5037 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
5038 }
5039 }
5040}
5041
// DbgVariableRecord counterpart of tryToSinkInstructionDbgValues: clone the
// last assignment per variable from the source block to InsertPos and
// salvage the rest. Extra bookkeeping (FilterOutMap) is needed because
// multiple records for the same variable can be attached to one instruction
// and therefore have no relative order after the stable sort.
// NOTE(review): the function-name line (orig. 5042) and the
// FilterOutMap/CountMap/DupSet/DVRClones declarations (orig. 5075, 5077,
// 5088, 5117) plus a per-record guard (orig. 5120) are elided in this
// rendering.
5043 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5044 BasicBlock *DestBlock,
5045 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5046 // Implementation of tryToSinkInstructionDbgValues, but for the
5047 // DbgVariableRecord of variable assignments rather than dbg.values.
5048
5049 // Fetch all DbgVariableRecords not already in the destination.
5050 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5051 for (auto &DVR : DbgVariableRecords)
5052 if (DVR->getParent() != DestBlock)
5053 DbgVariableRecordsToSalvage.push_back(DVR);
5054
5055 // Fetch a second collection, of DbgVariableRecords in the source block that
5056 // we're going to sink.
5057 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5058 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5059 if (DVR->getParent() == SrcBlock)
5060 DbgVariableRecordsToSink.push_back(DVR);
5061
5062 // Sort DbgVariableRecords according to their position in the block. This is a
5063 // partial order: DbgVariableRecords attached to different instructions will
5064 // be ordered by the instruction order, but DbgVariableRecords attached to the
5065 // same instruction won't have an order.
5066 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5067 return B->getInstruction()->comesBefore(A->getInstruction());
5068 };
5069 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5070
5071 // If there are two assignments to the same variable attached to the same
5072 // instruction, the ordering between the two assignments is important. Scan
5073 // for this (rare) case and establish which is the last assignment.
5074 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5076 if (DbgVariableRecordsToSink.size() > 1) {
5078 // Count how many assignments to each variable there is per instruction.
5079 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5080 DebugVariable DbgUserVariable =
5081 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5082 DVR->getDebugLoc()->getInlinedAt());
5083 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5084 }
5085
5086 // If there are any instructions with two assignments, add them to the
5087 // FilterOutMap to record that they need extra filtering.
5089 for (auto It : CountMap) {
5090 if (It.second > 1) {
5091 FilterOutMap[It.first] = nullptr;
5092 DupSet.insert(It.first.first);
5093 }
5094 }
5095
5096 // For all instruction/variable pairs needing extra filtering, find the
5097 // latest assignment.
5098 for (const Instruction *Inst : DupSet) {
// Walk the instruction's attached records back-to-front so the first
// hit recorded into FilterOutMap is the last (latest) assignment.
5099 for (DbgVariableRecord &DVR :
5100 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5101 DebugVariable DbgUserVariable =
5102 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5103 DVR.getDebugLoc()->getInlinedAt());
5104 auto FilterIt =
5105 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5106 if (FilterIt == FilterOutMap.end())
5107 continue;
5108 if (FilterIt->second != nullptr)
5109 continue;
5110 FilterIt->second = &DVR;
5111 }
5112 }
5113 }
5114
5115 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5116 // out any duplicate assignments identified above.
5118 SmallSet<DebugVariable, 4> SunkVariables;
5119 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
// (Elided line 5120: a guard skipping records that must stay in place —
// presumably declares; confirm against upstream.)
5121 continue;
5122
5123 DebugVariable DbgUserVariable =
5124 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5125 DVR->getDebugLoc()->getInlinedAt());
5126
5127 // For any variable where there were multiple assignments in the same place,
5128 // ignore all but the last assignment.
5129 if (!FilterOutMap.empty()) {
5130 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5131 auto It = FilterOutMap.find(IVP);
5132
5133 // Filter out.
5134 if (It != FilterOutMap.end() && It->second != DVR)
5135 continue;
5136 }
5137
5138 if (!SunkVariables.insert(DbgUserVariable).second)
5139 continue;
5140
5141 if (DVR->isDbgAssign())
5142 continue;
5143
5144 DVRClones.emplace_back(DVR->clone());
5145 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5146 }
5147
5148 // Perform salvaging without the clones, then sink the clones.
5149 if (DVRClones.empty())
5150 return;
5151
5152 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
5153
5154 // The clones are in reverse order of original appearance. Assert that the
5155 // head bit is set on the iterator as we _should_ have received it via
5156 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5157 // we'll repeatedly insert at the head, such as:
5158 // DVR-3 (third insertion goes here)
5159 // DVR-2 (second insertion goes here)
5160 // DVR-1 (first insertion goes here)
5161 // Any-Prior-DVRs
5162 // InsertPtInst
5163 assert(InsertPos.getHeadBit());
5164 for (DbgVariableRecord *DVRClone : DVRClones) {
5165 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5166 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5167 }
5168}
5169
// Main InstCombine worklist driver: pop instructions (DCE-ing trivially dead
// ones), optionally sink each instruction toward its sole user block, then
// run the visit() combiner and splice any replacement into place. Returns
// whether the IR changed.
// NOTE(review): the signature line (orig. 5170) and several statement lines
// (orig. 5179-5180, 5188, 5192-5193, 5282-5283, 5328, 5331, 5338-5341) are
// elided in this rendering, leaving some braces without their opening
// condition — confirm against upstream.
5171 while (!Worklist.isEmpty()) {
5172 // Walk deferred instructions in reverse order, and push them to the
5173 // worklist, which means they'll end up popped from the worklist in-order.
5174 while (Instruction *I = Worklist.popDeferred()) {
5175 // Check to see if we can DCE the instruction. We do this already here to
5176 // reduce the number of uses and thus allow other folds to trigger.
5177 // Note that eraseInstFromFunction() may push additional instructions on
5178 // the deferred worklist, so this will DCE whole instruction chains.
5181 ++NumDeadInst;
5182 continue;
5183 }
5184
5185 Worklist.push(I);
5186 }
5187
5189 if (I == nullptr) continue; // skip null values.
5190
5191 // Check to see if we can DCE the instruction.
5194 ++NumDeadInst;
5195 continue;
5196 }
5197
// Debug counter allows bisecting which visit caused a miscompile.
5198 if (!DebugCounter::shouldExecute(VisitCounter))
5199 continue;
5200
5201 // See if we can trivially sink this instruction to its user if we can
5202 // prove that the successor is not executed more frequently than our block.
5203 // Return the UserBlock if successful.
5204 auto getOptionalSinkBlockForInst =
5205 [this](Instruction *I) -> std::optional<BasicBlock *> {
5206 if (!EnableCodeSinking)
5207 return std::nullopt;
5208
5209 BasicBlock *BB = I->getParent();
5210 BasicBlock *UserParent = nullptr;
5211 unsigned NumUsers = 0;
5212
5213 for (Use &U : I->uses()) {
5214 User *User = U.getUser();
5215 if (User->isDroppable())
5216 continue;
5217 if (NumUsers > MaxSinkNumUsers)
5218 return std::nullopt;
5219
5220 Instruction *UserInst = cast<Instruction>(User);
5221 // Special handling for Phi nodes - get the block the use occurs in.
5222 BasicBlock *UserBB = UserInst->getParent();
5223 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5224 UserBB = PN->getIncomingBlock(U);
5225 // Bail out if we have uses in different blocks. We don't do any
5226 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5227 // use blocks).
5228 if (UserParent && UserParent != UserBB)
5229 return std::nullopt;
5230 UserParent = UserBB;
5231
5232 // Make sure these checks are done only once, naturally we do the checks
5233 // the first time we get the userparent, this will save compile time.
5234 if (NumUsers == 0) {
5235 // Try sinking to another block. If that block is unreachable, then do
5236 // not bother. SimplifyCFG should handle it.
5237 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5238 return std::nullopt;
5239
5240 auto *Term = UserParent->getTerminator();
5241 // See if the user is one of our successors that has only one
5242 // predecessor, so that we don't have to split the critical edge.
5243 // Another option where we can sink is a block that ends with a
5244 // terminator that does not pass control to other block (such as
5245 // return or unreachable or resume). In this case:
5246 // - I dominates the User (by SSA form);
5247 // - the User will be executed at most once.
5248 // So sinking I down to User is always profitable or neutral.
5249 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5250 return std::nullopt;
5251
5252 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5253 }
5254
5255 NumUsers++;
5256 }
5257
5258 // No user or only has droppable users.
5259 if (!UserParent)
5260 return std::nullopt;
5261
5262 return UserParent;
5263 };
5264
5265 auto OptBB = getOptionalSinkBlockForInst(I);
5266 if (OptBB) {
5267 auto *UserParent = *OptBB;
5268 // Okay, the CFG is simple enough, try to sink this instruction.
5269 if (tryToSinkInstruction(I, UserParent)) {
5270 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5271 MadeIRChange = true;
5272 // We'll add uses of the sunk instruction below, but since
5273 // sinking can expose opportunities for its *operands* add
5274 // them to the worklist
5275 for (Use &U : I->operands())
5276 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5277 Worklist.push(OpI);
5278 }
5279 }
5280
5281 // Now that we have an instruction, try combining it to simplify it.
5284 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5285
5286#ifndef NDEBUG
5287 std::string OrigI;
5288#endif
5289 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5290 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5291
5292 if (Instruction *Result = visit(*I)) {
5293 ++NumCombined;
5294 // Should we replace the old instruction with a new one?
5295 if (Result != I) {
5296 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5297 << " New = " << *Result << '\n');
5298
5299 // We copy the old instruction's DebugLoc to the new instruction, unless
5300 // InstCombine already assigned a DebugLoc to it, in which case we
5301 // should trust the more specifically selected DebugLoc.
5302 if (!Result->getDebugLoc())
5303 Result->setDebugLoc(I->getDebugLoc());
5304 // We also copy annotation metadata to the new instruction.
5305 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5306 // Everything uses the new instruction now.
5307 I->replaceAllUsesWith(Result);
5308
5309 // Move the name to the new instruction first.
5310 Result->takeName(I);
5311
5312 // Insert the new instruction into the basic block...
5313 BasicBlock *InstParent = I->getParent();
5314 BasicBlock::iterator InsertPos = I->getIterator();
5315
5316 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5317 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5318 // We need to fix up the insertion point.
5319 if (isa<PHINode>(I)) // PHI -> Non-PHI
5320 InsertPos = InstParent->getFirstInsertionPt();
5321 else // Non-PHI -> PHI
5322 InsertPos = InstParent->getFirstNonPHIIt();
5323 }
5324
5325 Result->insertInto(InstParent, InsertPos);
5326
5327 // Push the new instruction and any users onto the worklist.
5329 Worklist.push(Result);
5330
5332 } else {
5333 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5334 << " New = " << *I << '\n');
5335
5336 // If the instruction was modified, it's possible that it is now dead.
5337 // if so, remove it.
5340 } else {
5342 Worklist.push(I);
5343 }
5344 }
5345 MadeIRChange = true;
5346 }
5347 }
5348
5349 Worklist.zap();
5350 return MadeIRChange;
5351}
5352
5353// Track the scopes used by !alias.scope and !noalias. In a function, a
5354// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5355// by both sets. If not, the declaration of the scope can be safely omitted.
5356// The MDNode of the scope can be omitted as well for the instructions that are
5357// part of this function. We do not do that at this point, as this might become
5358// too time consuming to do.
5360 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5361 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5362
5363public:
5365 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5366 if (!I->hasMetadataOtherThanDebugLoc())
5367 return;
5368
5369 auto Track = [](Metadata *ScopeList, auto &Container) {
5370 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5371 if (!MDScopeList || !Container.insert(MDScopeList).second)
5372 return;
5373 for (const auto &MDOperand : MDScopeList->operands())
5374 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5375 Container.insert(MDScope);
5376 };
5377
5378 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5379 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5380 }
5381
5383 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5384 if (!Decl)
5385 return false;
5386
5387 assert(Decl->use_empty() &&
5388 "llvm.experimental.noalias.scope.decl in use ?");
5389 const MDNode *MDSL = Decl->getScopeList();
5390 assert(MDSL->getNumOperands() == 1 &&
5391 "llvm.experimental.noalias.scope should refer to a single scope");
5392 auto &MDOperand = MDSL->getOperand(0);
5393 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5394 return !UsedAliasScopesAndLists.contains(MD) ||
5395 !UsedNoAliasScopesAndLists.contains(MD);
5396
5397 // Not an MDNode ? throw away.
5398 return true;
5399 }
5400};
5401
5402/// Populate the IC worklist from a function, by walking it in reverse
5403/// post-order and adding all reachable code to the worklist.
5404///
5405/// This has a couple of tricks to make the code faster and more powerful. In
5406/// particular, we constant fold and DCE instructions as we go, to avoid adding
5407/// them to the worklist (this significantly speeds up instcombine on code where
5408/// many instructions are dead or constant). Additionally, if we find a branch
5409/// whose condition is a known constant, we only visit the reachable successors.
// NOTE(review): the function signature (original line 5410) and one
// declaration line (5412) are missing from this rendered listing.
5411  bool MadeIRChange = false;
5413  SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5414  DenseMap<Constant *, Constant *> FoldedConstants;
5415  AliasScopeTracker SeenAliasScopes;
5416
// Record every edge out of BB other than the one to LiveSucc (nullptr means
// "no live successor at all") as dead, and poison the matching PHI inputs in
// the dead successors so later folds can exploit them.
5417  auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5418    for (BasicBlock *Succ : successors(BB))
5419      if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5420        for (PHINode &PN : Succ->phis())
5421          for (Use &U : PN.incoming_values())
5422            if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5423              U.set(PoisonValue::get(PN.getType()));
5424              MadeIRChange = true;
5425            }
5426  };
5427
5428  for (BasicBlock *BB : RPOT) {
// A non-entry block is dead when every incoming edge is already known dead,
// or comes from a predecessor that BB itself dominates (an unreachable
// cycle).
5429    if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5430          return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5431        })) {
5432      HandleOnlyLiveSuccessor(BB, nullptr);
5433      continue;
5434    }
5435    LiveBlocks.insert(BB);
5436
5437    for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5438      // ConstantProp instruction if trivially constant.
// Cheap pre-filter: only attempt folding when the first operand is already a
// constant (or there are no operands).
5439      if (!Inst.use_empty() &&
5440          (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5441        if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5442          LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5443                            << '\n');
5444          Inst.replaceAllUsesWith(C);
5445          ++NumConstProp;
5446          if (isInstructionTriviallyDead(&Inst, &TLI))
5447            Inst.eraseFromParent();
5448          MadeIRChange = true;
5449          continue;
5450        }
5451
5452      // See if we can constant fold its operands.
5453      for (Use &U : Inst.operands()) {
5454        if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5455          continue;
5456
// Cache fold results so each distinct constant is folded only once per run.
5457        auto *C = cast<Constant>(U);
5458        Constant *&FoldRes = FoldedConstants[C];
5459        if (!FoldRes)
5460          FoldRes = ConstantFoldConstant(C, DL, &TLI);
5461
5462        if (FoldRes != C) {
5463          LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5464                            << "\n    Old = " << *C
5465                            << "\n    New = " << *FoldRes << '\n');
5466          U = FoldRes;
5467          MadeIRChange = true;
5468        }
5469      }
5470
5471      // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5472      // these call instructions consumes non-trivial amount of time and
5473      // provides no value for the optimization.
5474      if (!Inst.isDebugOrPseudoInst()) {
5475        InstrsForInstructionWorklist.push_back(&Inst);
5476        SeenAliasScopes.analyse(&Inst);
5477      }
5478    }
5479
5480    // If this is a branch or switch on a constant, mark only the single
5481    // live successor. Otherwise assume all successors are live.
5482    Instruction *TI = BB->getTerminator();
5483    if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5484      if (isa<UndefValue>(BI->getCondition())) {
5485        // Branch on undef is UB.
5486        HandleOnlyLiveSuccessor(BB, nullptr);
5487        continue;
5488      }
5489      if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5490        bool CondVal = Cond->getZExtValue();
5491        HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5492        continue;
5493      }
5494    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5495      if (isa<UndefValue>(SI->getCondition())) {
5496        // Switch on undef is UB.
5497        HandleOnlyLiveSuccessor(BB, nullptr);
5498        continue;
5499      }
5500      if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5501        HandleOnlyLiveSuccessor(BB,
5502                                SI->findCaseValue(Cond)->getCaseSuccessor());
5503        continue;
5504      }
5505    }
5506  }
5507
5508  // Remove instructions inside unreachable blocks. This prevents the
5509  // instcombine code from having to deal with some bad special cases, and
5510  // reduces use counts of instructions.
5511  for (BasicBlock &BB : F) {
5512    if (LiveBlocks.count(&BB))
5513      continue;
5514
5515    unsigned NumDeadInstInBB;
5516    unsigned NumDeadDbgInstInBB;
5517    std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
// NOTE(review): the callee line (original line 5518) producing these two
// counts is missing from this rendered listing.
5519
5520    MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5521    NumDeadInst += NumDeadInstInBB;
5522  }
5523
5524  // Once we've found all of the instructions to add to instcombine's worklist,
5525  // add them in reverse order.  This way instcombine will visit from the top
5526  // of the function down.  This jives well with the way that it adds all uses
5527  // of instructions to the worklist after doing a transformation, thus avoiding
5528  // some N^2 behavior in pathological cases.
5529  Worklist.reserve(InstrsForInstructionWorklist.size());
5530  for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5531    // DCE instruction if trivially dead. As we iterate in reverse program
5532    // order here, we will clean up whole chains of dead instructions.
5533    if (isInstructionTriviallyDead(Inst, &TLI) ||
5534        SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5535      ++NumDeadInst;
5536      LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5537      salvageDebugInfo(*Inst);
5538      Inst->eraseFromParent();
5539      MadeIRChange = true;
5540      continue;
5541    }
5542
5543    Worklist.push(Inst);
5544  }
5545
5546  return MadeIRChange;
5547}
5548
5550  // Collect backedges.
// NOTE(review): the enclosing function signature (original line 5549) and a
// declaration line (5551) are missing from this rendered listing.
// An edge whose target was already visited earlier in the RPO walk is
// recorded as a back edge.
5552  for (BasicBlock *BB : RPOT) {
5553    Visited.insert(BB);
5554    for (BasicBlock *Succ : successors(BB))
5555      if (Visited.contains(Succ))
5556        BackEdges.insert({BB, Succ});
5557  }
// Mark the cached back-edge set as valid for later queries.
5558  ComputedBackEdges = true;
5559}
5560
// NOTE(review): the leading lines of this function's signature (original
// lines 5561-5565) are missing from this rendered listing; only the final
// parameter is visible below. See the full prototype in the index section of
// this dump.
5566                                       const InstCombineOptions &Opts) {
5567  auto &DL = F.getDataLayout();
// The per-function attribute can disable fixpoint verification even when the
// pass option requests it.
5568  bool VerifyFixpoint = Opts.VerifyFixpoint &&
5569                        !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5570
5571  /// Builder - This is an IRBuilder that automatically inserts new
5572  /// instructions into the worklist when they are created.
// NOTE(review): the IRBuilder variable declaration (original line 5573) is
// missing here; the lines below are its constructor arguments. Each created
// instruction is pushed onto the worklist, and assumes are registered with
// the assumption cache.
5574      F.getContext(), TargetFolder(DL),
5575      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5576        Worklist.add(I);
5577        if (auto *Assume = dyn_cast<AssumeInst>(I))
5578          AC.registerAssumption(Assume);
5579      }));
5580
5582
5583  // Lower dbg.declare intrinsics otherwise their value may be clobbered
5584  // by instcombiner.
5585  bool MadeIRChange = false;
// NOTE(review): the guard line (original line 5586) around this call is
// missing from this rendered listing.
5587    MadeIRChange = LowerDbgDeclare(F);
5588
5589  // Iterate while there is work to do.
5590  unsigned Iteration = 0;
5591  while (true) {
5592    ++Iteration;
5593
// Without fixpoint verification, simply stop once the iteration budget is
// spent.
5594    if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {
5595      LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5596                        << " on " << F.getName()
5597                        << " reached; stopping without verifying fixpoint\n");
5598      break;
5599    }
5600
5601    ++NumWorklistIterations;
5602    LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5603                      << F.getName() << "\n");
5604
5605    InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5606                        ORE, BFI, BPI, PSI, DL, RPOT);
// Seed the worklist and run one combining sweep; a fully quiet iteration
// means a fixpoint was reached.
5608    bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5609    MadeChangeInThisIteration |= IC.run();
5610    if (!MadeChangeInThisIteration)
5611      break;
5612
5613    MadeIRChange = true;
// With fixpoint verification enabled, exceeding the budget is a hard error.
// NOTE(review): the fatal-error call line (original line 5615) is missing;
// its message arguments follow.
5614    if (Iteration > Opts.MaxIterations) {
5616          "Instruction Combining on " + Twine(F.getName()) +
5617              " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5618              " iterations. " +
5619              "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5620              "'instcombine-no-verify-fixpoint' to suppress this error.",
5621          /*GenCrashDiag=*/false);
5622    }
5623  }
5624
// Bucket the iteration count into the convergence statistics.
5625  if (Iteration == 1)
5626    ++NumOneIteration;
5627  else if (Iteration == 2)
5628    ++NumTwoIterations;
5629  else if (Iteration == 3)
5630    ++NumThreeIterations;
5631  else
5632    ++NumFourOrMoreIterations;
5633
5634  return MadeIRChange;
5635}
5636
5638
// NOTE(review): the first signature line (original line 5639) is missing
// from this rendered listing. Delegates to the PassInfoMixin for the pass
// name, then appends this pass's options in pipeline syntax.
5640    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5641  static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5642      OS, MapClassName2PassName);
5643  OS << '<';
5644  OS << "max-iterations=" << Options.MaxIterations << ";";
5645  OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5646  OS << '>';
5647}
5648
// Unique pass identity; its address is the key passed to
// LastRunTrackingAnalysis (see LRT.shouldSkip(&ID) / LRT.update(&ID, ...)).
5649 char InstCombinePass::ID = 0;
5650
// NOTE(review): the signature of InstCombinePass::run (original lines
// 5651-5652) is missing from this rendered listing.
5653  auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5654  // No changes since last InstCombine pass, exit early.
5655  if (LRT.shouldSkip(&ID))
5656    return PreservedAnalyses::all();
5657
5658  auto &AC = AM.getResult<AssumptionAnalysis>(F);
5659  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5660  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5662  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5663
5664  auto *AA = &AM.getResult<AAManager>(F);
// Profile-based analyses are optional: PSI comes from the module-level
// cache, and BFI is computed only when a profile summary is present.
5665  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5666  ProfileSummaryInfo *PSI =
5667      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5668  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5669      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5671
5672  if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5673                                       BFI, BPI, PSI, Options)) {
5674    // No changes, all analyses are preserved.
5675    LRT.update(&ID, /*Changed=*/false);
5676    return PreservedAnalyses::all();
5677  }
5678
5679  // Mark all the analyses that instcombine updates as preserved.
// NOTE(review): the lines constructing the PreservedAnalyses value returned
// below (original lines 5680, 5682-5683) are missing from this rendering.
5681  LRT.update(&ID, /*Changed=*/true);
5684  return PA;
5685}
5686
5688  AU.setPreservesCFG();
// InstCombine does not modify the CFG (see the file header comment), hence
// the CFG-preservation flag above.
// NOTE(review): the getAnalysisUsage signature and its addRequired /
// addPreserved calls (original lines 5687, 5689-5700) are missing from this
// rendered listing.
5701}
5702
// NOTE(review): the signature of InstructionCombiningPass::runOnFunction
// (original line 5703) is missing from this rendered listing. This is the
// legacy-pass-manager entry point; it gathers analyses and forwards to
// combineInstructionsOverFunction with default options.
5704  if (skipFunction(F))
5705    return false;
5706
5707  // Required analyses.
5708  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5709  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5710  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5711  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5712  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5713  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5714
5715  // Optional analyses.
// BFI is only pulled in when profile data exists; BPI only if some earlier
// pass already computed it.
5716  ProfileSummaryInfo *PSI =
5717      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5718  BlockFrequencyInfo *BFI =
5719      (PSI && PSI->hasProfileSummary()) ?
5720      &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5721      nullptr;
5722  BranchProbabilityInfo *BPI = nullptr;
5723  if (auto *WrapperPass =
5724          getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5725    BPI = &WrapperPass->getBPI();
5726
5727  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5728                                         BFI, BPI, PSI, InstCombineOptions());
5729}
5730
5732
5735}
5736
5738 "Combine redundant instructions", false, false)
5750
5751// Initialization Routines
5754}
5755
5757 return new InstructionCombiningPass();
5758}
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx; this should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:190
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines the DenseMap class.
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
Hexagon Vector Combine
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
unsigned OpIndex
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:245
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition: APInt.cpp:1732
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:423
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition: APInt.cpp:1864
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:910
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1468
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1902
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:827
APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1934
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1915
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:851
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:256
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
Class to represent array types.
Definition: DerivedTypes.h:395
uint64_t getNumElements() const
Definition: DerivedTypes.h:407
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
Definition: DerivedTypes.h:408
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
Definition: Attributes.cpp:443
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:209
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:530
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:437
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
Definition: BasicBlock.cpp:250
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:381
const Instruction & front() const
Definition: BasicBlock.h:484
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:593
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:481
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:489
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
Definition: BasicBlock.cpp:450
size_t size() const
Definition: BasicBlock.h:482
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:240
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition: InstrTypes.h:293
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1341
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1420
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1931
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1417
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:696
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:787
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
ConstantArray - Constant Array Declarations.
Definition: Constants.h:427
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1312
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:770
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2645
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2632
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2638
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2692
static Constant * getNeg(Constant *C, bool HasNSW=false)
Definition: Constants.cpp:2626
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:880
This class represents a range of values.
Definition: ConstantRange.h:47
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
Definition: Constants.h:511
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1421
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
Definition: Constants.cpp:403
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:784
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
const Constant * stripPointerCasts() const
Definition: Constant.h:218
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
Definition: DataLayout.cpp:971
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
Definition: DataLayout.h:219
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition: DataLayout.cpp:754
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:878
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:369
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:893
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:87
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
bool empty() const
Definition: DenseMap.h:98
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:205
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
const BasicBlock & getEntryBlock() const
Definition: Function.h:821
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
Definition: Operator.h:430
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:113
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1700
Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1058
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2555
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1053
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2045
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2574
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1987
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:330
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1882
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1874
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
Definition: IRBuilder.h:252
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:889
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:900
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:505
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2404
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2435
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1757
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1387
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1798
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2533
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1518
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1370
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2449
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2019
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1671
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2225
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:199
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1499
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1562
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2380
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1694
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:535
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:521
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition: IRBuilder.h:74
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:48
SimplifyQuery SQ
Definition: InstCombiner.h:77
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:337
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:228
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:143
TargetLibraryInfo & TLI
Definition: InstCombiner.h:74
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:368
AAResults * AA
Definition: InstCombiner.h:70
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:388
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:56
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:187
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:420
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:160
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:65
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:377
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:80
ReversePostOrderTraversal< BasicBlock * > & RPOT
Definition: InstCombiner.h:84
const DataLayout & DL
Definition: InstCombiner.h:76
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:455
DomConditionCache DC
Definition: InstCombiner.h:82
const bool MinimizeSize
Definition: InstCombiner.h:68
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:332
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
Definition: InstCombiner.h:97
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:412
DominatorTree & DT
Definition: InstCombiner.h:75
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:280
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:89
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:433
BuilderTy & Builder
Definition: InstCombiner.h:61
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:209
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
Definition: InstCombiner.h:358
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:66
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:364
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:511
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:68
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1764
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:72
bool isTerminator() const
Definition: Instruction.h:313
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:310
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:369
bool isShift() const
Definition: Instruction.h:318
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:508
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isIntDivRem() const
Definition: Instruction.h:316
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:176
Metadata node.
Definition: Metadata.h:1073
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1434
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1440
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:895
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:62
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:692
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition: Constants.h:1460
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:94
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
iterator erase(const_iterator CI)
Definition: SmallVector.h:737
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
typename SuperClass::iterator iterator
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:34
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:258
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
op_range operands()
Definition: User.h:288
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
op_iterator op_begin()
Definition: User.h:280
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:746
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1094
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:871
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isZero() const
Definition: TypeSize.h:156
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:135
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:732
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:826
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:791
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition: PatternMatch.h:316
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:854
void stable_sort(R &&Range)
Definition: STLExtras.h:2037
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2874
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2448
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2313
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:162
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1683
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition: APFloat.h:1526
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2856
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:146
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:406
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1987
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....
Definition: Local.cpp:1728
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2784
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2067
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition: APFloat.h:302
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:315
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:243
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const