1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
136 cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
147namespace llvm {
149} // end namespace llvm
150
151// FIXME: Remove this flag when it is no longer necessary to convert
152// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
153// increases variable availability at the cost of accuracy. Variables that
154// cannot be promoted by mem2reg or SROA will be described as living in memory
155// for their entire lifetime. However, passes like DSE and instcombine can
156// delete stores to the alloca, leading to misleading and inaccurate debug
157// information. This flag can be removed when those passes are fixed.
158static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
159 cl::Hidden, cl::init(true));
160
161std::optional<Instruction *>
163 // Handle target specific intrinsics
164 if (II.getCalledFunction()->isTargetIntrinsic()) {
165 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
166 }
167 return std::nullopt;
168}
169
171 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
172 bool &KnownBitsComputed) {
173 // Handle target specific intrinsics
174 if (II.getCalledFunction()->isTargetIntrinsic()) {
175 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
176 *this, II, DemandedMask, Known, KnownBitsComputed);
177 }
178 return std::nullopt;
179}
180
182 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
183 APInt &PoisonElts2, APInt &PoisonElts3,
184 std::function<void(Instruction *, unsigned, APInt, APInt &)>
185 SimplifyAndSetOp) {
186 // Handle target specific intrinsics
187 if (II.getCalledFunction()->isTargetIntrinsic()) {
188 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
189 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
190 SimplifyAndSetOp);
191 }
192 return std::nullopt;
193}
194
195bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
196 // Approved exception for TTI use: This queries a legality property of the
197 // target, not a profitability heuristic. Ideally this should be part of
198 // DataLayout instead.
199 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
200}
201
202Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
203 if (!RewriteGEP)
204 return llvm::emitGEPOffset(&Builder, DL, GEP);
205
206 IRBuilderBase::InsertPointGuard Guard(Builder);
207 auto *Inst = dyn_cast<Instruction>(GEP);
208 if (Inst)
209 Builder.SetInsertPoint(Inst);
210
211 Value *Offset = EmitGEPOffset(GEP);
212 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
213 if (Inst && !GEP->hasAllConstantIndices() &&
214 !GEP->getSourceElementType()->isIntegerTy(8)) {
216 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
217 Offset, "", GEP->getNoWrapFlags()));
219 }
220 return Offset;
221}
222
223Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
224 GEPNoWrapFlags NW, Type *IdxTy,
225 bool RewriteGEPs) {
226 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
227 if (Sum)
228 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
229 NW.isInBounds());
230 else
231 return Offset;
232 };
233
234 Value *Sum = nullptr;
235 Value *OneUseSum = nullptr;
236 Value *OneUseBase = nullptr;
237 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
238 for (GEPOperator *GEP : reverse(GEPs)) {
239 Value *Offset;
240 {
241 // Expand the offset at the point of the previous GEP to enable rewriting.
242 // However, use the original insertion point for calculating Sum.
243 IRBuilderBase::InsertPointGuard Guard(Builder);
244 auto *Inst = dyn_cast<Instruction>(GEP);
245 if (RewriteGEPs && Inst)
246 Builder.SetInsertPoint(Inst);
247
249 if (Offset->getType() != IdxTy)
250 Offset = Builder.CreateVectorSplat(
251 cast<VectorType>(IdxTy)->getElementCount(), Offset);
252 if (GEP->hasOneUse()) {
253 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
254 OneUseSum = Add(OneUseSum, Offset);
255 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
256 if (!OneUseBase)
257 OneUseBase = GEP->getPointerOperand();
258 continue;
259 }
260
261 if (OneUseSum)
262 Offset = Add(OneUseSum, Offset);
263
264 // Rewrite the GEP to reuse the computed offset. This also includes
265 // offsets from preceding one-use GEPs.
266 if (RewriteGEPs && Inst &&
267 !(GEP->getSourceElementType()->isIntegerTy(8) &&
268 GEP->getOperand(1) == Offset)) {
270 *Inst,
271 Builder.CreatePtrAdd(
272 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
273 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
275 }
276 }
277
278 Sum = Add(Sum, Offset);
279 OneUseSum = OneUseBase = nullptr;
280 OneUseFlags = GEPNoWrapFlags::all();
281 }
282 if (OneUseSum)
283 Sum = Add(Sum, OneUseSum);
284 if (!Sum)
285 return Constant::getNullValue(IdxTy);
286 return Sum;
287}
288
289/// Legal integers and common types are considered desirable. This is used to
290/// avoid creating instructions with types that may not be supported well by
291/// the backend.
292/// NOTE: This treats i8, i16 and i32 specially because they are common
293/// types in frontend languages.
294bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
295 switch (BitWidth) {
296 case 8:
297 case 16:
298 case 32:
299 return true;
300 default:
301 return DL.isLegalInteger(BitWidth);
302 }
303}
304
305/// Return true if it is desirable to convert an integer computation from a
306/// given bit width to a new bit width.
307/// We don't want to convert from a legal or desirable type (like i8) to an
308/// illegal type or from a smaller to a larger illegal type. A width of '1'
309/// is always treated as a desirable type because i1 is a fundamental type in
310/// IR, and there are many specialized optimizations for i1 types.
311/// Common/desirable widths are equally treated as legal to convert to, in
312/// order to open up more combining opportunities.
313bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
314 unsigned ToWidth) const {
315 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
316 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
317
318 // Convert to desirable widths even if they are not legal types.
319 // Only shrink types, to prevent infinite loops.
320 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
321 return true;
322
323 // If the source is a legal or desirable integer type, and the result would be
324 // an illegal type, don't do the transformation.
325 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
326 return false;
327
328 // Otherwise, if both are illegal, do not increase the size of the result. We
329 // do allow things like i160 -> i64, but not i64 -> i160.
330 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
331 return false;
332
333 return true;
334}
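// Illustrative examples for shouldChangeType(FromWidth, ToWidth) above (added;
// assume a typical 64-bit DataLayout where i33 and i37 are not legal):
//   shouldChangeType(33, 32) -> true   (shrink to the desirable i32)
//   shouldChangeType(32, 33) -> false  (desirable source, illegal destination)
//   shouldChangeType(33, 37) -> false  (both illegal and the width grows)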
335
336/// Return true if it is desirable to convert a computation from 'From' to 'To'.
337/// We don't want to convert from a legal to an illegal type or from a smaller
338/// to a larger illegal type. i1 is always treated as a legal type because it is
339/// a fundamental type in IR, and there are many specialized optimizations for
340/// i1 types.
341bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
342 // TODO: This could be extended to allow vectors. Datalayout changes might be
343 // needed to properly support that.
344 if (!From->isIntegerTy() || !To->isIntegerTy())
345 return false;
346
347 unsigned FromWidth = From->getPrimitiveSizeInBits();
348 unsigned ToWidth = To->getPrimitiveSizeInBits();
349 return shouldChangeType(FromWidth, ToWidth);
350}
351
352// Return true if No Signed Wrap should be maintained for I.
353// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
354// where both B and C are constant integers, results in a constant that does
355// not overflow. This function only handles the Add/Sub/Mul opcodes. For
356// all other opcodes, the function conservatively returns false.
359 if (!OBO || !OBO->hasNoSignedWrap())
360 return false;
361
362 const APInt *BVal, *CVal;
363 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
364 return false;
365
366 // We reason about Add/Sub/Mul Only.
367 bool Overflow = false;
368 switch (I.getOpcode()) {
369 case Instruction::Add:
370 (void)BVal->sadd_ov(*CVal, Overflow);
371 break;
372 case Instruction::Sub:
373 (void)BVal->ssub_ov(*CVal, Overflow);
374 break;
375 case Instruction::Mul:
376 (void)BVal->smul_ov(*CVal, Overflow);
377 break;
378 default:
379 // Conservatively return false for other opcodes.
380 return false;
381 }
382 return !Overflow;
383}
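// Illustrative example for maintainNoSignedWrap above: when reassociating
//   %y = add nsw i32 %x, 5
//   %z = add nsw i32 %y, 7
// into "%z = add i32 %x, 12", B = 5 and C = 7 fold to 12 without signed
// overflow, so the combined add may keep the nsw flag.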
384
387 return OBO && OBO->hasNoUnsignedWrap();
388}
389
392 return OBO && OBO->hasNoSignedWrap();
393}
394
395/// Conservatively clears subclassOptionalData after a reassociation or
396/// commutation. Fast-math flags are preserved when applicable, since they
397/// remain valid across these transforms.
400 if (!FPMO) {
401 I.clearSubclassOptionalData();
402 return;
403 }
404
405 FastMathFlags FMF = I.getFastMathFlags();
406 I.clearSubclassOptionalData();
407 I.setFastMathFlags(FMF);
408}
409
410/// Combine constant operands of associative operations either before or after a
411/// cast to eliminate one of the associative operations:
412/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
413/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
415 InstCombinerImpl &IC) {
416 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
417 if (!Cast || !Cast->hasOneUse())
418 return false;
419
420 // TODO: Enhance logic for other casts and remove this check.
421 auto CastOpcode = Cast->getOpcode();
422 if (CastOpcode != Instruction::ZExt)
423 return false;
424
425 // TODO: Enhance logic for other BinOps and remove this check.
426 if (!BinOp1->isBitwiseLogicOp())
427 return false;
428
429 auto AssocOpcode = BinOp1->getOpcode();
430 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
431 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
432 return false;
433
434 Constant *C1, *C2;
435 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
436 !match(BinOp2->getOperand(1), m_Constant(C2)))
437 return false;
438
439 // TODO: This assumes a zext cast.
440 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
441 // to the destination type might lose bits.
442
443 // Fold the constants together in the destination type:
444 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
445 const DataLayout &DL = IC.getDataLayout();
446 Type *DestTy = C1->getType();
447 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
448 if (!CastC2)
449 return false;
450 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
451 if (!FoldedC)
452 return false;
453
454 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
455 IC.replaceOperand(*BinOp1, 1, FoldedC);
457 Cast->dropPoisonGeneratingFlags();
458 return true;
459}
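// Illustrative example for simplifyAssocCastAssoc above (single-use zext and
// single-use inner 'and' assumed):
//   %t = and i8 %x, 3
//   %e = zext i8 %t to i32
//   %r = and i32 %e, 7
// becomes
//   %e = zext i8 %x to i32
//   %r = and i32 %e, 3        ; FoldedC = 7 & zext(3) = 3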
460
461// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
462// inttoptr ( ptrtoint (x) ) --> x
463Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
464 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
465 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
466 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
467 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
468 Type *CastTy = IntToPtr->getDestTy();
469 if (PtrToInt &&
470 CastTy->getPointerAddressSpace() ==
471 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
472 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
473 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
474 return PtrToInt->getOperand(0);
475 }
476 return nullptr;
477}
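// Illustrative example for simplifyIntToPtrRoundTripCast above (assuming the
// pointer and integer types have the same size and address space):
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr
// The call returns %p for %q, letting the round trip be elided.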
478
479/// This performs a few simplifications for operators that are associative or
480/// commutative:
481///
482/// Commutative operators:
483///
484/// 1. Order operands such that they are listed from right (least complex) to
485/// left (most complex). This puts constants before unary operators before
486/// binary operators.
487///
488/// Associative operators:
489///
490/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
491/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
492///
493/// Associative and commutative operators:
494///
495/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
496/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
497/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
498/// if C1 and C2 are constants.
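///
/// For example (rule 6, illustrative): "(X + 5) + (Y + 7)" becomes
/// "(X + Y) + 12" when both inner adds have a single use.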
500 Instruction::BinaryOps Opcode = I.getOpcode();
501 bool Changed = false;
502
503 do {
504 // Order operands such that they are listed from right (least complex) to
505 // left (most complex). This puts constants before unary operators before
506 // binary operators.
507 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
508 getComplexity(I.getOperand(1)))
509 Changed = !I.swapOperands();
510
511 if (I.isCommutative()) {
512 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
513 replaceOperand(I, 0, Pair->first);
514 replaceOperand(I, 1, Pair->second);
515 Changed = true;
516 }
517 }
518
519 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
520 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
521
522 if (I.isAssociative()) {
523 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
524 if (Op0 && Op0->getOpcode() == Opcode) {
525 Value *A = Op0->getOperand(0);
526 Value *B = Op0->getOperand(1);
527 Value *C = I.getOperand(1);
528
529 // Does "B op C" simplify?
530 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
531 // It simplifies to V. Form "A op V".
532 replaceOperand(I, 0, A);
533 replaceOperand(I, 1, V);
534 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
535 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
536
537 // Conservatively clear all optional flags since they may not be
538 // preserved by the reassociation. Reset nsw/nuw based on the above
539 // analysis.
541
542 // Note: this is only valid because SimplifyBinOp doesn't look at
543 // the operands to Op0.
544 if (IsNUW)
545 I.setHasNoUnsignedWrap(true);
546
547 if (IsNSW)
548 I.setHasNoSignedWrap(true);
549
550 Changed = true;
551 ++NumReassoc;
552 continue;
553 }
554 }
555
556 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
557 if (Op1 && Op1->getOpcode() == Opcode) {
558 Value *A = I.getOperand(0);
559 Value *B = Op1->getOperand(0);
560 Value *C = Op1->getOperand(1);
561
562 // Does "A op B" simplify?
563 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
564 // It simplifies to V. Form "V op C".
565 replaceOperand(I, 0, V);
566 replaceOperand(I, 1, C);
567 // Conservatively clear the optional flags, since they may not be
568 // preserved by the reassociation.
570 Changed = true;
571 ++NumReassoc;
572 continue;
573 }
574 }
575 }
576
577 if (I.isAssociative() && I.isCommutative()) {
578 if (simplifyAssocCastAssoc(&I, *this)) {
579 Changed = true;
580 ++NumReassoc;
581 continue;
582 }
583
584 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
585 if (Op0 && Op0->getOpcode() == Opcode) {
586 Value *A = Op0->getOperand(0);
587 Value *B = Op0->getOperand(1);
588 Value *C = I.getOperand(1);
589
590 // Does "C op A" simplify?
591 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
592 // It simplifies to V. Form "V op B".
593 replaceOperand(I, 0, V);
594 replaceOperand(I, 1, B);
595 // Conservatively clear the optional flags, since they may not be
596 // preserved by the reassociation.
598 Changed = true;
599 ++NumReassoc;
600 continue;
601 }
602 }
603
604 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
605 if (Op1 && Op1->getOpcode() == Opcode) {
606 Value *A = I.getOperand(0);
607 Value *B = Op1->getOperand(0);
608 Value *C = Op1->getOperand(1);
609
610 // Does "C op A" simplify?
611 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
612 // It simplifies to V. Form "B op V".
613 replaceOperand(I, 0, B);
614 replaceOperand(I, 1, V);
615 // Conservatively clear the optional flags, since they may not be
616 // preserved by the reassociation.
618 Changed = true;
619 ++NumReassoc;
620 continue;
621 }
622 }
623
624 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
625 // if C1 and C2 are constants.
626 Value *A, *B;
627 Constant *C1, *C2, *CRes;
628 if (Op0 && Op1 &&
629 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
630 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
631 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
632 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
633 bool IsNUW = hasNoUnsignedWrap(I) &&
634 hasNoUnsignedWrap(*Op0) &&
635 hasNoUnsignedWrap(*Op1);
636 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
637 BinaryOperator::CreateNUW(Opcode, A, B) :
638 BinaryOperator::Create(Opcode, A, B);
639
640 if (isa<FPMathOperator>(NewBO)) {
641 FastMathFlags Flags = I.getFastMathFlags() &
642 Op0->getFastMathFlags() &
643 Op1->getFastMathFlags();
644 NewBO->setFastMathFlags(Flags);
645 }
646 InsertNewInstWith(NewBO, I.getIterator());
647 NewBO->takeName(Op1);
648 replaceOperand(I, 0, NewBO);
649 replaceOperand(I, 1, CRes);
650 // Conservatively clear the optional flags, since they may not be
651 // preserved by the reassociation.
653 if (IsNUW)
654 I.setHasNoUnsignedWrap(true);
655
656 Changed = true;
657 continue;
658 }
659 }
660
661 // No further simplifications.
662 return Changed;
663 } while (true);
664}
665
666/// Return whether "X LOp (Y ROp Z)" is always equal to
667/// "(X LOp Y) ROp (X LOp Z)".
670 // X & (Y | Z) <--> (X & Y) | (X & Z)
671 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
672 if (LOp == Instruction::And)
673 return ROp == Instruction::Or || ROp == Instruction::Xor;
674
675 // X | (Y & Z) <--> (X | Y) & (X | Z)
676 if (LOp == Instruction::Or)
677 return ROp == Instruction::And;
678
679 // X * (Y + Z) <--> (X * Y) + (X * Z)
680 // X * (Y - Z) <--> (X * Y) - (X * Z)
681 if (LOp == Instruction::Mul)
682 return ROp == Instruction::Add || ROp == Instruction::Sub;
683
684 return false;
685}
686
687/// Return whether "(X LOp Y) ROp Z" is always equal to
688/// "(X ROp Z) LOp (Y ROp Z)".
692 return leftDistributesOverRight(ROp, LOp);
693
694 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
696
697 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
698 // but this requires knowing that the addition does not overflow and other
699 // such subtleties.
700}
701
702/// This function returns the identity value for the given opcode, which can be
703/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
705 if (isa<Constant>(V))
706 return nullptr;
707
708 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
709}
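// E.g. getIdentityValue(Instruction::Mul, X) returns the constant 1 of X's
// type and getIdentityValue(Instruction::Add, X) returns 0, while a constant
// V yields nullptr.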
710
711/// This function predicates factorization using distributive laws. By default,
712/// it just returns the 'Op' inputs. But for special-cases like
713/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
714/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
715/// allow more factorization opportunities.
718 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
719 assert(Op && "Expected a binary operator");
720 LHS = Op->getOperand(0);
721 RHS = Op->getOperand(1);
722 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
723 Constant *C;
724 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
725 // X << C --> X * (1 << C)
727 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
728 assert(RHS && "Constant folding of immediate constants failed");
729 return Instruction::Mul;
730 }
731 // TODO: We can add other conversions e.g. shr => div etc.
732 }
733 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
734 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
736 // lshr nneg C, X --> ashr nneg C, X
737 return Instruction::AShr;
738 }
739 }
740 return Op->getOpcode();
741}
742
743/// This tries to simplify binary operations by factorizing out common terms
744/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
747 Instruction::BinaryOps InnerOpcode, Value *A,
748 Value *B, Value *C, Value *D) {
749 assert(A && B && C && D && "All values must be provided");
750
751 Value *V = nullptr;
752 Value *RetVal = nullptr;
753 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
754 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
755
756 // Does "X op' Y" always equal "Y op' X"?
757 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
758
759 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
760 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
761 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
762 // commutative case, "(A op' B) op (C op' A)"?
763 if (A == C || (InnerCommutative && A == D)) {
764 if (A != C)
765 std::swap(C, D);
766 // Consider forming "A op' (B op D)".
767 // If "B op D" simplifies then it can be formed with no cost.
768 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
769
770 // If "B op D" doesn't simplify then only go on if one of the existing
771 // operations "A op' B" and "C op' D" will be zapped as no longer used.
772 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
773 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
774 if (V)
775 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
776 }
777 }
778
779 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
780 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
781 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
782 // commutative case, "(A op' B) op (B op' D)"?
783 if (B == D || (InnerCommutative && B == C)) {
784 if (B != D)
785 std::swap(C, D);
786 // Consider forming "(A op C) op' B".
787 // If "A op C" simplifies then it can be formed with no cost.
788 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
789
790 // If "A op C" doesn't simplify then only go on if one of the existing
791 // operations "A op' B" and "C op' D" will be zapped as no longer used.
792 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
793 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
794 if (V)
795 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
796 }
797 }
798
799 if (!RetVal)
800 return nullptr;
801
802 ++NumFactor;
803 RetVal->takeName(&I);
804
805 // Try to add no-overflow flags to the final value.
806 if (isa<BinaryOperator>(RetVal)) {
807 bool HasNSW = false;
808 bool HasNUW = false;
810 HasNSW = I.hasNoSignedWrap();
811 HasNUW = I.hasNoUnsignedWrap();
812 }
813 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
814 HasNSW &= LOBO->hasNoSignedWrap();
815 HasNUW &= LOBO->hasNoUnsignedWrap();
816 }
817
818 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
819 HasNSW &= ROBO->hasNoSignedWrap();
820 HasNUW &= ROBO->hasNoUnsignedWrap();
821 }
822
823 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
824 // We can propagate 'nsw' if we know that
825 // %Y = mul nsw i16 %X, C
826 // %Z = add nsw i16 %Y, %X
827 // =>
828 // %Z = mul nsw i16 %X, C+1
829 //
830 // iff C+1 isn't INT_MIN
831 const APInt *CInt;
832 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
833 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
834
835 // nuw can be propagated with any constant or nuw value.
836 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
837 }
838 }
839 return RetVal;
840}
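// Illustrative example for tryFactorization above (assuming at least one of
// the inner multiplies has a single use):
//   %ab = mul i32 %a, %b
//   %ac = mul i32 %a, %c
//   %r  = add i32 %ab, %ac
// becomes
//   %bc = add i32 %b, %c
//   %r  = mul i32 %a, %bc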
841
842// If `I` has one Const operand and the other matches `(ctpop (not x))`,
843// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
844// This is only useful if the new subtract can fold, so we only handle the
845// following cases:
846// 1) (add/sub/disjoint_or C, (ctpop (not x))
847// -> (add/sub/disjoint_or C', (ctpop x))
848// 2) (cmp pred C, (ctpop (not x))
849// -> (cmp pred C', (ctpop x))
851 unsigned Opc = I->getOpcode();
852 unsigned ConstIdx = 1;
853 switch (Opc) {
854 default:
855 return nullptr;
856 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
857 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
858 // is constant.
859 case Instruction::Sub:
860 ConstIdx = 0;
861 break;
862 case Instruction::ICmp:
863 // Signed predicates aren't correct in some edge cases, e.g. for i2 types. As
864 // well, since (ctpop x) is known [0, log2(BitWidth(x))], almost all signed
865 // comparisons against it are simplified to unsigned.
866 if (cast<ICmpInst>(I)->isSigned())
867 return nullptr;
868 break;
869 case Instruction::Or:
870 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
871 return nullptr;
872 [[fallthrough]];
873 case Instruction::Add:
874 break;
875 }
876
877 Value *Op;
878 // Find ctpop.
879 if (!match(I->getOperand(1 - ConstIdx),
881 return nullptr;
882
883 Constant *C;
884 // Check other operand is ImmConstant.
885 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
886 return nullptr;
887
888 Type *Ty = Op->getType();
889 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
890 // Need extra check for icmp. Note if this check is true, it generally means
891 // the icmp will simplify to true/false.
892 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
893 Constant *Cmp =
895 if (!Cmp || !Cmp->isZeroValue())
896 return nullptr;
897 }
898
899 // Check we can invert `(not x)` for free.
900 bool Consumes = false;
901 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
902 return nullptr;
903 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
904 assert(NotOp != nullptr &&
905 "Desync between isFreeToInvert and getFreelyInverted");
906
907 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
908
909 Value *R = nullptr;
910
911 // Do the transformation here to avoid potentially introducing an infinite
912 // loop.
913 switch (Opc) {
914 case Instruction::Sub:
915 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
916 break;
917 case Instruction::Or:
918 case Instruction::Add:
919 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
920 break;
921 case Instruction::ICmp:
922 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
923 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
924 break;
925 default:
926 llvm_unreachable("Unhandled Opcode");
927 }
928 assert(R != nullptr);
929 return replaceInstUsesWith(*I, R);
930}
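// Illustrative example for the ctpop(not x) fold above, for i32 %x with
// single-use intermediates:
//   %n = xor i32 %x, -1
//   %p = call i32 @llvm.ctpop.i32(i32 %n)
//   %r = add i32 %p, 5
// becomes
//   %p2 = call i32 @llvm.ctpop.i32(i32 %x)
//   %r  = sub i32 37, %p2        ; 37 = 5 + BitWidth(i32)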
931
932// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
933// IFF
934// 1) the logic_shifts match
935// 2) either BinOp1 is `and`, or the binop/shift pair is completely
936// distributable and either BinOp2 is `and` or
937// (logic_shift (inv_logic_shift C1, C), C) == C1
938//
939// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
940//
941// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
942// IFF
943// 1) the logic_shifts match
944// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
945//
946// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
947//
948// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
949// IFF
950// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
951// 2) Binop2 is `not`
952//
953// -> (arithmetic_shift Binop1((not X), Y), Amt)
954
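// Illustrative example of the second pattern above (matching binops with a
// matching lshr, single-use intermediates assumed):
//   ((X >>u 4) ^ Mask) ^ (Y >>u 4)  -->  ((X ^ Y) >>u 4) ^ Mask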
956 const DataLayout &DL = I.getDataLayout();
957 auto IsValidBinOpc = [](unsigned Opc) {
958 switch (Opc) {
959 default:
960 return false;
961 case Instruction::And:
962 case Instruction::Or:
963 case Instruction::Xor:
964 case Instruction::Add:
965 // Skip Sub as we only match constant masks which will canonicalize to use
966 // add.
967 return true;
968 }
969 };
970
971 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
972 // constraints.
973 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
974 unsigned ShOpc) {
975 assert(ShOpc != Instruction::AShr);
976 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
977 ShOpc == Instruction::Shl;
978 };
979
980 auto GetInvShift = [](unsigned ShOpc) {
981 assert(ShOpc != Instruction::AShr);
982 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
983 };
984
985 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
986 unsigned ShOpc, Constant *CMask,
987 Constant *CShift) {
988 // If the BinOp1 is `and` we don't need to check the mask.
989 if (BinOpc1 == Instruction::And)
990 return true;
991
992 // For all other possible transfers we need complete distributable
993 // binop/shift (anything but `add` + `lshr`).
994 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
995 return false;
996
997 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
998 // vecs, otherwise the mask will be simplified and the following check will
999 // handle it).
1000 if (BinOpc2 == Instruction::And)
1001 return true;
1002
1003 // Otherwise, need mask that meets the below requirement.
1004 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1005 Constant *MaskInvShift =
1006 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1007 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1008 CMask;
1009 };
1010
1011 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1012 Constant *CMask, *CShift;
1013 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1014 if (!match(I.getOperand(ShOpnum),
1015 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1016 return nullptr;
1017 if (!match(I.getOperand(1 - ShOpnum),
1019 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1020 m_Value(ShiftedX)),
1021 m_Value(Mask))))
1022 return nullptr;
1023 // Make sure we are matching instruction shifts and not ConstantExpr
1024 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1025 auto *IX = dyn_cast<Instruction>(ShiftedX);
1026 if (!IY || !IX)
1027 return nullptr;
1028
1029 // LHS and RHS need same shift opcode
1030 unsigned ShOpc = IY->getOpcode();
1031 if (ShOpc != IX->getOpcode())
1032 return nullptr;
1033
1034 // Make sure binop is real instruction and not ConstantExpr
1035 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1036 if (!BO2)
1037 return nullptr;
1038
1039 unsigned BinOpc = BO2->getOpcode();
1040 // Make sure we have valid binops.
1041 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1042 return nullptr;
1043
1044 if (ShOpc == Instruction::AShr) {
1045 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1046 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1047 Value *NotX = Builder.CreateNot(X);
1048 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1050 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1051 }
1052
1053 return nullptr;
1054 }
1055
1056 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1057 // distribute to drop the shift, irrespective of the constants.
1058 if (BinOpc == I.getOpcode() &&
1059 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1060 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1061 Value *NewBinOp1 = Builder.CreateBinOp(
1062 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1063 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1064 }
1065
1066 // Otherwise we can only distribute by constant shifting the mask, so
1067 // ensure we have constants.
1068 if (!match(Shift, m_ImmConstant(CShift)))
1069 return nullptr;
1070 if (!match(Mask, m_ImmConstant(CMask)))
1071 return nullptr;
1072
1073 // Check if we can distribute the binops.
1074 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1075 return nullptr;
1076
1077 Constant *NewCMask =
1078 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1079 Value *NewBinOp2 = Builder.CreateBinOp(
1080 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1081 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1082 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1083 NewBinOp1, CShift);
1084 };
1085
1086 if (Instruction *R = MatchBinOp(0))
1087 return R;
1088 return MatchBinOp(1);
1089}
1090
1091// (Binop (zext C), (select C, T, F))
1092// -> (select C, (binop 1, T), (binop 0, F))
1093//
1094// (Binop (sext C), (select C, T, F))
1095// -> (select C, (binop -1, T), (binop 0, F))
1096//
1097// Attempt to simplify binary operations into a select with folded args, when
1098// one operand of the binop is a select instruction and the other operand is a
1099// zext/sext extension, whose value is the select condition.
1102 // TODO: this simplification may be extended to any speculatable instruction,
1103 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1104 Instruction::BinaryOps Opc = I.getOpcode();
1105 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1106 Value *A, *CondVal, *TrueVal, *FalseVal;
1107 Value *CastOp;
1108
1109 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1110 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1111 A->getType()->getScalarSizeInBits() == 1 &&
1112 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1113 m_Value(FalseVal)));
1114 };
1115
1116 // Make sure one side of the binop is a select instruction, and the other is a
1117 // zero/sign extension operating on an i1.
1118 if (MatchSelectAndCast(LHS, RHS))
1119 CastOp = LHS;
1120 else if (MatchSelectAndCast(RHS, LHS))
1121 CastOp = RHS;
1122 else
1123 return nullptr;
1124
1125 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1126 bool IsCastOpRHS = (CastOp == RHS);
1127 bool IsZExt = isa<ZExtInst>(CastOp);
1128 Constant *C;
1129
1130 if (IsTrueArm) {
1131 C = Constant::getNullValue(V->getType());
1132 } else if (IsZExt) {
1133 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1134 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1135 } else {
1136 C = Constant::getAllOnesValue(V->getType());
1137 }
1138
1139 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1140 : Builder.CreateBinOp(Opc, C, V);
1141 };
1142
1143 // If the value used in the zext/sext is the select condition, or the negated
1144 // of the select condition, the binop can be simplified.
1145 if (CondVal == A) {
1146 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1147 return SelectInst::Create(CondVal, NewTrueVal,
1148 NewFoldedConst(true, FalseVal));
1149 }
1150
1151 if (match(A, m_Not(m_Specific(CondVal)))) {
1152 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1153 return SelectInst::Create(CondVal, NewTrueVal,
1154 NewFoldedConst(false, FalseVal));
1155 }
1156
1157 return nullptr;
1158}
1159
1161 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1164 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1165 Value *A, *B, *C, *D;
1166 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1167
1168 if (Op0)
1169 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1170 if (Op1)
1171 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1172
1173 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1174 // a common term.
1175 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1176 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1177 return V;
1178
1179 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1180 // term.
1181 if (Op0)
1182 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1183 if (Value *V =
1184 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1185 return V;
1186
1187 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1188 // term.
1189 if (Op1)
1190 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1191 if (Value *V =
1192 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1193 return V;
1194
1195 return nullptr;
1196}
1197
1198/// This tries to simplify binary operations which some other binary operation
1199/// distributes over either by factorizing out common terms
1200/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1201/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1202/// Returns the simplified value, or null if it didn't simplify.
1204 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1207 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1208
1209 // Factorization.
1210 if (Value *R = tryFactorizationFolds(I))
1211 return R;
1212
1213 // Expansion.
1214 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1215 // The instruction has the form "(A op' B) op C". See if expanding it out
1216 // to "(A op C) op' (B op C)" results in simplifications.
1217 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1218 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1219
1220 // Disable the use of undef because it's not safe to distribute undef.
1221 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1222 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1223 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1224
1225 // Do "A op C" and "B op C" both simplify?
1226 if (L && R) {
1227 // They do! Return "L op' R".
1228 ++NumExpand;
1229 C = Builder.CreateBinOp(InnerOpcode, L, R);
1230 C->takeName(&I);
1231 return C;
1232 }
1233
1234 // Does "A op C" simplify to the identity value for the inner opcode?
1235 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1236 // They do! Return "B op C".
1237 ++NumExpand;
1238 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1239 C->takeName(&I);
1240 return C;
1241 }
1242
1243 // Does "B op C" simplify to the identity value for the inner opcode?
1244 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1245 // They do! Return "A op C".
1246 ++NumExpand;
1247 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1248 C->takeName(&I);
1249 return C;
1250 }
1251 }
1252
1253 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1254 // The instruction has the form "A op (B op' C)". See if expanding it out
1255 // to "(A op B) op' (A op C)" results in simplifications.
1256 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1257 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1258
1259 // Disable the use of undef because it's not safe to distribute undef.
1260 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1261 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1262 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1263
1264 // Do "A op B" and "A op C" both simplify?
1265 if (L && R) {
1266 // They do! Return "L op' R".
1267 ++NumExpand;
1268 A = Builder.CreateBinOp(InnerOpcode, L, R);
1269 A->takeName(&I);
1270 return A;
1271 }
1272
1273 // Does "A op B" simplify to the identity value for the inner opcode?
1274 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1275 // They do! Return "A op C".
1276 ++NumExpand;
1277 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1278 A->takeName(&I);
1279 return A;
1280 }
1281
1282 // Does "A op C" simplify to the identity value for the inner opcode?
1283 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1284 // They do! Return "A op B".
1285 ++NumExpand;
1286 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1287 A->takeName(&I);
1288 return A;
1289 }
1290 }
1291
1292 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1293}
1294
1295static std::optional<std::pair<Value *, Value *>>
1297 if (LHS->getParent() != RHS->getParent())
1298 return std::nullopt;
1299
1300 if (LHS->getNumIncomingValues() < 2)
1301 return std::nullopt;
1302
1303 if (!equal(LHS->blocks(), RHS->blocks()))
1304 return std::nullopt;
1305
1306 Value *L0 = LHS->getIncomingValue(0);
1307 Value *R0 = RHS->getIncomingValue(0);
1308
1309 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1310 Value *L1 = LHS->getIncomingValue(I);
1311 Value *R1 = RHS->getIncomingValue(I);
1312
1313 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1314 continue;
1315
1316 return std::nullopt;
1317 }
1318
1319 return std::optional(std::pair(L0, R0));
1320}
1321
1322std::optional<std::pair<Value *, Value *>>
1323InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1326 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1327 return std::nullopt;
1328 switch (LHSInst->getOpcode()) {
1329 case Instruction::PHI:
1331 case Instruction::Select: {
1332 Value *Cond = LHSInst->getOperand(0);
1333 Value *TrueVal = LHSInst->getOperand(1);
1334 Value *FalseVal = LHSInst->getOperand(2);
1335 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1336 FalseVal == RHSInst->getOperand(1))
1337 return std::pair(TrueVal, FalseVal);
1338 return std::nullopt;
1339 }
1340 case Instruction::Call: {
1341 // Match min(a, b) and max(a, b)
1342 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1343 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1344 if (LHSMinMax && RHSMinMax &&
1345 LHSMinMax->getPredicate() ==
1347 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1348 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1349 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1350 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1351 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1352 return std::nullopt;
1353 }
1354 default:
1355 return std::nullopt;
1356 }
1357}
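// E.g. matchSymmetricPair returns {%a, %b} for operand pairs such as
//   "select i1 %c, %a, %b" and "select i1 %c, %b, %a", or
//   "smin(%a, %b)" and "smax(%a, %b)" (illustrative).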
1358
1360 Value *LHS,
1361 Value *RHS) {
1362 Value *A, *B, *C, *D, *E, *F;
1363 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1364 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1365 if (!LHSIsSelect && !RHSIsSelect)
1366 return nullptr;
1367
1369 ? nullptr
1370 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1371
1372 FastMathFlags FMF;
1374 if (isa<FPMathOperator>(&I)) {
1375 FMF = I.getFastMathFlags();
1376 Builder.setFastMathFlags(FMF);
1377 }
1378
1379 Instruction::BinaryOps Opcode = I.getOpcode();
1380 SimplifyQuery Q = SQ.getWithInstruction(&I);
1381
1382 Value *Cond, *True = nullptr, *False = nullptr;
1383
1384 // Special-case for add/negate combination. Replace the zero in the negation
1385 // with the trailing add operand:
1386 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1387 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1388 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1389 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1390 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1391 return nullptr;
1392 Value *N;
1393 if (True && match(FVal, m_Neg(m_Value(N)))) {
1394 Value *Sub = Builder.CreateSub(Z, N);
1395 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1396 }
1397 if (False && match(TVal, m_Neg(m_Value(N)))) {
1398 Value *Sub = Builder.CreateSub(Z, N);
1399 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1400 }
1401 return nullptr;
1402 };
1403
1404 if (LHSIsSelect && RHSIsSelect && A == D) {
1405 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1406 Cond = A;
1407 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1408 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1409
1410 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1411 if (False && !True)
1412 True = Builder.CreateBinOp(Opcode, B, E);
1413 else if (True && !False)
1414 False = Builder.CreateBinOp(Opcode, C, F);
1415 }
1416 } else if (LHSIsSelect && LHS->hasOneUse()) {
1417 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1418 Cond = A;
1419 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1420 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1421 if (Value *NewSel = foldAddNegate(B, C, RHS))
1422 return NewSel;
1423 } else if (RHSIsSelect && RHS->hasOneUse()) {
1424 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1425 Cond = D;
1426 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1427 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1428 if (Value *NewSel = foldAddNegate(E, F, LHS))
1429 return NewSel;
1430 }
1431
1432 if (!True || !False)
1433 return nullptr;
1434
1435 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1436 NewSI->takeName(&I);
1437 return NewSI;
1438}
1439
1440/// Freely adapt every user of V as if V were changed to !V.
1441/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1443 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1444 for (User *U : make_early_inc_range(I->users())) {
1445 if (U == IgnoredUser)
1446 continue; // Don't consider this user.
1447 switch (cast<Instruction>(U)->getOpcode()) {
1448 case Instruction::Select: {
1449 auto *SI = cast<SelectInst>(U);
1450 SI->swapValues();
1451 SI->swapProfMetadata();
1452 break;
1453 }
1454 case Instruction::Br: {
1456 BI->swapSuccessors(); // swaps prof metadata too
1457 if (BPI)
1458 BPI->swapSuccEdgesProbabilities(BI->getParent());
1459 break;
1460 }
1461 case Instruction::Xor:
1463 // Add to worklist for DCE.
1465 break;
1466 default:
1467 llvm_unreachable("Got unexpected user - out of sync with "
1468 "canFreelyInvertAllUsersOf() ?");
1469 }
1470 }
1471
1472 // Update pre-existing debug value uses.
1473 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1474 llvm::findDbgValues(I, DbgVariableRecords);
1475
1476 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1477 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1478 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1479 Idx != End; ++Idx)
1480 if (DbgVal->getVariableLocationOp(Idx) == I)
1481 DbgVal->setExpression(
1482 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1483 }
1484}
1485
1486/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1487/// constant zero (which is the 'negate' form).
1488Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1489 Value *NegV;
1490 if (match(V, m_Neg(m_Value(NegV))))
1491 return NegV;
1492
1493 // Constants can be considered to be negated values if they can be folded.
1495 return ConstantExpr::getNeg(C);
1496
1498 if (C->getType()->getElementType()->isIntegerTy())
1499 return ConstantExpr::getNeg(C);
1500
1502 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1503 Constant *Elt = CV->getAggregateElement(i);
1504 if (!Elt)
1505 return nullptr;
1506
1507 if (isa<UndefValue>(Elt))
1508 continue;
1509
1510 if (!isa<ConstantInt>(Elt))
1511 return nullptr;
1512 }
1513 return ConstantExpr::getNeg(CV);
1514 }
1515
1516 // Negate integer vector splats.
1517 if (auto *CV = dyn_cast<Constant>(V))
1518 if (CV->getType()->isVectorTy() &&
1519 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1520 return ConstantExpr::getNeg(CV);
1521
1522 return nullptr;
1523}
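// E.g. dyn_castNegVal returns %x for "sub i32 0, %x" and returns -5 for the
// constant 5; it returns nullptr when no negated form is available.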
1524
1525// Try to fold:
1526// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1527// -> ({s|u}itofp (int_binop x, y))
1528// 2) (fp_binop ({s|u}itofp x), FpC)
1529// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1530//
1531// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1532Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1533 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1535
1536 Type *FPTy = BO.getType();
1537 Type *IntTy = IntOps[0]->getType();
1538
1539 unsigned IntSz = IntTy->getScalarSizeInBits();
1540 // This is the maximum number of bits the integer can have in use for the
1541 // int -> fp casts to be exact.
1542 unsigned MaxRepresentableBits =
1544
1545 // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1546 // checks later on.
1547 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1548
1549 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1550 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1551 auto IsNonZero = [&](unsigned OpNo) -> bool {
1552 if (OpsKnown[OpNo].hasKnownBits() &&
1553 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1554 return true;
1555 return isKnownNonZero(IntOps[OpNo], SQ);
1556 };
1557
1558 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1559 // NB: This matches the impl in ValueTracking; we just try to use cached
1560 // knownbits here. If we ever start supporting WithCache for
1561 // `isKnownNonNegative`, change this to an explicit call.
1562 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1563 };
1564
1565 // Check if we know for certain that ({s|u}itofp op) is exact.
1566 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1567 // Can we treat this operand as the desired sign?
1568 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1569 !IsNonNeg(OpNo))
1570 return false;
1571
1572 // If fp precision >= bitwidth(op) then it's exact.
1573 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1574 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1575 // handled specially. We can't, however, increase the bound arbitrarily for
1576 // `sitofp` as for larger sizes, it won't sign extend.
1577 if (MaxRepresentableBits < IntSz) {
1578 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1579 // numSignBits(op).
1580 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1581 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1582 if (OpsFromSigned)
1583 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1584 // Finally for unsigned check that fp precision >= bitwidth(op) -
1585 // numLeadingZeros(op).
1586 else {
1587 NumUsedLeadingBits[OpNo] =
1588 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1589 }
1590 }
1591 // NB: We could also check if op is known to be a power of 2 or zero (which
1592 // will always be representable). It's unlikely, however, that if we are
1593 // unable to bound op in any way, we will be able to pass the overflow checks
1594 // later on.
1595
1596 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1597 return false;
1598 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1599 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1600 IsNonZero(OpNo);
1601 };
1602
1603 // If we have a constant rhs, see if we can losslessly convert it to an int.
1604 if (Op1FpC != nullptr) {
1605 // Signed + Mul requires a non-zero operand.
1606 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1607 !match(Op1FpC, m_NonZeroFP()))
1608 return nullptr;
1609
1611 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1612 IntTy, DL);
1613 if (Op1IntC == nullptr)
1614 return nullptr;
1615 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1616 : Instruction::UIToFP,
1617 Op1IntC, FPTy, DL) != Op1FpC)
1618 return nullptr;
1619
1620 // First try to keep sign of cast the same.
1621 IntOps[1] = Op1IntC;
1622 }
1623
1624 // Ensure lhs/rhs integer types match.
1625 if (IntTy != IntOps[1]->getType())
1626 return nullptr;
1627
1628 if (Op1FpC == nullptr) {
1629 if (!IsValidPromotion(1))
1630 return nullptr;
1631 }
1632 if (!IsValidPromotion(0))
1633 return nullptr;
1634
1635 // Finally, we check that the integer version of the binop will not overflow.
1637 // Because of the precision check, we can often rule out overflows.
1638 bool NeedsOverflowCheck = true;
1639 // Try to conservatively rule out overflow based on the already done precision
1640 // checks.
1641 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1642 unsigned OverflowMaxCurBits =
1643 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1644 bool OutputSigned = OpsFromSigned;
1645 switch (BO.getOpcode()) {
1646 case Instruction::FAdd:
1647 IntOpc = Instruction::Add;
1648 OverflowMaxOutputBits += OverflowMaxCurBits;
1649 break;
1650 case Instruction::FSub:
1651 IntOpc = Instruction::Sub;
1652 OverflowMaxOutputBits += OverflowMaxCurBits;
1653 break;
1654 case Instruction::FMul:
1655 IntOpc = Instruction::Mul;
1656 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1657 break;
1658 default:
1659 llvm_unreachable("Unsupported binop");
1660 }
1661 // The precision check may have already ruled out overflow.
1662 if (OverflowMaxOutputBits < IntSz) {
1663 NeedsOverflowCheck = false;
1664 // We can bound the unsigned overflow from sub to an in-range signed value
1665 // (this is what allows us to avoid the overflow check for sub).
1666 if (IntOpc == Instruction::Sub)
1667 OutputSigned = true;
1668 }
1669
1670 // Precision check did not rule out overflow, so need to check.
1671 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1672 // `IntOps[...]` arguments to `KnownOps[...]`.
1673 if (NeedsOverflowCheck &&
1674 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1675 return nullptr;
1676
1677 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1678 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1679 IntBO->setHasNoSignedWrap(OutputSigned);
1680 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1681 }
1682 if (OutputSigned)
1683 return new SIToFPInst(IntBinOp, FPTy);
1684 return new UIToFPInst(IntBinOp, FPTy);
1685}
1686
1687// Try to fold:
1688// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1689// -> ({s|u}itofp (int_binop x, y))
1690// 2) (fp_binop ({s|u}itofp x), FpC)
1691// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
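// Illustrative example (not in the original comment; assumes the precision and
// overflow checks below succeed, e.g. because %x and %y are known to be small
// enough that the narrow integer add cannot wrap):
//   %xf = uitofp i16 %x to float
//   %yf = uitofp i16 %y to float
//   %r  = fadd float %xf, %yf
// -->
//   %s  = add nuw i16 %x, %y
//   %r  = uitofp i16 %s to float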
1692Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1693 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1694 Constant *Op1FpC = nullptr;
1695 // Check for:
1696 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1697 // 2) (binop ({s|u}itofp x), FpC)
1698 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1699 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1700 return nullptr;
1701
1702 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1703 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1704 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1705 return nullptr;
1706
1707 // Cache KnownBits to potentially save some repeated analysis.
1708 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1709
1710 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1711 // different constraints depending on the sign of the cast.
1712 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1713 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1714 IntOps, Op1FpC, OpsKnown))
1715 return R;
1716 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1717 Op1FpC, OpsKnown);
1718}
1719
1720/// A binop with a constant operand and a sign-extended boolean operand may be
1721/// converted into a select of constants by applying the binary operation to
1722/// the constant with the two possible values of the extended boolean (0 or -1).
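/// For example (illustrative): add (sext i1 %b to i32), 7 becomes
/// select i1 %b, i32 6, i32 7, because the sext is either -1 or 0.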
1723Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1724 // TODO: Handle non-commutative binop (constant is operand 0).
1725 // TODO: Handle zext.
1726 // TODO: Peek through 'not' of cast.
1727 Value *BO0 = BO.getOperand(0);
1728 Value *BO1 = BO.getOperand(1);
1729 Value *X;
1730 Constant *C;
1731 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1732 !X->getType()->isIntOrIntVectorTy(1))
1733 return nullptr;
1734
1735 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1738 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1739 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1740 return createSelectInst(X, TVal, FVal);
1741}
1742
1743static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1744 bool IsTrueArm) {
1745 SmallVector<Value *> Ops;
1746 for (Value *Op : I.operands()) {
1747 Value *V = nullptr;
1748 if (Op == SI) {
1749 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1750 } else if (match(SI->getCondition(),
1753 m_Specific(Op), m_Value(V))) &&
1755 // Pass
1756 } else {
1757 V = Op;
1758 }
1759 Ops.push_back(V);
1760 }
1761
1762 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1763}
1764
1765static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1766 Value *NewOp, InstCombiner &IC) {
1767 Instruction *Clone = I.clone();
1768 Clone->replaceUsesOfWith(SI, NewOp);
1770 IC.InsertNewInstBefore(Clone, I.getIterator());
1771 return Clone;
1772}
1773
1774Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1775 bool FoldWithMultiUse) {
1776 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1777 if (!SI->hasOneUse() && !FoldWithMultiUse)
1778 return nullptr;
1779
1780 Value *TV = SI->getTrueValue();
1781 Value *FV = SI->getFalseValue();
1782
1783 // Bool selects with constant operands can be folded to logical ops.
1784 if (SI->getType()->isIntOrIntVectorTy(1))
1785 return nullptr;
1786
1787 // Avoid breaking min/max reduction pattern,
1788 // which is necessary for vectorization later.
1789 if (isa<MinMaxIntrinsic>(&Op))
1790 for (Value *IntrinOp : Op.operands())
1791 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1792 for (Value *PhiOp : PN->operands())
1793 if (PhiOp == &Op)
1794 return nullptr;
1795
1796 // Test if a FCmpInst instruction is used exclusively by a select as
1797 // part of a minimum or maximum operation. If so, refrain from doing
1798 // any other folding. This helps out other analyses which understand
1799 // non-obfuscated minimum and maximum idioms. And in this case, at
1800 // least one of the comparison operands has at least one user besides
1801 // the compare (the select), which would often largely negate the
1802 // benefit of folding anyway.
1803 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1804 if (CI->hasOneUse()) {
1805 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1806 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1807 !CI->isCommutative())
1808 return nullptr;
1809 }
1810 }
1811
1812 // Make sure that one of the select arms folds successfully.
1813 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1814 Value *NewFV =
1815 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1816 if (!NewTV && !NewFV)
1817 return nullptr;
1818
1819 // Create an instruction for the arm that did not fold.
1820 if (!NewTV)
1821 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1822 if (!NewFV)
1823 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1824 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1825}
1826
1827static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1828 Value *InValue, BasicBlock *InBB,
1829 const DataLayout &DL,
1830 const SimplifyQuery SQ) {
1831 // NB: It is a precondition of this transform that the operands be
1832 // phi translatable!
1833 SmallVector<Value *> Ops;
1834 for (Value *Op : I.operands()) {
1835 if (Op == PN)
1836 Ops.push_back(InValue);
1837 else
1838 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1839 }
1840
1841 // Don't consider the simplification successful if we get back a constant
1842 // expression. That's just an instruction in hiding.
1843 // Also reject the case where we simplify back to the phi node. We wouldn't
1844 // be able to remove it in that case.
1845 Value *NewVal = simplifyInstructionWithOperands(
1846 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1847 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1848 return NewVal;
1849
1850 // Check if incoming PHI value can be replaced with constant
1851 // based on implied condition.
1852 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1853 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1854 if (TerminatorBI && TerminatorBI->isConditional() &&
1855 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1856 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1857 std::optional<bool> ImpliedCond = isImpliedCondition(
1858 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1859 DL, LHSIsTrue);
1860 if (ImpliedCond)
1861 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1862 }
1863
1864 return nullptr;
1865}
1866
1867Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1868 bool AllowMultipleUses) {
1869 unsigned NumPHIValues = PN->getNumIncomingValues();
1870 if (NumPHIValues == 0)
1871 return nullptr;
1872
1873 // We normally only transform phis with a single use. However, if a PHI has
1874 // multiple uses and they are all the same operation, we can fold *all* of the
1875 // uses into the PHI.
1876 bool OneUse = PN->hasOneUse();
1877 bool IdenticalUsers = false;
1878 if (!AllowMultipleUses && !OneUse) {
1879 // Walk the use list for the instruction, comparing them to I.
1880 for (User *U : PN->users()) {
1882 if (UI != &I && !I.isIdenticalTo(UI))
1883 return nullptr;
1884 }
1885 // Otherwise, we can replace *all* users with the new PHI we form.
1886 IdenticalUsers = true;
1887 }
1888
1889 // Check that all operands are phi-translatable.
1890 for (Value *Op : I.operands()) {
1891 if (Op == PN)
1892 continue;
1893
1894 // Non-instructions never require phi-translation.
1895 auto *I = dyn_cast<Instruction>(Op);
1896 if (!I)
1897 continue;
1898
1899 // Phi-translate can handle phi nodes in the same block.
1900 if (isa<PHINode>(I))
1901 if (I->getParent() == PN->getParent())
1902 continue;
1903
1904 // Operand dominates the block, no phi-translation necessary.
1905 if (DT.dominates(I, PN->getParent()))
1906 continue;
1907
1908 // Not phi-translatable, bail out.
1909 return nullptr;
1910 }
1911
1912 // Check to see whether the instruction can be folded into each phi operand.
1913 // If there is one operand that does not fold, remember the BB it is in.
1914 SmallVector<Value *> NewPhiValues;
1915 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1916 bool SeenNonSimplifiedInVal = false;
1917 for (unsigned i = 0; i != NumPHIValues; ++i) {
1918 Value *InVal = PN->getIncomingValue(i);
1919 BasicBlock *InBB = PN->getIncomingBlock(i);
1920
1921 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1922 NewPhiValues.push_back(NewVal);
1923 continue;
1924 }
1925
1926 // Handle some cases that can't be fully simplified, but where we know that
1927 // the two instructions will fold into one.
1928 auto WillFold = [&]() {
1929 if (!InVal->hasUseList() || !InVal->hasOneUser())
1930 return false;
1931
1932 // icmp of ucmp/scmp with constant will fold to icmp.
1933 const APInt *Ignored;
1934 if (isa<CmpIntrinsic>(InVal) &&
1935 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1936 return true;
1937
1938 // icmp eq zext(bool), 0 will fold to !bool.
1939 if (isa<ZExtInst>(InVal) &&
1940 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1941 match(&I,
1943 return true;
1944
1945 return false;
1946 };
1947
1948 if (WillFold()) {
1949 OpsToMoveUseToIncomingBB.push_back(i);
1950 NewPhiValues.push_back(nullptr);
1951 continue;
1952 }
1953
1954 if (!OneUse && !IdenticalUsers)
1955 return nullptr;
1956
1957 if (SeenNonSimplifiedInVal)
1958 return nullptr; // More than one non-simplified value.
1959 SeenNonSimplifiedInVal = true;
1960
1961 // If there is exactly one non-simplified value, we can insert a copy of the
1962 // operation in that block. However, if this is a critical edge, we would
1963 // be inserting the computation on some other paths (e.g. inside a loop).
1964 // Only do this if the pred block is unconditionally branching into the phi
1965 // block. Also, make sure that the pred block is not dead code.
1967 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1968 return nullptr;
1969
1970 NewPhiValues.push_back(nullptr);
1971 OpsToMoveUseToIncomingBB.push_back(i);
1972
1973 // If the InVal is an invoke at the end of the pred block, then we can't
1974 // insert a computation after it without breaking the edge.
1975 if (isa<InvokeInst>(InVal))
1976 if (cast<Instruction>(InVal)->getParent() == InBB)
1977 return nullptr;
1978
1979 // Do not push the operation across a loop backedge. This could result in
1980 // an infinite combine loop, and is generally non-profitable (especially
1981 // if the operation was originally outside the loop).
1982 if (isBackEdge(InBB, PN->getParent()))
1983 return nullptr;
1984 }
1985
1986 // Clone the instruction that uses the phi node and move it into the incoming
1987 // BB because we know that the next iteration of InstCombine will simplify it.
1989 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1990 Value *Op = PN->getIncomingValue(OpIndex);
1991 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1992
1993 Instruction *Clone = Clones.lookup(OpBB);
1994 if (!Clone) {
1995 Clone = I.clone();
1996 for (Use &U : Clone->operands()) {
1997 if (U == PN)
1998 U = Op;
1999 else
2000 U = U->DoPHITranslation(PN->getParent(), OpBB);
2001 }
2002 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2003 Clones.insert({OpBB, Clone});
2004 // We may have speculated the instruction.
2006 }
2007
2008 NewPhiValues[OpIndex] = Clone;
2009 }
2010
2011 // Okay, we can do the transformation: create the new PHI node.
2012 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2013 InsertNewInstBefore(NewPN, PN->getIterator());
2014 NewPN->takeName(PN);
2015 NewPN->setDebugLoc(PN->getDebugLoc());
2016
2017 for (unsigned i = 0; i != NumPHIValues; ++i)
2018 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2019
2020 if (IdenticalUsers) {
2021 // Collect and deduplicate users up-front to avoid iterator invalidation.
2023 for (User *U : PN->users()) {
2025 if (User == &I)
2026 continue;
2027 ToReplace.insert(User);
2028 }
2029 for (Instruction *I : ToReplace) {
2030 replaceInstUsesWith(*I, NewPN);
2031 eraseInstFromFunction(*I);
2032 }
2033 OneUse = true;
2034 }
2035
2036 if (OneUse) {
2037 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2038 }
2039 return replaceInstUsesWith(I, NewPN);
2040}
2041
2043 if (!BO.isAssociative())
2044 return nullptr;
2045
2046 // Find the interleaved binary ops.
2047 auto Opc = BO.getOpcode();
2048 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2049 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2050 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2051 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2052 !BO0->isAssociative() || !BO1->isAssociative() ||
2053 BO0->getParent() != BO1->getParent())
2054 return nullptr;
2055
2056 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2057 "Expected commutative instructions!");
2058
2059 // Find the matching phis, forming the recurrences.
2060 PHINode *PN0, *PN1;
2061 Value *Start0, *Step0, *Start1, *Step1;
2062 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2063 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2064 PN0->getParent() != PN1->getParent())
2065 return nullptr;
2066
2067 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2068 "Expected PHIs with two incoming values!");
2069
2070 // Convert the start and step values to constants.
2071 auto *Init0 = dyn_cast<Constant>(Start0);
2072 auto *Init1 = dyn_cast<Constant>(Start1);
2073 auto *C0 = dyn_cast<Constant>(Step0);
2074 auto *C1 = dyn_cast<Constant>(Step1);
2075 if (!Init0 || !Init1 || !C0 || !C1)
2076 return nullptr;
2077
2078 // Fold the recurrence constants.
2079 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2080 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2081 if (!Init || !C)
2082 return nullptr;
2083
2084 // Create the reduced PHI.
2085 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2086 "reduced.phi");
2087
2088 // Create the new binary op.
2089 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2090 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2091 // Intersect FMF flags for FADD and FMUL.
2092 FastMathFlags Intersect = BO0->getFastMathFlags() &
2093 BO1->getFastMathFlags() & BO.getFastMathFlags();
2094 NewBO->setFastMathFlags(Intersect);
2095 } else {
2096 OverflowTracking Flags;
2097 Flags.AllKnownNonNegative = false;
2098 Flags.AllKnownNonZero = false;
2099 Flags.mergeFlags(*BO0);
2100 Flags.mergeFlags(*BO1);
2101 Flags.mergeFlags(BO);
2102 Flags.applyFlags(*NewBO);
2103 }
2104 NewBO->takeName(&BO);
2105
2106 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2107 auto *V = PN0->getIncomingValue(I);
2108 auto *BB = PN0->getIncomingBlock(I);
2109 if (V == Init0) {
2110 assert(((PN1->getIncomingValue(0) == Init1 &&
2111 PN1->getIncomingBlock(0) == BB) ||
2112 (PN1->getIncomingValue(1) == Init1 &&
2113 PN1->getIncomingBlock(1) == BB)) &&
2114 "Invalid incoming block!");
2115 NewPN->addIncoming(Init, BB);
2116 } else if (V == BO0) {
2117 assert(((PN1->getIncomingValue(0) == BO1 &&
2118 PN1->getIncomingBlock(0) == BB) ||
2119 (PN1->getIncomingValue(1) == BO1 &&
2120 PN1->getIncomingBlock(1) == BB)) &&
2121 "Invalid incoming block!");
2122 NewPN->addIncoming(NewBO, BB);
2123 } else
2124 llvm_unreachable("Unexpected incoming value!");
2125 }
2126
2127 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2128 << "\n with " << *PN1 << "\n " << *BO1
2129 << '\n');
2130
2131 // Insert the new recurrence and remove the old (dead) ones.
2132 InsertNewInstWith(NewPN, PN0->getIterator());
2133 InsertNewInstWith(NewBO, BO0->getIterator());
2134
2141
2142 return replaceInstUsesWith(BO, NewBO);
2143}
2144
2145Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2146 // Attempt to fold binary operators whose operands are simple recurrences.
2147 if (auto *NewBO = foldBinopWithRecurrence(BO))
2148 return NewBO;
2149
2150 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2151 // we are guarding against replicating the binop in >1 predecessor.
2152 // This could miss matching a phi with 2 constant incoming values.
2153 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2154 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2155 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2156 Phi0->getNumOperands() != Phi1->getNumOperands())
2157 return nullptr;
2158
2159 // TODO: Remove the restriction for binop being in the same block as the phis.
2160 if (BO.getParent() != Phi0->getParent() ||
2161 BO.getParent() != Phi1->getParent())
2162 return nullptr;
2163
2164 // Fold if, in each incoming block, one of phi0's or phi1's incoming values is
2165 // the identity constant of the binary operator, so that the binop simplifies
2166 // to the other incoming value for that block.
2167 // For example:
2168 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2169 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2170 // %add = add i32 %phi0, %phi1
2171 // ==>
2172 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2173 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2174 /*AllowRHSConstant*/ false);
2175 if (C) {
2176 SmallVector<Value *, 4> NewIncomingValues;
2177 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2178 auto &Phi0Use = std::get<0>(T);
2179 auto &Phi1Use = std::get<1>(T);
2180 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2181 return false;
2182 Value *Phi0UseV = Phi0Use.get();
2183 Value *Phi1UseV = Phi1Use.get();
2184 if (Phi0UseV == C)
2185 NewIncomingValues.push_back(Phi1UseV);
2186 else if (Phi1UseV == C)
2187 NewIncomingValues.push_back(Phi0UseV);
2188 else
2189 return false;
2190 return true;
2191 };
2192
2193 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2194 CanFoldIncomingValuePair)) {
2195 PHINode *NewPhi =
2196 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2197 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2198 "The number of collected incoming values should equal the number "
2199 "of the original PHINode operands!");
2200 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2201 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2202 return NewPhi;
2203 }
2204 }
2205
2206 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2207 return nullptr;
2208
2209 // Match a pair of incoming constants for one of the predecessor blocks.
2210 BasicBlock *ConstBB, *OtherBB;
2211 Constant *C0, *C1;
2212 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2213 ConstBB = Phi0->getIncomingBlock(0);
2214 OtherBB = Phi0->getIncomingBlock(1);
2215 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2216 ConstBB = Phi0->getIncomingBlock(1);
2217 OtherBB = Phi0->getIncomingBlock(0);
2218 } else {
2219 return nullptr;
2220 }
2221 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2222 return nullptr;
2223
2224 // The block that we are hoisting to must reach here unconditionally.
2225 // Otherwise, we could be speculatively executing an expensive or
2226 // non-speculative op.
2227 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2228 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2229 !DT.isReachableFromEntry(OtherBB))
2230 return nullptr;
2231
2232 // TODO: This check could be tightened to only apply to binops (div/rem) that
2233 // are not safe to speculatively execute. But that could allow hoisting
2234 // potentially expensive instructions (fdiv for example).
2235 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2236 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2237 return nullptr;
2238
2239 // Fold constants for the predecessor block with constant incoming values.
2240 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2241 if (!NewC)
2242 return nullptr;
2243
2244 // Make a new binop in the predecessor block with the non-constant incoming
2245 // values.
2246 Builder.SetInsertPoint(PredBlockBranch);
2247 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2248 Phi0->getIncomingValueForBlock(OtherBB),
2249 Phi1->getIncomingValueForBlock(OtherBB));
2250 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2251 NotFoldedNewBO->copyIRFlags(&BO);
2252
2253 // Replace the binop with a phi of the new values. The old phis are dead.
2254 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2255 NewPhi->addIncoming(NewBO, OtherBB);
2256 NewPhi->addIncoming(NewC, ConstBB);
2257 return NewPhi;
2258}
2259
2260Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2261 if (!isa<Constant>(I.getOperand(1)))
2262 return nullptr;
2263
2264 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2265 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2266 return NewSel;
2267 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2268 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2269 return NewPhi;
2270 }
2271 return nullptr;
2272}
2273
2274static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2275 // If this GEP has only 0 indices, it is the same pointer as
2276 // Src. If Src is not a trivial GEP too, don't combine
2277 // the indices.
2278 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2279 !Src.hasOneUse())
2280 return false;
2281 return true;
2282}
2283
2284/// Find a constant NewC that has property:
2285/// shuffle(NewC, ShMask) = C
2286/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2287///
2288/// A 1-to-1 mapping is not required. Example:
2289/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2290static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2291 VectorType *NewCTy) {
2292 if (isa<ScalableVectorType>(NewCTy)) {
2293 Constant *Splat = C->getSplatValue();
2294 if (!Splat)
2295 return nullptr;
2297 }
2298
2299 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2300 cast<FixedVectorType>(C->getType())->getNumElements())
2301 return nullptr;
2302
2303 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2304 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2305 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2306 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2307 for (unsigned I = 0; I < NumElts; ++I) {
2308 Constant *CElt = C->getAggregateElement(I);
2309 if (ShMask[I] >= 0) {
2310 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2311 Constant *NewCElt = NewVecC[ShMask[I]];
2312 // Bail out if:
2313 // 1. The constant vector contains a constant expression.
2314 // 2. The shuffle needs an element of the constant vector that can't
2315 // be mapped to a new constant vector.
2316 // 3. This is a widening shuffle that copies elements of V1 into the
2317 // extended elements (extending with poison is allowed).
2318 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2319 I >= NewCNumElts)
2320 return nullptr;
2321 NewVecC[ShMask[I]] = CElt;
2322 }
2323 }
2324 return ConstantVector::get(NewVecC);
2325}
2326
2327Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2328 if (!isa<VectorType>(Inst.getType()))
2329 return nullptr;
2330
2331 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2332 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2333 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2334 cast<VectorType>(Inst.getType())->getElementCount());
2335 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2336 cast<VectorType>(Inst.getType())->getElementCount());
2337
2338 // If both operands of the binop are vector concatenations, then perform the
2339 // narrow binop on each pair of the source operands followed by concatenation
2340 // of the results.
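  // Illustrative example (hypothetical values): with %a, %b, %c, %d of type
  // <2 x i32>,
  //   %lhs = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %rhs = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %r   = add <4 x i32> %lhs, %rhs
  // becomes two narrow adds (%a + %c and %b + %d) followed by the same
  // concatenating shuffle.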
2341 Value *L0, *L1, *R0, *R1;
2342 ArrayRef<int> Mask;
2343 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2344 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2345 LHS->hasOneUse() && RHS->hasOneUse() &&
2346 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2347 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2348 // This transform does not have the speculative execution constraint as
2349 // below because the shuffle is a concatenation. The new binops are
2350 // operating on exactly the same elements as the existing binop.
2351 // TODO: We could ease the mask requirement to allow different undef lanes,
2352 // but that requires an analysis of the binop-with-undef output value.
2353 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2354 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2355 BO->copyIRFlags(&Inst);
2356 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2357 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2358 BO->copyIRFlags(&Inst);
2359 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2360 }
2361
2362 auto createBinOpReverse = [&](Value *X, Value *Y) {
2363 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2364 if (auto *BO = dyn_cast<BinaryOperator>(V))
2365 BO->copyIRFlags(&Inst);
2366 Module *M = Inst.getModule();
2368 M, Intrinsic::vector_reverse, V->getType());
2369 return CallInst::Create(F, V);
2370 };
2371
2372 // NOTE: Reverse shuffles don't require the speculative execution protection
2373 // below because they don't affect which lanes take part in the computation.
2374
2375 Value *V1, *V2;
2376 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2377 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2378 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2379 (LHS->hasOneUse() || RHS->hasOneUse() ||
2380 (LHS == RHS && LHS->hasNUses(2))))
2381 return createBinOpReverse(V1, V2);
2382
2383 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2384 if (LHS->hasOneUse() && isSplatValue(RHS))
2385 return createBinOpReverse(V1, RHS);
2386 }
2387 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2388 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2389 return createBinOpReverse(LHS, V2);
2390
2391 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2392 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2393 if (auto *BO = dyn_cast<BinaryOperator>(V))
2394 BO->copyIRFlags(&Inst);
2395
2396 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2397 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2398 Module *M = Inst.getModule();
2400 M, Intrinsic::experimental_vp_reverse, V->getType());
2401 return CallInst::Create(F, {V, AllTrueMask, EVL});
2402 };
2403
2404 Value *EVL;
2406 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2407 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2409 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2410 (LHS->hasOneUse() || RHS->hasOneUse() ||
2411 (LHS == RHS && LHS->hasNUses(2))))
2412 return createBinOpVPReverse(V1, V2, EVL);
2413
2414 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2415 if (LHS->hasOneUse() && isSplatValue(RHS))
2416 return createBinOpVPReverse(V1, RHS, EVL);
2417 }
2418 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2419 else if (isSplatValue(LHS) &&
2421 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2422 return createBinOpVPReverse(LHS, V2, EVL);
2423
2424 // It may not be safe to reorder shuffles and things like div, urem, etc.
2425 // because we may trap when executing those ops on unknown vector elements.
2426 // See PR20059.
2428 return nullptr;
2429
2430 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2431 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2432 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2433 BO->copyIRFlags(&Inst);
2434 return new ShuffleVectorInst(XY, M);
2435 };
2436
2437 // If both arguments of the binary operation are shuffles that use the same
2438 // mask and shuffle within a single vector, move the shuffle after the binop.
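  // Illustrative example: add (shuffle %v, poison, <1,0>), (shuffle %w, poison,
  // <1,0>) --> shuffle (add %v, %w), <1,0>, since the binop acts lane-wise
  // either way.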
2439 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2440 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2441 V1->getType() == V2->getType() &&
2442 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2443 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2444 return createBinOpShuffle(V1, V2, Mask);
2445 }
2446
2447 // If both arguments of a commutative binop are select-shuffles that use the
2448 // same mask with commuted operands, the shuffles are unnecessary.
2449 if (Inst.isCommutative() &&
2450 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2451 match(RHS,
2452 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2453 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2454 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2455 // TODO: Allow shuffles that contain undefs in the mask?
2456 // That is legal, but it reduces undef knowledge.
2457 // TODO: Allow arbitrary shuffles by shuffling after binop?
2458 // That might be legal, but we have to deal with poison.
2459 if (LShuf->isSelect() &&
2460 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2461 RShuf->isSelect() &&
2462 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2463 // Example:
2464 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2465 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2466 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2467 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2468 NewBO->copyIRFlags(&Inst);
2469 return NewBO;
2470 }
2471 }
2472
2473 // If one argument is a shuffle within one vector and the other is a constant,
2474 // try moving the shuffle after the binary operation. This canonicalization
2475 // intends to move shuffles closer to other shuffles and binops closer to
2476 // other binops, so they can be folded. It may also enable demanded elements
2477 // transforms.
2478 Constant *C;
2480 m_Mask(Mask))),
2481 m_ImmConstant(C)))) {
2482 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2483 "Shuffle should not change scalar type");
2484
2485 bool ConstOp1 = isa<Constant>(RHS);
2486 if (Constant *NewC =
2488 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2489 // which will cause UB for div/rem. Mask them with a safe constant.
2490 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2491 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2492
2493 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2494 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2495 Value *NewLHS = ConstOp1 ? V1 : NewC;
2496 Value *NewRHS = ConstOp1 ? NewC : V1;
2497 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2498 }
2499 }
2500
2501 // Try to reassociate to sink a splat shuffle after a binary operation.
2502 if (Inst.isAssociative() && Inst.isCommutative()) {
2503 // Canonicalize shuffle operand as LHS.
2504 if (isa<ShuffleVectorInst>(RHS))
2505 std::swap(LHS, RHS);
2506
2507 Value *X;
2508 ArrayRef<int> MaskC;
2509 int SplatIndex;
2510 Value *Y, *OtherOp;
2511 if (!match(LHS,
2512 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2513 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2514 X->getType() != Inst.getType() ||
2515 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2516 return nullptr;
2517
2518 // FIXME: This may not be safe if the analysis allows undef elements. By
2519 // moving 'Y' before the splat shuffle, we are implicitly assuming
2520 // that it is not undef/poison at the splat index.
2521 if (isSplatValue(OtherOp, SplatIndex)) {
2522 std::swap(Y, OtherOp);
2523 } else if (!isSplatValue(Y, SplatIndex)) {
2524 return nullptr;
2525 }
2526
2527 // X and Y are splatted values, so perform the binary operation on those
2528 // values followed by a splat followed by the 2nd binary operation:
2529 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2530 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2531 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2532 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2533 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2534
2535 // Intersect FMF on both new binops. Other (poison-generating) flags are
2536 // dropped to be safe.
2537 if (isa<FPMathOperator>(R)) {
2538 R->copyFastMathFlags(&Inst);
2539 R->andIRFlags(RHS);
2540 }
2541 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2542 NewInstBO->copyIRFlags(R);
2543 return R;
2544 }
2545
2546 return nullptr;
2547}
2548
2549/// Try to narrow the width of a binop if at least 1 operand is an extend of
2550/// a value. This requires a potentially expensive known bits check to make
2551/// sure the narrow op does not overflow.
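/// For example (illustrative, and only when known bits prove the narrow add
/// cannot overflow): add (zext i8 %x to i32), (zext i8 %y to i32)
/// --> zext (add nuw i8 %x, %y) to i32.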
2552Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2553 // We need at least one extended operand.
2554 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2555
2556 // If this is a sub, we swap the operands since we always want an extension
2557 // on the RHS. The LHS can be an extension or a constant.
2558 if (BO.getOpcode() == Instruction::Sub)
2559 std::swap(Op0, Op1);
2560
2561 Value *X;
2562 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2563 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2564 return nullptr;
2565
2566 // If both operands are the same extension from the same source type and we
2567 // can eliminate at least one (hasOneUse), this might work.
2568 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2569 Value *Y;
2570 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2571 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2572 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2573 // If that did not match, see if we have a suitable constant operand.
2574 // Truncating and extending must produce the same constant.
2575 Constant *WideC;
2576 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2577 return nullptr;
2578 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2579 if (!NarrowC)
2580 return nullptr;
2581 Y = NarrowC;
2582 }
2583
2584 // Swap back now that we found our operands.
2585 if (BO.getOpcode() == Instruction::Sub)
2586 std::swap(X, Y);
2587
2588 // Both operands have narrow versions. Last step: the math must not overflow
2589 // in the narrow width.
2590 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2591 return nullptr;
2592
2593 // bo (ext X), (ext Y) --> ext (bo X, Y)
2594 // bo (ext X), C --> ext (bo X, C')
2595 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2596 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2597 if (IsSext)
2598 NewBinOp->setHasNoSignedWrap();
2599 else
2600 NewBinOp->setHasNoUnsignedWrap();
2601 }
2602 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2603}
2604
2605/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2606/// transform.
2611
2612/// Thread a GEP operation with constant indices through the constant true/false
2613/// arms of a select.
2614static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2615 InstCombiner::BuilderTy &Builder) {
2616 if (!GEP.hasAllConstantIndices())
2617 return nullptr;
2618
2619 Instruction *Sel;
2620 Value *Cond;
2621 Constant *TrueC, *FalseC;
2622 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2623 !match(Sel,
2624 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2625 return nullptr;
2626
2627 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2628 // Propagate 'inbounds' and metadata from existing instructions.
2629 // Note: using IRBuilder to create the constants for efficiency.
2630 SmallVector<Value *, 4> IndexC(GEP.indices());
2631 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2632 Type *Ty = GEP.getSourceElementType();
2633 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2634 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2635 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2636}
2637
2638// Canonicalization:
2639// gep T, (gep i8, base, C1), (Index + C2) into
2640// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
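// Illustrative example with T = i32 (alloc size 4), C1 = 4, C2 = 1:
//   gep i32, (gep i8, %base, 4), (add %idx, 1)
// becomes
//   gep i32, (gep i8, %base, 8), %idx
// since the folded constant byte offset is 4 + 1 * 4 = 8.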
2641static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2642 GEPOperator *Src,
2643 InstCombinerImpl &IC) {
2644 if (GEP.getNumIndices() != 1)
2645 return nullptr;
2646 auto &DL = IC.getDataLayout();
2647 Value *Base;
2648 const APInt *C1;
2649 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2650 return nullptr;
2651 Value *VarIndex;
2652 const APInt *C2;
2653 Type *PtrTy = Src->getType()->getScalarType();
2654 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2655 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2656 return nullptr;
2657 if (C1->getBitWidth() != IndexSizeInBits ||
2658 C2->getBitWidth() != IndexSizeInBits)
2659 return nullptr;
2660 Type *BaseType = GEP.getSourceElementType();
2662 return nullptr;
2663 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2664 APInt NewOffset = TypeSize * *C2 + *C1;
2665 if (NewOffset.isZero() ||
2666 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2667 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2668 if (GEP.hasNoUnsignedWrap() &&
2669 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2670 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2671 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2672 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2673 Flags |= GEPNoWrapFlags::inBounds();
2674 }
2675
2676 Value *GEPConst =
2677 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2678 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2679 }
2680
2681 return nullptr;
2682}
2683
2684/// Combine constant offsets separated by variable offsets.
2685/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2686static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2687 InstCombinerImpl &IC) {
2688 if (!GEP.hasAllConstantIndices())
2689 return nullptr;
2690
2693 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2694 while (true) {
2695 if (!InnerGEP)
2696 return nullptr;
2697
2698 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2699 if (InnerGEP->hasAllConstantIndices())
2700 break;
2701
2702 if (!InnerGEP->hasOneUse())
2703 return nullptr;
2704
2705 Skipped.push_back(InnerGEP);
2706 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2707 }
2708
2709 // The two constant offset GEPs are directly adjacent: Let normal offset
2710 // merging handle it.
2711 if (Skipped.empty())
2712 return nullptr;
2713
2714 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2715 // if profitable.
2716 if (!InnerGEP->hasOneUse())
2717 return nullptr;
2718
2719 // Don't bother with vector splats.
2720 Type *Ty = GEP.getType();
2721 if (InnerGEP->getType() != Ty)
2722 return nullptr;
2723
2724 const DataLayout &DL = IC.getDataLayout();
2725 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2726 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2727 !InnerGEP->accumulateConstantOffset(DL, Offset))
2728 return nullptr;
2729
2730 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2731 for (GetElementPtrInst *SkippedGEP : Skipped)
2732 SkippedGEP->setNoWrapFlags(NW);
2733
2734 return IC.replaceInstUsesWith(
2735 GEP,
2736 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2737 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2738}
2739
2740Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2741 GEPOperator *Src) {
2742 // Combine Indices - If the source pointer to this getelementptr instruction
2743 // is a getelementptr instruction with matching element type, combine the
2744 // indices of the two getelementptr instructions into a single instruction.
2745 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2746 return nullptr;
2747
2748 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2749 return I;
2750
2751 if (auto *I = combineConstantOffsets(GEP, *this))
2752 return I;
2753
2754 if (Src->getResultElementType() != GEP.getSourceElementType())
2755 return nullptr;
2756
2757 // Find out whether the last index in the source GEP is a sequential idx.
2758 bool EndsWithSequential = false;
2759 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2760 I != E; ++I)
2761 EndsWithSequential = I.isSequential();
2762 if (!EndsWithSequential)
2763 return nullptr;
2764
2765 // Replace: gep (gep %P, long B), long A, ...
2766 // With: T = long A+B; gep %P, T, ...
2767 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2768 Value *GO1 = GEP.getOperand(1);
2769
2770 // If they aren't the same type, then the input hasn't been processed
2771 // by the loop above yet (which canonicalizes sequential index types to
2772 // intptr_t). Just avoid transforming this until the input has been
2773 // normalized.
2774 if (SO1->getType() != GO1->getType())
2775 return nullptr;
2776
2777 Value *Sum =
2778 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2779 // Only do the combine when we are sure the cost after the
2780 // merge is never more than that before the merge.
2781 if (Sum == nullptr)
2782 return nullptr;
2783
2784 SmallVector<Value *, 8> Indices;
2785 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2786 Indices.push_back(Sum);
2787 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2788
2789 // Don't create GEPs with more than one non-zero index.
2790 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2791 auto *C = dyn_cast<Constant>(Idx);
2792 return !C || !C->isNullValue();
2793 });
2794 if (NumNonZeroIndices > 1)
2795 return nullptr;
2796
2797 return replaceInstUsesWith(
2798 GEP, Builder.CreateGEP(
2799 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2801}
2802
2803Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2804 BuilderTy *Builder,
2805 bool &DoesConsume, unsigned Depth) {
2806 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2807 // ~(~(X)) -> X.
2808 Value *A, *B;
2809 if (match(V, m_Not(m_Value(A)))) {
2810 DoesConsume = true;
2811 return A;
2812 }
2813
2814 Constant *C;
2815 // Constants can be considered to be not'ed values.
2816 if (match(V, m_ImmConstant(C)))
2817 return ConstantExpr::getNot(C);
2818
2819 if (Depth++ >= MaxAnalysisRecursionDepth)
2820 return nullptr;
2821
2822 // The rest of the cases require that we invert all uses so don't bother
2823 // doing the analysis if we know we can't use the result.
2824 if (!WillInvertAllUses)
2825 return nullptr;
2826
2827 // Compares can be inverted if all of their uses are being modified to use
2828 // the ~V.
2829 if (auto *I = dyn_cast<CmpInst>(V)) {
2830 if (Builder != nullptr)
2831 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2832 I->getOperand(1));
2833 return NonNull;
2834 }
2835
2836 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2837 // `(-1 - B) - A` if we are willing to invert all of the uses.
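  // (Using ~V == -1 - V: ~(A + B) == -1 - (A + B) == (-1 - B) - A == ~B - A.)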
2838 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2839 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2840 DoesConsume, Depth))
2841 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2842 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2843 DoesConsume, Depth))
2844 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2845 return nullptr;
2846 }
2847
2848 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2849 // into `A ^ B` if we are willing to invert all of the uses.
2850 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2851 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2852 DoesConsume, Depth))
2853 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2854 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2855 DoesConsume, Depth))
2856 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2857 return nullptr;
2858 }
2859
2860 // If `V` is of the form `A - B` then `-1 - V` can be folded into
2861 // `(-1 - A) + B` if we are willing to invert all of the uses.
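  // (Using ~V == -1 - V: ~(A - B) == -1 - (A - B) == (-1 - A) + B == ~A + B.)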
2862 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2863 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2864 DoesConsume, Depth))
2865 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2866 return nullptr;
2867 }
2868
2869 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2870 // into `A s>> B` if we are willing to invert all of the uses.
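  // (This relies on ashr commuting with bitwise not: ~(X s>> B) == (~X) s>> B,
  // so for X == ~A the inverted value is simply A s>> B.)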
2871 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2872 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2873 DoesConsume, Depth))
2874 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2875 return nullptr;
2876 }
2877
2878 Value *Cond;
2879 // LogicOps are special in that we canonicalize them at the cost of an
2880 // instruction.
2881 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2883 // Selects/min/max with invertible operands are freely invertible
2884 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2885 bool LocalDoesConsume = DoesConsume;
2886 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2887 LocalDoesConsume, Depth))
2888 return nullptr;
2889 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2890 LocalDoesConsume, Depth)) {
2891 DoesConsume = LocalDoesConsume;
2892 if (Builder != nullptr) {
2893 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2894 DoesConsume, Depth);
2895 assert(NotB != nullptr &&
2896 "Unable to build inverted value for known freely invertable op");
2897 if (auto *II = dyn_cast<IntrinsicInst>(V))
2898 return Builder->CreateBinaryIntrinsic(
2899 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2900 return Builder->CreateSelect(Cond, NotA, NotB);
2901 }
2902 return NonNull;
2903 }
2904 }
2905
2906 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2907 bool LocalDoesConsume = DoesConsume;
2909 for (Use &U : PN->operands()) {
2910 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2911 Value *NewIncomingVal = getFreelyInvertedImpl(
2912 U.get(), /*WillInvertAllUses=*/false,
2913 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2914 if (NewIncomingVal == nullptr)
2915 return nullptr;
2916 // Make sure that we can safely erase the original PHI node.
2917 if (NewIncomingVal == V)
2918 return nullptr;
2919 if (Builder != nullptr)
2920 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2921 }
2922
2923 DoesConsume = LocalDoesConsume;
2924 if (Builder != nullptr) {
2926 Builder->SetInsertPoint(PN);
2927 PHINode *NewPN =
2928 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2929 for (auto [Val, Pred] : IncomingValues)
2930 NewPN->addIncoming(Val, Pred);
2931 return NewPN;
2932 }
2933 return NonNull;
2934 }
2935
2936 if (match(V, m_SExtLike(m_Value(A)))) {
2937 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2938 DoesConsume, Depth))
2939 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2940 return nullptr;
2941 }
2942
2943 if (match(V, m_Trunc(m_Value(A)))) {
2944 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2945 DoesConsume, Depth))
2946 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2947 return nullptr;
2948 }
2949
2950 // De Morgan's Laws:
2951 // (~(A | B)) -> (~A & ~B)
2952 // (~(A & B)) -> (~A | ~B)
2953 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2954 bool IsLogical, Value *A,
2955 Value *B) -> Value * {
2956 bool LocalDoesConsume = DoesConsume;
2957 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2958 LocalDoesConsume, Depth))
2959 return nullptr;
2960 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2961 LocalDoesConsume, Depth)) {
2962 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2963 LocalDoesConsume, Depth);
2964 DoesConsume = LocalDoesConsume;
2965 if (IsLogical)
2966 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2967 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2968 }
2969
2970 return nullptr;
2971 };
2972
2973 if (match(V, m_Or(m_Value(A), m_Value(B))))
2974 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2975 B);
2976
2977 if (match(V, m_And(m_Value(A), m_Value(B))))
2978 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2979 B);
2980
2981 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2982 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2983 B);
2984
2985 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2986 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2987 B);
2988
2989 return nullptr;
2990}
2991
2992/// Return true if we should canonicalize the gep to an i8 ptradd.
2993static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
2994 Value *PtrOp = GEP.getOperand(0);
2995 Type *GEPEltType = GEP.getSourceElementType();
2996 if (GEPEltType->isIntegerTy(8))
2997 return false;
2998
2999 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3000 // intrinsic. This has better support in BasicAA.
3001 if (GEPEltType->isScalableTy())
3002 return true;
3003
3004 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3005 // together.
3006 if (GEP.getNumIndices() == 1 &&
3007 match(GEP.getOperand(1),
3009 m_Shl(m_Value(), m_ConstantInt())))))
3010 return true;
3011
3012 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3013 // possibly be merged together.
3014 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3015 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3016 any_of(GEP.indices(), [](Value *V) {
3017 const APInt *C;
3018 return match(V, m_APInt(C)) && !C->isZero();
3019 });
3020}
3021
3022static Value *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3023 IRBuilderBase &Builder) {
3024 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3025 if (!Op1)
3026 return nullptr;
3027
3028 // Don't fold a GEP into itself through a PHI node. This can only happen
3029 // through the back-edge of a loop. Folding a GEP into itself means that
3030 // the value of the previous iteration needs to be stored in the meantime,
3031 // thus requiring an additional register variable to be live, but not
3032 // actually achieving anything (the GEP still needs to be executed once per
3033 // loop iteration).
3034 if (Op1 == &GEP)
3035 return nullptr;
3036 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3037
3038 int DI = -1;
3039
3040 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3041 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3042 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3043 Op1->getSourceElementType() != Op2->getSourceElementType())
3044 return nullptr;
3045
3046 // As for Op1 above, don't try to fold a GEP into itself.
3047 if (Op2 == &GEP)
3048 return nullptr;
3049
3050 // Keep track of the type as we walk the GEP.
3051 Type *CurTy = nullptr;
3052
3053 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3054 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3055 return nullptr;
3056
3057 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3058 if (DI == -1) {
3059 // We have not seen any differences in the GEPs feeding the
3060 // PHI yet, so we record this one if it is allowed to be a
3061 // variable.
3062
3063 // The first two arguments can vary for any GEP, the rest have to be
3064 // static for struct slots
3065 if (J > 1) {
3066 assert(CurTy && "No current type?");
3067 if (CurTy->isStructTy())
3068 return nullptr;
3069 }
3070
3071 DI = J;
3072 } else {
3073 // The GEP is different by more than one input. While this could be
3074 // extended to support GEPs that vary by more than one variable it
3075 // doesn't make sense since it greatly increases the complexity and
3076 // would result in an R+R+R addressing mode which no backend
3077 // directly supports and would need to be broken into several
3078 // simpler instructions anyway.
3079 return nullptr;
3080 }
3081 }
3082
3083 // Sink down a layer of the type for the next iteration.
3084 if (J > 0) {
3085 if (J == 1) {
3086 CurTy = Op1->getSourceElementType();
3087 } else {
3088 CurTy =
3089 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3090 }
3091 }
3092 }
3093
3094 NW &= Op2->getNoWrapFlags();
3095 }
3096
3097 // If not all GEPs are identical we'll have to create a new PHI node.
3098 // Check that the old PHI node has only one use so that it will get
3099 // removed.
3100 if (DI != -1 && !PN->hasOneUse())
3101 return nullptr;
3102
3103 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3104 NewGEP->setNoWrapFlags(NW);
3105
3106 if (DI == -1) {
3107 // All the GEPs feeding the PHI are identical. Clone one down into our
3108 // BB so that it can be merged with the current GEP.
3109 } else {
3110 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3111 // into the current block so it can be merged, and create a new PHI to
3112 // set that index.
3113 PHINode *NewPN;
3114 {
3115 IRBuilderBase::InsertPointGuard Guard(Builder);
3116 Builder.SetInsertPoint(PN);
3117 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3118 PN->getNumOperands());
3119 }
3120
3121 for (auto &I : PN->operands())
3122 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3123 PN->getIncomingBlock(I));
3124
3125 NewGEP->setOperand(DI, NewPN);
3126 }
3127
3128 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3129 return NewGEP;
3130}
3131
3132Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3133 Value *PtrOp = GEP.getOperand(0);
3134 SmallVector<Value *, 8> Indices(GEP.indices());
3135 Type *GEPType = GEP.getType();
3136 Type *GEPEltType = GEP.getSourceElementType();
3137 if (Value *V =
3138 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3139 SQ.getWithInstruction(&GEP)))
3140 return replaceInstUsesWith(GEP, V);
3141
3142 // For vector geps, use the generic demanded vector support.
3143 // Skip if GEP return type is scalable. The number of elements is unknown at
3144 // compile-time.
3145 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3146 auto VWidth = GEPFVTy->getNumElements();
3147 APInt PoisonElts(VWidth, 0);
3148 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3149 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3150 PoisonElts)) {
3151 if (V != &GEP)
3152 return replaceInstUsesWith(GEP, V);
3153 return &GEP;
3154 }
3155 }
3156
3157 // Eliminate unneeded casts for indices, and replace indices that displace
3158 // by multiples of a zero-sized type with zero.
3159 bool MadeChange = false;
3160
3161 // Index width may not be the same width as pointer width.
3162 // Data layout chooses the right type based on supported integer types.
3163 Type *NewScalarIndexTy =
3164 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3165
3166 gep_type_iterator GTI = gep_type_begin(GEP);
3167 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3168 ++I, ++GTI) {
3169 // Skip indices into struct types.
3170 if (GTI.isStruct())
3171 continue;
3172
3173 Type *IndexTy = (*I)->getType();
3174 Type *NewIndexType =
3175 IndexTy->isVectorTy()
3176 ? VectorType::get(NewScalarIndexTy,
3177 cast<VectorType>(IndexTy)->getElementCount())
3178 : NewScalarIndexTy;
3179
3180 // If the element type has zero size then any index over it is equivalent
3181 // to an index of zero, so replace it with zero if it is not zero already.
3182 Type *EltTy = GTI.getIndexedType();
3183 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3184 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3185 *I = Constant::getNullValue(NewIndexType);
3186 MadeChange = true;
3187 }
3188
3189 if (IndexTy != NewIndexType) {
3190 // If we are using a wider index than needed for this platform, shrink
3191 // it to what we need. If narrower, sign-extend it to what we need.
3192 // This explicit cast can make subsequent optimizations more obvious.
3193 if (IndexTy->getScalarSizeInBits() <
3194 NewIndexType->getScalarSizeInBits()) {
3195 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3196 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3197 else
3198 *I = Builder.CreateSExt(*I, NewIndexType);
3199 } else {
3200 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3201 GEP.hasNoUnsignedSignedWrap());
3202 }
3203 MadeChange = true;
3204 }
3205 }
3206 if (MadeChange)
3207 return &GEP;
3208
3209 // Canonicalize constant GEPs to i8 type.
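  // For example: gep i32, %p, 3 becomes a byte-based gep i8, %p, 12 (assuming
  // i32 has an alloc size of 4 bytes).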
3210 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3211 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3212 if (GEP.accumulateConstantOffset(DL, Offset))
3213 return replaceInstUsesWith(
3214 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3215 GEP.getNoWrapFlags()));
3216 }
3217
3218 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3219 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3220 Value *NewGEP =
3221 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3222 return replaceInstUsesWith(GEP, NewGEP);
3223 }
3224
3225 // Strip trailing zero indices.
3226 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3227 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3228 return replaceInstUsesWith(
3229 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3230 drop_end(Indices), "", GEP.getNoWrapFlags()));
3231 }
3232
3233 // Strip leading zero indices.
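// Illustrative example: "getelementptr [4 x i32], ptr %p, i64 0, i64 %i"
// becomes "getelementptr i32, ptr %p, i64 %i" once the leading zero index is
// dropped and the source element type is stepped into the array.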
3234 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3235 if (FirstIdx && FirstIdx->isNullValue() &&
3236 !FirstIdx->getType()->isVectorTy()) {
3237 gep_type_iterator GTI = gep_type_begin(GEP);
3238 ++GTI;
3239 if (!GTI.isStruct())
3240 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3241 GEP.getPointerOperand(),
3242 drop_begin(Indices), "",
3243 GEP.getNoWrapFlags()));
3244 }
3245
3246 // Scalarize vector operands; prefer splat-of-gep as canonical form.
3247 // Note that this loses information about undef lanes; we run it after
3248 // demanded bits to partially mitigate that loss.
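// Illustrative sketch: if %ps and %is are splats of %p and %i, then
//   getelementptr i32, <2 x ptr> %ps, <2 x i64> %is
// is rewritten as the scalar "getelementptr i32, ptr %p, i64 %i" followed by a
// vector splat of that result.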
3249 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3250 return Op->getType()->isVectorTy() && getSplatValue(Op);
3251 })) {
3252 SmallVector<Value *> NewOps;
3253 for (auto &Op : GEP.operands()) {
3254 if (Op->getType()->isVectorTy())
3255 if (Value *Scalar = getSplatValue(Op)) {
3256 NewOps.push_back(Scalar);
3257 continue;
3258 }
3259 NewOps.push_back(Op);
3260 }
3261
3262 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3263 ArrayRef(NewOps).drop_front(), GEP.getName(),
3264 GEP.getNoWrapFlags());
3265 if (!Res->getType()->isVectorTy()) {
3266 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3267 Res = Builder.CreateVectorSplat(EC, Res);
3268 }
3269 return replaceInstUsesWith(GEP, Res);
3270 }
3271
3272 bool SeenNonZeroIndex = false;
3273 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3274 auto *C = dyn_cast<Constant>(Idx);
3275 if (C && C->isNullValue())
3276 continue;
3277
3278 if (!SeenNonZeroIndex) {
3279 SeenNonZeroIndex = true;
3280 continue;
3281 }
3282
3283 // GEP has multiple non-zero indices: Split it.
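// Illustrative example (assumed IR):
//   %gep = getelementptr [4 x i32], ptr %p, i64 %a, i64 %b
// is split into
//   %gep.split = getelementptr [4 x i32], ptr %p, i64 %a
//   %gep       = getelementptr [4 x i32], ptr %gep.split, i64 0, i64 %b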
3284 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3285 Value *FrontGEP =
3286 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3287 GEP.getName() + ".split", GEP.getNoWrapFlags());
3288
3289 SmallVector<Value *> BackIndices;
3290 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3291 append_range(BackIndices, drop_begin(Indices, IdxNum));
3292 return GetElementPtrInst::Create(
3293 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3294 BackIndices, GEP.getNoWrapFlags());
3295 }
3296
3297 // Check to see if the inputs to the PHI node are getelementptr instructions.
3298 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3299 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3300 return replaceOperand(GEP, 0, NewPtrOp);
3301 }
3302
3303 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3304 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3305 return I;
3306
3307 if (GEP.getNumIndices() == 1) {
3308 unsigned AS = GEP.getPointerAddressSpace();
3309 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3310 DL.getIndexSizeInBits(AS)) {
3311 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3312
3313 if (TyAllocSize == 1) {
3314 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3315 // but only if the result pointer is only used as if it were an integer,
3316 // or both point to the same underlying object (otherwise provenance is
3317 // not necessarily retained).
3318 Value *X = GEP.getPointerOperand();
3319 Value *Y;
3320 if (match(GEP.getOperand(1),
3321 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3322 GEPType == Y->getType()) {
3323 bool HasSameUnderlyingObject =
3324 getUnderlyingObject(X) == getUnderlyingObject(Y);
3325 bool Changed = false;
3326 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3327 bool ShouldReplace = HasSameUnderlyingObject ||
3328 isa<ICmpInst>(U.getUser()) ||
3329 isa<PtrToIntInst>(U.getUser());
3330 Changed |= ShouldReplace;
3331 return ShouldReplace;
3332 });
3333 return Changed ? &GEP : nullptr;
3334 }
3335 } else if (auto *ExactIns =
3336 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3337 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3338 Value *V;
3339 if (ExactIns->isExact()) {
3340 if ((has_single_bit(TyAllocSize) &&
3341 match(GEP.getOperand(1),
3342 m_Shr(m_Value(V),
3343 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3344 match(GEP.getOperand(1),
3345 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3346 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3347 GEP.getPointerOperand(), V,
3348 GEP.getNoWrapFlags());
3349 }
3350 }
3351 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3352 // Try to canonicalize non-i8 element type to i8 if the index is an
3353 // exact instruction. If the index is an exact instruction (div/shr)
3354 // with a constant RHS, we can fold the non-i8 element scale into the
3355 // div/shr (similar to the mul case, just inverted).
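// Illustrative sketch (assumed types): with an i32 element (4 bytes),
//   %idx = lshr exact i64 %x, 4
//   %gep = getelementptr i32, ptr %p, i64 %idx
// can become
//   %idx2 = lshr exact i64 %x, 2
//   %gep  = getelementptr i8, ptr %p, i64 %idx2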
3356 const APInt *C;
3357 std::optional<APInt> NewC;
3358 if (has_single_bit(TyAllocSize) &&
3359 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3360 C->uge(countr_zero(TyAllocSize)))
3361 NewC = *C - countr_zero(TyAllocSize);
3362 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3363 APInt Quot;
3364 uint64_t Rem;
3365 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3366 if (Rem == 0)
3367 NewC = Quot;
3368 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3369 APInt Quot;
3370 int64_t Rem;
3371 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3372 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3373 if (!Quot.isAllOnes() && Rem == 0)
3374 NewC = Quot;
3375 }
3376
3377 if (NewC.has_value()) {
3378 Value *NewOp = Builder.CreateBinOp(
3379 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3380 ConstantInt::get(V->getType(), *NewC));
3381 cast<BinaryOperator>(NewOp)->setIsExact();
3382 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3383 GEP.getPointerOperand(), NewOp,
3384 GEP.getNoWrapFlags());
3385 }
3386 }
3387 }
3388 }
3389 }
3390 // We do not handle pointer-vector geps here.
3391 if (GEPType->isVectorTy())
3392 return nullptr;
3393
3394 if (!GEP.isInBounds()) {
3395 unsigned IdxWidth =
3396 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3397 APInt BasePtrOffset(IdxWidth, 0);
3398 Value *UnderlyingPtrOp =
3399 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3400 bool CanBeNull, CanBeFreed;
3401 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3402 DL, CanBeNull, CanBeFreed);
3403 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3404 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3405 BasePtrOffset.isNonNegative()) {
3406 APInt AllocSize(IdxWidth, DerefBytes);
3407 if (BasePtrOffset.ule(AllocSize)) {
3408 return GetElementPtrInst::CreateInBounds(
3409 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3410 }
3411 }
3412 }
3413 }
3414
3415 // nusw + nneg -> nuw
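// Illustrative example: "getelementptr nusw i32, ptr %p, i64 %i" where %i is
// known non-negative can additionally be marked nuw.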
3416 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3417 all_of(GEP.indices(), [&](Value *Idx) {
3418 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3419 })) {
3420 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3421 return &GEP;
3422 }
3423
3424 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3425 // to do this after having tried to derive "nuw" above.
3426 if (GEP.getNumIndices() == 1) {
3427 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3428 // geps if transforming into (gep (gep p, x), y).
3429 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3430 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3431 // that x + y does not have unsigned wrap.
3432 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3433 return GEP.getNoWrapFlags();
3434 return GEPNoWrapFlags::none();
3435 };
3436
3437 // Try to replace ADD + GEP with GEP + GEP.
3438 Value *Idx1, *Idx2;
3439 if (match(GEP.getOperand(1),
3440 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3441 // %idx = add i64 %idx1, %idx2
3442 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3443 // as:
3444 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3445 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3446 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3447 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3448 auto *NewPtr =
3449 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3450 Idx1, "", NWFlags);
3451 return replaceInstUsesWith(GEP,
3452 Builder.CreateGEP(GEP.getSourceElementType(),
3453 NewPtr, Idx2, "", NWFlags));
3454 }
3455 ConstantInt *C;
3456 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3457 m_Value(Idx1), m_ConstantInt(C))))))) {
3458 // %add = add nsw i32 %idx1, idx2
3459 // %sidx = sext i32 %add to i64
3460 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3461 // as:
3462 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3463 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3464 bool NUW = match(GEP.getOperand(1),
3466 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3467 auto *NewPtr = Builder.CreateGEP(
3468 GEP.getSourceElementType(), GEP.getPointerOperand(),
3469 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3470 return replaceInstUsesWith(
3471 GEP,
3472 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3473 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3474 "", NWFlags));
3475 }
3476 }
3477
3478 if (Instruction *R = foldSelectGEP(GEP, Builder))
3479 return R;
3480
3481 return nullptr;
3482}
3483
3484 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3485 Instruction *AI) {
3486 if (isa<ConstantPointerNull>(V))
3487 return true;
3488 if (auto *LI = dyn_cast<LoadInst>(V))
3489 return isa<GlobalVariable>(LI->getPointerOperand());
3490 // Two distinct allocations will never be equal.
3491 return isAllocLikeFn(V, &TLI) && V != AI;
3492}
3493
3494/// Given a call CB which uses an address UsedV, return true if we can prove the
3495 /// call's only possible effect is storing to UsedV.
3496static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3497 const TargetLibraryInfo &TLI) {
3498 if (!CB.use_empty())
3499 // TODO: add recursion if returned attribute is present
3500 return false;
3501
3502 if (CB.isTerminator())
3503 // TODO: remove implementation restriction
3504 return false;
3505
3506 if (!CB.willReturn() || !CB.doesNotThrow())
3507 return false;
3508
3509 // If the only possible side effect of the call is writing to the alloca,
3510 // and the result isn't used, we can safely remove any reads implied by the
3511 // call including those which might read the alloca itself.
3512 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3513 return Dest && Dest->Ptr == UsedV;
3514}
3515
3516static std::optional<ModRefInfo>
3517 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3518 const TargetLibraryInfo &TLI, bool KnowInit) {
3519 SmallVector<Instruction *, 4> Worklist;
3520 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3521 Worklist.push_back(AI);
3522 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Ref;
3523
3524 do {
3525 Instruction *PI = Worklist.pop_back_val();
3526 for (User *U : PI->users()) {
3527 Instruction *I = cast<Instruction>(U);
3528 switch (I->getOpcode()) {
3529 default:
3530 // Give up the moment we see something we can't handle.
3531 return std::nullopt;
3532
3533 case Instruction::AddrSpaceCast:
3534 case Instruction::BitCast:
3535 case Instruction::GetElementPtr:
3536 Users.emplace_back(I);
3537 Worklist.push_back(I);
3538 continue;
3539
3540 case Instruction::ICmp: {
3541 ICmpInst *ICI = cast<ICmpInst>(I);
3542 // We can fold eq/ne comparisons with null to false/true, respectively.
3543 // We also fold comparisons in some conditions provided the alloc has
3544 // not escaped (see isNeverEqualToUnescapedAlloc).
3545 if (!ICI->isEquality())
3546 return std::nullopt;
3547 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3548 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3549 return std::nullopt;
3550
3551 // Do not fold compares to aligned_alloc calls, as they may have to
3552 // return null in case the required alignment cannot be satisfied,
3553 // unless we can prove that both alignment and size are valid.
3554 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3555 // Check if alignment and size of a call to aligned_alloc is valid,
3556 // that is, the alignment is a power of 2 and the size is a multiple of the
3557 // alignment.
3558 const APInt *Alignment;
3559 const APInt *Size;
3560 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3561 match(CB->getArgOperand(1), m_APInt(Size)) &&
3562 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3563 };
3564 auto *CB = dyn_cast<CallBase>(AI);
3565 LibFunc TheLibFunc;
3566 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3567 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3568 !AlignmentAndSizeKnownValid(CB))
3569 return std::nullopt;
3570 Users.emplace_back(I);
3571 continue;
3572 }
3573
3574 case Instruction::Call:
3575 // Ignore no-op and store intrinsics.
3576 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3577 switch (II->getIntrinsicID()) {
3578 default:
3579 return std::nullopt;
3580
3581 case Intrinsic::memmove:
3582 case Intrinsic::memcpy:
3583 case Intrinsic::memset: {
3584 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3585 if (MI->isVolatile())
3586 return std::nullopt;
3587 // Note: this could also be ModRef, but we can still interpret that
3588 // as just Mod in that case.
3589 ModRefInfo NewAccess =
3590 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3591 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3592 return std::nullopt;
3593 Access |= NewAccess;
3594 [[fallthrough]];
3595 }
3596 case Intrinsic::assume:
3597 case Intrinsic::invariant_start:
3598 case Intrinsic::invariant_end:
3599 case Intrinsic::lifetime_start:
3600 case Intrinsic::lifetime_end:
3601 case Intrinsic::objectsize:
3602 Users.emplace_back(I);
3603 continue;
3604 case Intrinsic::launder_invariant_group:
3605 case Intrinsic::strip_invariant_group:
3606 Users.emplace_back(I);
3607 Worklist.push_back(I);
3608 continue;
3609 }
3610 }
3611
3612 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3613 getAllocationFamily(I, &TLI) == Family) {
3614 Users.emplace_back(I);
3615 continue;
3616 }
3617
3618 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3619 getAllocationFamily(I, &TLI) == Family) {
3620 Users.emplace_back(I);
3621 Worklist.push_back(I);
3622 continue;
3623 }
3624
3625 if (!isRefSet(Access) &&
3626 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3628 Users.emplace_back(I);
3629 continue;
3630 }
3631
3632 return std::nullopt;
3633
3634 case Instruction::Store: {
3635 StoreInst *SI = cast<StoreInst>(I);
3636 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3637 return std::nullopt;
3638 if (isRefSet(Access))
3639 return std::nullopt;
3641 Users.emplace_back(I);
3642 continue;
3643 }
3644
3645 case Instruction::Load: {
3646 LoadInst *LI = cast<LoadInst>(I);
3647 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3648 return std::nullopt;
3649 if (isModSet(Access))
3650 return std::nullopt;
3652 Users.emplace_back(I);
3653 continue;
3654 }
3655 }
3656 llvm_unreachable("missing a return?");
3657 }
3658 } while (!Worklist.empty());
3659
3661 return Access;
3662}
3663
3664 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3666
3667 // If we have a malloc call whose only uses are comparisons against null and
3668 // free calls, delete the calls and replace the comparisons with true or
3669 // false as appropriate.
3670
3671 // This is based on the principle that we can substitute our own allocation
3672 // function (which will never return null) rather than knowledge of the
3673 // specific function being called. In some sense this can change the permitted
3674 // outputs of a program (when we convert a malloc to an alloca, the fact that
3675 // the allocation is now on the stack is potentially visible, for example),
3676 // but we believe it does so in a permissible manner.
3677 SmallVector<WeakTrackingVH, 64> Users;
3678
3679 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3680 // before each store.
3681 SmallVector<DbgVariableRecord *, 8> DVRs;
3682 std::unique_ptr<DIBuilder> DIB;
3683 if (isa<AllocaInst>(MI)) {
3684 findDbgUsers(&MI, DVRs);
3685 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3686 }
3687
3688 // Determine what getInitialValueOfAllocation would return without actually
3689 // allocating the result.
3690 bool KnowInitUndef = false;
3691 bool KnowInitZero = false;
3692 Constant *Init =
3693 getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3694 if (Init) {
3695 if (isa<UndefValue>(Init))
3696 KnowInitUndef = true;
3697 else if (Init->isNullValue())
3698 KnowInitZero = true;
3699 }
3700 // The various sanitizers don't actually return undef memory, but rather
3701 // memory initialized with special forms of runtime poison
3702 auto &F = *MI.getFunction();
3703 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3704 F.hasFnAttribute(Attribute::SanitizeAddress))
3705 KnowInitUndef = false;
3706
3707 auto Removable =
3708 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3709 if (Removable) {
3710 for (WeakTrackingVH &User : Users) {
3711 // Lowering all @llvm.objectsize and MTI calls first because they may use
3712 // a bitcast/GEP of the alloca we are removing.
3713 if (!User)
3714 continue;
3715
3716 Instruction *I = cast<Instruction>(&*User);
3717
3718 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3719 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3720 SmallVector<Instruction *> InsertedInstructions;
3721 Value *Result = lowerObjectSizeCall(
3722 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3723 for (Instruction *Inserted : InsertedInstructions)
3724 Worklist.add(Inserted);
3725 replaceInstUsesWith(*I, Result);
3726 eraseInstFromFunction(*I);
3727 User = nullptr; // Skip examining in the next loop.
3728 continue;
3729 }
3730 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3731 if (KnowInitZero && isRefSet(*Removable)) {
3733 Builder.SetInsertPoint(MTI);
3734 auto *M = Builder.CreateMemSet(
3735 MTI->getRawDest(),
3736 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3737 MTI->getLength(), MTI->getDestAlign());
3738 M->copyMetadata(*MTI);
3739 }
3740 }
3741 }
3742 }
3743 for (WeakTrackingVH &User : Users) {
3744 if (!User)
3745 continue;
3746
3747 Instruction *I = cast<Instruction>(&*User);
3748
3749 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3750 replaceInstUsesWith(*C,
3751 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3752 C->isFalseWhenEqual()));
3753 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3754 for (auto *DVR : DVRs)
3755 if (DVR->isAddressOfVariable())
3756 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3757 } else {
3758 // Casts, GEP, or anything else: we're about to delete this instruction,
3759 // so it cannot have any valid uses.
3760 Constant *Replace;
3761 if (isa<LoadInst>(I)) {
3762 assert(KnowInitZero || KnowInitUndef);
3763 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3764 : Constant::getNullValue(I->getType());
3765 } else
3766 Replace = PoisonValue::get(I->getType());
3767 replaceInstUsesWith(*I, Replace);
3768 }
3769 eraseInstFromFunction(*I);
3770 }
3771
3772 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3773 // Replace invoke with a NOP intrinsic to maintain the original CFG
3774 Module *M = II->getModule();
3775 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3776 auto *NewII = InvokeInst::Create(
3777 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3778 NewII->setDebugLoc(II->getDebugLoc());
3779 }
3780
3781 // Remove debug intrinsics which describe the value contained within the
3782 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3783 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3784 //
3785 // ```
3786 // define void @foo(i32 %0) {
3787 // %a = alloca i32 ; Deleted.
3788 // store i32 %0, i32* %a
3789 // dbg.value(i32 %0, "arg0") ; Not deleted.
3790 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3791 // call void @trivially_inlinable_no_op(i32* %a)
3792 // ret void
3793 // }
3794 // ```
3795 //
3796 // This may not be required if we stop describing the contents of allocas
3797 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3798 // the LowerDbgDeclare utility.
3799 //
3800 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3801 // "arg0" dbg.value may be stale after the call. However, failing to remove
3802 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3803 //
3804 // FIXME: the Assignment Tracking project has now likely made this
3805 // redundant (and it's sometimes harmful).
3806 for (auto *DVR : DVRs)
3807 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3808 DVR->eraseFromParent();
3809
3810 return eraseInstFromFunction(MI);
3811 }
3812 return nullptr;
3813}
3814
3815/// Move the call to free before a NULL test.
3816///
3817 /// Check if this free is accessed after its argument has been tested
3818/// against NULL (property 0).
3819/// If yes, it is legal to move this call in its predecessor block.
3820///
3821/// The move is performed only if the block containing the call to free
3822/// will be removed, i.e.:
3823/// 1. it has only one predecessor P, and P has two successors
3824/// 2. it contains the call, noops, and an unconditional branch
3825/// 3. its successor is the same as its predecessor's successor
3826///
3827 /// Profitability is not a concern here; this function should
3828/// be called only if the caller knows this transformation would be
3829 /// profitable (e.g., for code size).
3830 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3831 const DataLayout &DL) {
3832 Value *Op = FI.getArgOperand(0);
3833 BasicBlock *FreeInstrBB = FI.getParent();
3834 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3835
3836 // Validate part of constraint #1: Only one predecessor
3837 // FIXME: We can extend the number of predecessors, but in that case, we
3838 // would duplicate the call to free in each predecessor and it may
3839 // not be profitable even for code size.
3840 if (!PredBB)
3841 return nullptr;
3842
3843 // Validate constraint #2: Does this block contains only the call to
3844 // free, noops, and an unconditional branch?
3845 BasicBlock *SuccBB;
3846 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3847 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3848 return nullptr;
3849
3850 // If there are only 2 instructions in the block, at this point,
3851 // they are the call to free and an unconditional branch.
3852 // If there are more than 2 instructions, check that they are noops
3853 // i.e., they won't hurt the performance of the generated code.
3854 if (FreeInstrBB->size() != 2) {
3855 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3856 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3857 continue;
3858 auto *Cast = dyn_cast<CastInst>(&Inst);
3859 if (!Cast || !Cast->isNoopCast(DL))
3860 return nullptr;
3861 }
3862 }
3863 // Validate the rest of constraint #1 by matching on the pred branch.
3864 Instruction *TI = PredBB->getTerminator();
3865 BasicBlock *TrueBB, *FalseBB;
3866 CmpPredicate Pred;
3867 if (!match(TI, m_Br(m_ICmp(Pred,
3868 m_CombineOr(m_Specific(Op),
3869 m_Specific(Op->stripPointerCasts())),
3870 m_Zero()),
3871 TrueBB, FalseBB)))
3872 return nullptr;
3873 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3874 return nullptr;
3875
3876 // Validate constraint #3: Ensure the null case just falls through.
3877 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3878 return nullptr;
3879 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3880 "Broken CFG: missing edge from predecessor to successor");
3881
3882 // At this point, we know that everything in FreeInstrBB can be moved
3883 // before TI.
3884 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3885 if (&Instr == FreeInstrBBTerminator)
3886 break;
3887 Instr.moveBeforePreserving(TI->getIterator());
3888 }
3889 assert(FreeInstrBB->size() == 1 &&
3890 "Only the branch instruction should remain");
3891
3892 // Now that we've moved the call to free before the NULL check, we have to
3893 // remove any attributes on its parameter that imply it's non-null, because
3894 // those attributes might have only been valid because of the NULL check, and
3895 // we can get miscompiles if we keep them. This is conservative if non-null is
3896 // also implied by something other than the NULL check, but it's guaranteed to
3897 // be correct, and the conservativeness won't matter in practice, since the
3898 // attributes are irrelevant for the call to free itself and the pointer
3899 // shouldn't be used after the call.
3900 AttributeList Attrs = FI.getAttributes();
3901 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3902 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3903 if (Dereferenceable.isValid()) {
3904 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3905 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3906 Attribute::Dereferenceable);
3907 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3908 }
3909 FI.setAttributes(Attrs);
3910
3911 return &FI;
3912}
3913
3914 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3915 // free undef -> unreachable.
3916 if (isa<UndefValue>(Op)) {
3917 // Leave a marker since we can't modify the CFG here.
3918 CreateNonTerminatorUnreachable(&FI);
3919 return eraseInstFromFunction(FI);
3920 }
3921
3922 // If we have 'free null' delete the instruction. This can happen in stl code
3923 // when lots of inlining happens.
3924 if (isa<ConstantPointerNull>(Op))
3925 return eraseInstFromFunction(FI);
3926
3927 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3928 // realloc() entirely.
3929 CallInst *CI = dyn_cast<CallInst>(Op);
3930 if (CI && CI->hasOneUse())
3931 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3932 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3933
3934 // If we optimize for code size, try to move the call to free before the null
3935 // test so that SimplifyCFG can remove the empty block and dead code
3936 // elimination can remove the branch. I.e., this helps to turn something like:
3937 // if (foo) free(foo);
3938 // into
3939 // free(foo);
3940 //
3941 // Note that we can only do this for 'free' and not for any flavor of
3942 // 'operator delete'; there is no 'operator delete' symbol for which we are
3943 // permitted to invent a call, even if we're passing in a null pointer.
3944 if (MinimizeSize) {
3945 LibFunc Func;
3946 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3947 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3948 return I;
3949 }
3950
3951 return nullptr;
3952}
3953
3954 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3955 Value *RetVal = RI.getReturnValue();
3956 if (!RetVal)
3957 return nullptr;
3958
3959 Function *F = RI.getFunction();
3960 Type *RetTy = RetVal->getType();
3961 if (RetTy->isPointerTy()) {
3962 bool HasDereferenceable =
3963 F->getAttributes().getRetDereferenceableBytes() > 0;
3964 if (F->hasRetAttribute(Attribute::NonNull) ||
3965 (HasDereferenceable &&
3966 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
3967 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3968 return replaceOperand(RI, 0, V);
3969 }
3970 }
3971
3972 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
3973 return nullptr;
3974
3975 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3976 if (ReturnClass == fcNone)
3977 return nullptr;
3978
3979 KnownFPClass KnownClass;
3980 Value *Simplified =
3981 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
3982 if (!Simplified)
3983 return nullptr;
3984
3985 return ReturnInst::Create(RI.getContext(), Simplified);
3986}
3987
3988 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3989 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3990 // Try to remove the previous instruction if it must lead to unreachable.
3991 // This includes instructions like stores and "llvm.assume" that may not get
3992 // removed by simple dead code elimination.
3993 bool Changed = false;
3994 while (Instruction *Prev = I.getPrevNode()) {
3995 // While we theoretically can erase EH, that would result in a block that
3996 // used to start with an EH no longer starting with EH, which is invalid.
3997 // To make it valid, we'd need to fixup predecessors to no longer refer to
3998 // this block, but that changes CFG, which is not allowed in InstCombine.
3999 if (Prev->isEHPad())
4000 break; // Cannot drop any more instructions. We're done here.
4001
4002 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4003 break; // Cannot drop any more instructions. We're done here.
4004 // Otherwise, this instruction can be freely erased,
4005 // even if it is not side-effect free.
4006
4007 // A value may still have uses before we process it here (for example, in
4008 // another unreachable block), so convert those to poison.
4009 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4010 eraseInstFromFunction(*Prev);
4011 Changed = true;
4012 }
4013 return Changed;
4014}
4015
4016 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4017 removeInstructionsBeforeUnreachable(I);
4018 return nullptr;
4019 }
4020
4021 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4022 assert(BI.isUnconditional() && "Only for unconditional branches.");
4023
4024 // If this store is the second-to-last instruction in the basic block
4025 // (excluding debug info) and if the block ends with
4026 // an unconditional branch, try to move the store to the successor block.
4027
4028 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4029 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4030 do {
4031 if (BBI != FirstInstr)
4032 --BBI;
4033 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4034
4035 return dyn_cast<StoreInst>(BBI);
4036 };
4037
4038 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4039 if (mergeStoreIntoSuccessor(*SI))
4040 return &BI;
4041
4042 return nullptr;
4043}
4044
4045 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
4046 SmallVectorImpl<BasicBlock *> &Worklist) {
4047 if (!DeadEdges.insert({From, To}).second)
4048 return;
4049
4050 // Replace phi node operands in successor with poison.
4051 for (PHINode &PN : To->phis())
4052 for (Use &U : PN.incoming_values())
4053 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4054 replaceUse(U, PoisonValue::get(PN.getType()));
4055 addToWorklist(&PN);
4056 MadeIRChange = true;
4057 }
4058
4059 Worklist.push_back(To);
4060}
4061
4062// Under the assumption that I is unreachable, remove it and following
4063 // instructions. Changes are reported directly to MadeIRChange.
4064 void InstCombinerImpl::handleUnreachableFrom(
4065 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4066 BasicBlock *BB = I->getParent();
4067 for (Instruction &Inst : make_early_inc_range(
4068 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4069 std::next(I->getReverseIterator())))) {
4070 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4071 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4072 MadeIRChange = true;
4073 }
4074 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4075 continue;
4076 // RemoveDIs: erase debug-info on this instruction manually.
4077 Inst.dropDbgRecords();
4078 Inst.eraseFromParent();
4079 MadeIRChange = true;
4080 }
4081
4084 MadeIRChange = true;
4085 for (Value *V : Changed)
4087 }
4088
4089 // Handle potentially dead successors.
4090 for (BasicBlock *Succ : successors(BB))
4091 addDeadEdge(BB, Succ, Worklist);
4092}
4093
4096 while (!Worklist.empty()) {
4097 BasicBlock *BB = Worklist.pop_back_val();
4098 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4099 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4100 }))
4101 continue;
4102 continue;
4103 handleUnreachableFrom(&BB->front(), Worklist);
4104 }
4105}
4106
4107 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4108 BasicBlock *LiveSucc) {
4109 SmallVector<BasicBlock *> Worklist;
4110 for (BasicBlock *Succ : successors(BB)) {
4111 // The live successor isn't dead.
4112 if (Succ == LiveSucc)
4113 continue;
4114
4115 addDeadEdge(BB, Succ, Worklist);
4116 }
4117
4118 handlePotentiallyDeadBlocks(Worklist);
4119}
4120
4121 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4122 if (BI.isUnconditional())
4123 return visitUnconditionalBranchInst(BI);
4124
4125 // Change br (not X), label True, label False to: br X, label False, True
4126 Value *Cond = BI.getCondition();
4127 Value *X;
4128 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4129 // Swap Destinations and condition...
4130 BI.swapSuccessors();
4131 if (BPI)
4132 BPI->swapSuccEdgesProbabilities(BI.getParent());
4133 return replaceOperand(BI, 0, X);
4134 }
4135
4136 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4137 // This is done by inverting the condition and swapping successors:
4138 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4139 Value *Y;
4140 if (isa<SelectInst>(Cond) &&
4141 match(Cond,
4142 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
4143 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4144 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4145 BI.swapSuccessors();
4146 if (BPI)
4147 BPI->swapSuccEdgesProbabilities(BI.getParent());
4148 return replaceOperand(BI, 0, Or);
4149 }
4150
4151 // If the condition is irrelevant, remove the use so that other
4152 // transforms on the condition become more effective.
4153 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4154 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4155
4156 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4157 CmpPredicate Pred;
4158 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4159 !isCanonicalPredicate(Pred)) {
4160 // Swap destinations and condition.
4161 auto *Cmp = cast<CmpInst>(Cond);
4162 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4163 BI.swapSuccessors();
4164 if (BPI)
4165 BPI->swapSuccEdgesProbabilities(BI.getParent());
4166 Worklist.push(Cmp);
4167 return &BI;
4168 }
4169
4170 if (isa<UndefValue>(Cond)) {
4171 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4172 return nullptr;
4173 }
4174 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4175 handlePotentiallyDeadSuccessors(BI.getParent(),
4176 BI.getSuccessor(!CI->getZExtValue()));
4177 return nullptr;
4178 }
4179
4180 // Replace all dominated uses of the condition with true/false
4181 // Ignore constant expressions to avoid iterating over uses on other
4182 // functions.
4183 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4184 for (auto &U : make_early_inc_range(Cond->uses())) {
4185 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4186 if (DT.dominates(Edge0, U)) {
4187 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4188 addToWorklist(cast<Instruction>(U.getUser()));
4189 continue;
4190 }
4191 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4192 if (DT.dominates(Edge1, U)) {
4193 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4194 addToWorklist(cast<Instruction>(U.getUser()));
4195 }
4196 }
4197 }
4198
4199 DC.registerBranch(&BI);
4200 return nullptr;
4201}
4202
4203// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4204// we can prove that both (switch C) and (switch X) go to the default when cond
4205// is false/true.
4208 bool IsTrueArm) {
4209 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4210 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4211 if (!C)
4212 return nullptr;
4213
4214 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4215 if (CstBB != SI.getDefaultDest())
4216 return nullptr;
4217 Value *X = Select->getOperand(3 - CstOpIdx);
4218 CmpPredicate Pred;
4219 const APInt *RHSC;
4220 if (!match(Select->getCondition(),
4221 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4222 return nullptr;
4223 if (IsTrueArm)
4224 Pred = ICmpInst::getInversePredicate(Pred);
4225
4226 // See whether we can replace the select with X
4228 for (auto Case : SI.cases())
4229 if (!CR.contains(Case.getCaseValue()->getValue()))
4230 return nullptr;
4231
4232 return X;
4233}
4234
4235 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4236 Value *Cond = SI.getCondition();
4237 Value *Op0;
4238 ConstantInt *AddRHS;
4239 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
4240 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
4241 for (auto Case : SI.cases()) {
4242 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
4243 assert(isa<ConstantInt>(NewCase) &&
4244 "Result of expression should be constant");
4245 Case.setValue(cast<ConstantInt>(NewCase));
4246 }
4247 return replaceOperand(SI, 0, Op0);
4248 }
4249
4250 ConstantInt *SubLHS;
4251 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
4252 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
4253 for (auto Case : SI.cases()) {
4254 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
4255 assert(isa<ConstantInt>(NewCase) &&
4256 "Result of expression should be constant");
4257 Case.setValue(cast<ConstantInt>(NewCase));
4258 }
4259 return replaceOperand(SI, 0, Op0);
4260 }
4261
4262 uint64_t ShiftAmt;
4263 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4264 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4265 all_of(SI.cases(), [&](const auto &Case) {
4266 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4267 })) {
4268 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4269 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
4270 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4271 Shl->hasOneUse()) {
4272 Value *NewCond = Op0;
4273 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4274 // If the shift may wrap, we need to mask off the shifted bits.
4275 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4276 NewCond = Builder.CreateAnd(
4277 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4278 }
4279 for (auto Case : SI.cases()) {
4280 const APInt &CaseVal = Case.getCaseValue()->getValue();
4281 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4282 : CaseVal.lshr(ShiftAmt);
4283 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4284 }
4285 return replaceOperand(SI, 0, NewCond);
4286 }
4287 }
4288
4289 // Fold switch(zext/sext(X)) into switch(X) if possible.
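// Illustrative example: a switch on "zext i8 %x to i32" whose case values all
// fit in 8 unsigned bits can switch on %x directly, with the case values
// truncated to i8.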
4290 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4291 bool IsZExt = isa<ZExtInst>(Cond);
4292 Type *SrcTy = Op0->getType();
4293 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4294
4295 if (all_of(SI.cases(), [&](const auto &Case) {
4296 const APInt &CaseVal = Case.getCaseValue()->getValue();
4297 return IsZExt ? CaseVal.isIntN(NewWidth)
4298 : CaseVal.isSignedIntN(NewWidth);
4299 })) {
4300 for (auto &Case : SI.cases()) {
4301 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4302 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4303 }
4304 return replaceOperand(SI, 0, Op0);
4305 }
4306 }
4307
4308 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4309 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4310 if (Value *V =
4311 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4312 return replaceOperand(SI, 0, V);
4313 if (Value *V =
4314 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4315 return replaceOperand(SI, 0, V);
4316 }
4317
4318 KnownBits Known = computeKnownBits(Cond, &SI);
4319 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4320 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4321
4322 // Compute the number of leading bits we can ignore.
4323 // TODO: A better way to determine this would use ComputeNumSignBits().
4324 for (const auto &C : SI.cases()) {
4325 LeadingKnownZeros =
4326 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4327 LeadingKnownOnes =
4328 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4329 }
4330
4331 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4332
4333 // Shrink the condition operand if the new type is smaller than the old type.
4334 // But do not shrink to a non-standard type, because backend can't generate
4335 // good code for that yet.
4336 // TODO: We can make it aggressive again after fixing PR39569.
4337 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4338 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4339 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4340 Builder.SetInsertPoint(&SI);
4341 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4342
4343 for (auto Case : SI.cases()) {
4344 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4345 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4346 }
4347 return replaceOperand(SI, 0, NewCond);
4348 }
4349
4350 if (isa<UndefValue>(Cond)) {
4351 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4352 return nullptr;
4353 }
4354 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4355 handlePotentiallyDeadSuccessors(SI.getParent(),
4356 SI.findCaseValue(CI)->getCaseSuccessor());
4357 return nullptr;
4358 }
4359
4360 return nullptr;
4361}
4362
4363 Instruction *
4364 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4365 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4366 if (!WO)
4367 return nullptr;
4368
4369 Intrinsic::ID OvID = WO->getIntrinsicID();
4370 const APInt *C = nullptr;
4371 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4372 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4373 OvID == Intrinsic::umul_with_overflow)) {
4374 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4375 if (C->isAllOnes())
4376 return BinaryOperator::CreateNeg(WO->getLHS());
4377 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4378 if (C->isPowerOf2()) {
4379 return BinaryOperator::CreateShl(
4380 WO->getLHS(),
4381 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4382 }
4383 }
4384 }
4385
4386 // We're extracting from an overflow intrinsic. See if we're the only user.
4387 // That allows us to simplify multiple result intrinsics to simpler things
4388 // that just get one value.
4389 if (!WO->hasOneUse())
4390 return nullptr;
4391
4392 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4393 // and replace it with a traditional binary instruction.
4394 if (*EV.idx_begin() == 0) {
4395 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4396 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4397 // Replace the old instruction's uses with poison.
4398 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4400 return BinaryOperator::Create(BinOp, LHS, RHS);
4401 }
4402
4403 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4404
4405 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4406 if (OvID == Intrinsic::usub_with_overflow)
4407 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4408
4409 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4410 // +1 is not possible because we assume signed values.
4411 if (OvID == Intrinsic::smul_with_overflow &&
4412 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4413 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4414
4415 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4416 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4417 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4418 // Only handle even bitwidths for performance reasons.
4419 if (BitWidth % 2 == 0)
4420 return new ICmpInst(
4421 ICmpInst::ICMP_UGT, WO->getLHS(),
4422 ConstantInt::get(WO->getLHS()->getType(),
4423 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4424 }
4425
4426 // If only the overflow result is used, and the right hand side is a
4427 // constant (or constant splat), we can remove the intrinsic by directly
4428 // checking for overflow.
4429 if (C) {
4430 // Compute the no-wrap range for LHS given RHS=C, then construct an
4431 // equivalent icmp, potentially using an offset.
4432 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4433 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4434
4435 CmpInst::Predicate Pred;
4436 APInt NewRHSC, Offset;
4437 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4438 auto *OpTy = WO->getRHS()->getType();
4439 auto *NewLHS = WO->getLHS();
4440 if (Offset != 0)
4441 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4442 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4443 ConstantInt::get(OpTy, NewRHSC));
4444 }
4445
4446 return nullptr;
4447}
4448
4449 static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
4450 SelectInst *SelectInst,
4451 InstCombiner::BuilderTy &Builder) {
4452 // Helper to fold frexp of select to select of frexp.
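// Illustrative sketch (assumed IR):
//   %sel = select i1 %c, float 4.0, float %x
//   %fr  = call { float, i32 } @llvm.frexp.f32.i32(float %sel)
//   %m   = extractvalue { float, i32 } %fr, 0
// The frexp is applied to %x instead, and %m becomes a select between the
// constant's mantissa (0.5 for 4.0) and the new mantissa.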
4453
4454 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4455 return nullptr;
4456 Value *Cond = SelectInst->getCondition();
4457 Value *TrueVal = SelectInst->getTrueValue();
4458 Value *FalseVal = SelectInst->getFalseValue();
4459
4460 const APFloat *ConstVal = nullptr;
4461 Value *VarOp = nullptr;
4462 bool ConstIsTrue = false;
4463
4464 if (match(TrueVal, m_APFloat(ConstVal))) {
4465 VarOp = FalseVal;
4466 ConstIsTrue = true;
4467 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4468 VarOp = TrueVal;
4469 ConstIsTrue = false;
4470 } else {
4471 return nullptr;
4472 }
4473
4474 Builder.SetInsertPoint(&EV);
4475
4476 CallInst *NewFrexp =
4477 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4478 NewFrexp->copyIRFlags(FrexpCall);
4479
4480 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4481
4482 int Exp;
4483 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4484
4485 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4486
4487 Value *NewSel = Builder.CreateSelectFMF(
4488 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4489 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4490 return NewSel;
4491 }
4492 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4493 Value *Agg = EV.getAggregateOperand();
4494
4495 if (!EV.hasIndices())
4496 return replaceInstUsesWith(EV, Agg);
4497
4498 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4499 SQ.getWithInstruction(&EV)))
4500 return replaceInstUsesWith(EV, V);
4501
4502 Value *Cond, *TrueVal, *FalseVal;
4504 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4505 auto *SelInst =
4506 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4507 if (Value *Result =
4508 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4509 return replaceInstUsesWith(EV, Result);
4510 }
4511 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4512 // We're extracting from an insertvalue instruction, compare the indices
4513 const unsigned *exti, *exte, *insi, *inse;
4514 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4515 exte = EV.idx_end(), inse = IV->idx_end();
4516 exti != exte && insi != inse;
4517 ++exti, ++insi) {
4518 if (*insi != *exti)
4519 // The insert and extract both reference distinctly different elements.
4520 // This means the extract is not influenced by the insert, and we can
4521 // replace the aggregate operand of the extract with the aggregate
4522 // operand of the insert. i.e., replace
4523 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4524 // %E = extractvalue { i32, { i32 } } %I, 0
4525 // with
4526 // %E = extractvalue { i32, { i32 } } %A, 0
4527 return ExtractValueInst::Create(IV->getAggregateOperand(),
4528 EV.getIndices());
4529 }
4530 if (exti == exte && insi == inse)
4531 // Both iterators are at the end: Index lists are identical. Replace
4532 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4533 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4534 // with "i32 42"
4535 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4536 if (exti == exte) {
4537 // The extract list is a prefix of the insert list. i.e. replace
4538 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4539 // %E = extractvalue { i32, { i32 } } %I, 1
4540 // with
4541 // %X = extractvalue { i32, { i32 } } %A, 1
4542 // %E = insertvalue { i32 } %X, i32 42, 0
4543 // by switching the order of the insert and extract (though the
4544 // insertvalue should be left in, since it may have other uses).
4545 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4546 EV.getIndices());
4547 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4548 ArrayRef(insi, inse));
4549 }
4550 if (insi == inse)
4551 // The insert list is a prefix of the extract list
4552 // We can simply remove the common indices from the extract and make it
4553 // operate on the inserted value instead of the insertvalue result.
4554 // i.e., replace
4555 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4556 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4557 // with
4558 // %E extractvalue { i32 } { i32 42 }, 0
4559 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4560 ArrayRef(exti, exte));
4561 }
4562
4563 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4564 return R;
4565
4566 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4567 // Bail out if the aggregate contains scalable vector type
4568 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4569 STy && STy->isScalableTy())
4570 return nullptr;
4571
4572 // If the (non-volatile) load only has one use, we can rewrite this to a
4573 // load from a GEP. This reduces the size of the load. If a load is used
4574 // only by extractvalue instructions then this either must have been
4575 // optimized before, or it is a struct with padding, in which case we
4576 // don't want to do the transformation as it loses padding knowledge.
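// Illustrative example (assumed struct type):
//   %agg = load { i32, i64 }, ptr %p
//   %v   = extractvalue { i32, i64 } %agg, 1
// becomes
//   %gep = getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1
//   %v   = load i64, ptr %gep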
4577 if (L->isSimple() && L->hasOneUse()) {
4578 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4579 SmallVector<Value*, 4> Indices;
4580 // Prefix an i32 0 since we need the first element.
4581 Indices.push_back(Builder.getInt32(0));
4582 for (unsigned Idx : EV.indices())
4583 Indices.push_back(Builder.getInt32(Idx));
4584
4585 // We need to insert these at the location of the old load, not at that of
4586 // the extractvalue.
4587 Builder.SetInsertPoint(L);
4588 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4589 L->getPointerOperand(), Indices);
4590 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4591 // Whatever aliasing information we had for the original load must also
4592 // hold for the smaller load, so propagate the annotations.
4593 NL->setAAMetadata(L->getAAMetadata());
4594 // Returning the load directly will cause the main loop to insert it in
4595 // the wrong spot, so use replaceInstUsesWith().
4596 return replaceInstUsesWith(EV, NL);
4597 }
4598 }
4599
4600 if (auto *PN = dyn_cast<PHINode>(Agg))
4601 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4602 return Res;
4603
4604 // Canonicalize extract (select Cond, TV, FV)
4605 // -> select cond, (extract TV), (extract FV)
4606 if (auto *SI = dyn_cast<SelectInst>(Agg))
4607 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4608 return R;
4609
4610 // We could simplify extracts from other values. Note that nested extracts may
4611 // already be simplified implicitly by the above: extract (extract (insert) )
4612 // will be translated into extract ( insert ( extract ) ) first and then just
4613 // the value inserted, if appropriate. Similarly for extracts from single-use
4614 // loads: extract (extract (load)) will be translated to extract (load (gep))
4615 // and if again single-use then via load (gep (gep)) to load (gep).
4616 // However, double extracts from e.g. function arguments or return values
4617 // aren't handled yet.
4618 return nullptr;
4619}
4620
4621/// Return 'true' if the given typeinfo will match anything.
4622static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4623 switch (Personality) {
4627 // The GCC C EH and Rust personalities exist only to support cleanups, so
4628 // it's not clear what the semantics of catch clauses are.
4629 return false;
4631 return false;
4633 // While __gnat_all_others_value will match any Ada exception, it doesn't
4634 // match foreign exceptions (or didn't, before gcc-4.7).
4635 return false;
4646 return TypeInfo->isNullValue();
4647 }
4648 llvm_unreachable("invalid enum");
4649}
4650
4651static bool shorter_filter(const Value *LHS, const Value *RHS) {
4652 return
4653 cast<ArrayType>(LHS->getType())->getNumElements()
4654 <
4655 cast<ArrayType>(RHS->getType())->getNumElements();
4656}
4657
4659 // The logic here should be correct for any real-world personality function.
4660 // However if that turns out not to be true, the offending logic can always
4661 // be conditioned on the personality function, like the catch-all logic is.
4662 EHPersonality Personality =
4663 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4664
4665 // Simplify the list of clauses, eg by removing repeated catch clauses
4666 // (these are often created by inlining).
4667 bool MakeNewInstruction = false; // If true, recreate using the following:
4668 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4669 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4670
4671 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4672 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4673 bool isLastClause = i + 1 == e;
4674 if (LI.isCatch(i)) {
4675 // A catch clause.
4676 Constant *CatchClause = LI.getClause(i);
4677 Constant *TypeInfo = CatchClause->stripPointerCasts();
4678
4679 // If we already saw this clause, there is no point in having a second
4680 // copy of it.
4681 if (AlreadyCaught.insert(TypeInfo).second) {
4682 // This catch clause was not already seen.
4683 NewClauses.push_back(CatchClause);
4684 } else {
4685 // Repeated catch clause - drop the redundant copy.
4686 MakeNewInstruction = true;
4687 }
4688
4689 // If this is a catch-all then there is no point in keeping any following
4690 // clauses or marking the landingpad as having a cleanup.
4691 if (isCatchAll(Personality, TypeInfo)) {
4692 if (!isLastClause)
4693 MakeNewInstruction = true;
4694 CleanupFlag = false;
4695 break;
4696 }
4697 } else {
4698 // A filter clause. If any of the filter elements were already caught
4699 // then they can be dropped from the filter. It is tempting to try to
4700 // exploit the filter further by saying that any typeinfo that does not
4701 // occur in the filter can't be caught later (and thus can be dropped).
4702 // However this would be wrong, since typeinfos can match without being
4703 // equal (for example if one represents a C++ class, and the other some
4704 // class derived from it).
4705 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4706 Constant *FilterClause = LI.getClause(i);
4707 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4708 unsigned NumTypeInfos = FilterType->getNumElements();
4709
4710 // An empty filter catches everything, so there is no point in keeping any
4711 // following clauses or marking the landingpad as having a cleanup. By
4712 // dealing with this case here the following code is made a bit simpler.
4713 if (!NumTypeInfos) {
4714 NewClauses.push_back(FilterClause);
4715 if (!isLastClause)
4716 MakeNewInstruction = true;
4717 CleanupFlag = false;
4718 break;
4719 }
4720
4721 bool MakeNewFilter = false; // If true, make a new filter.
4722 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4723 if (isa<ConstantAggregateZero>(FilterClause)) {
4724 // Not an empty filter - it contains at least one null typeinfo.
4725 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4726 Constant *TypeInfo =
4727 Constant::getNullValue(FilterType->getElementType());
4728 // If this typeinfo is a catch-all then the filter can never match.
4729 if (isCatchAll(Personality, TypeInfo)) {
4730 // Throw the filter away.
4731 MakeNewInstruction = true;
4732 continue;
4733 }
4734
4735 // There is no point in having multiple copies of this typeinfo, so
4736 // discard all but the first copy if there is more than one.
4737 NewFilterElts.push_back(TypeInfo);
4738 if (NumTypeInfos > 1)
4739 MakeNewFilter = true;
4740 } else {
4741 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4742 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4743 NewFilterElts.reserve(NumTypeInfos);
4744
4745 // Remove any filter elements that were already caught or that already
4746 // occurred in the filter. While there, see if any of the elements are
4747 // catch-alls. If so, the filter can be discarded.
4748 bool SawCatchAll = false;
4749 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4750 Constant *Elt = Filter->getOperand(j);
4751 Constant *TypeInfo = Elt->stripPointerCasts();
4752 if (isCatchAll(Personality, TypeInfo)) {
4753 // This element is a catch-all. Bail out, noting this fact.
4754 SawCatchAll = true;
4755 break;
4756 }
4757
4758 // Even if we've seen a type in a catch clause, we don't want to
4759 // remove it from the filter. An unexpected type handler may be
4760 // set up for a call site which throws an exception of the same
4761 // type caught. In order for the exception thrown by the unexpected
4762 // handler to propagate correctly, the filter must be correctly
4763 // described for the call site.
4764 //
4765 // Example:
4766 //
4767 // void unexpected() { throw 1;}
4768 // void foo() throw (int) {
4769 // std::set_unexpected(unexpected);
4770 // try {
4771 // throw 2.0;
4772 // } catch (int i) {}
4773 // }
4774
4775 // There is no point in having multiple copies of the same typeinfo in
4776 // a filter, so only add it if we didn't already.
4777 if (SeenInFilter.insert(TypeInfo).second)
4778 NewFilterElts.push_back(cast<Constant>(Elt));
4779 }
4780 // A filter containing a catch-all cannot match anything by definition.
4781 if (SawCatchAll) {
4782 // Throw the filter away.
4783 MakeNewInstruction = true;
4784 continue;
4785 }
4786
4787 // If we dropped something from the filter, make a new one.
4788 if (NewFilterElts.size() < NumTypeInfos)
4789 MakeNewFilter = true;
4790 }
4791 if (MakeNewFilter) {
4792 FilterType = ArrayType::get(FilterType->getElementType(),
4793 NewFilterElts.size());
4794 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4795 MakeNewInstruction = true;
4796 }
4797
4798 NewClauses.push_back(FilterClause);
4799
4800 // If the new filter is empty then it will catch everything so there is
4801 // no point in keeping any following clauses or marking the landingpad
4802 // as having a cleanup. The case of the original filter being empty was
4803 // already handled above.
4804 if (MakeNewFilter && !NewFilterElts.size()) {
4805 assert(MakeNewInstruction && "New filter but not a new instruction!");
4806 CleanupFlag = false;
4807 break;
4808 }
4809 }
4810 }
4811
4812 // If several filters occur in a row then reorder them so that the shortest
4813 // filters come first (those with the smallest number of elements). This is
4814 // advantageous because shorter filters are more likely to match, speeding up
4815 // unwinding, but mostly because it increases the effectiveness of the other
4816 // filter optimizations below.
4817 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4818 unsigned j;
4819 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4820 for (j = i; j != e; ++j)
4821 if (!isa<ArrayType>(NewClauses[j]->getType()))
4822 break;
4823
4824 // Check whether the filters are already sorted by length. We need to know
4825 // if sorting them is actually going to do anything so that we only make a
4826 // new landingpad instruction if it does.
4827 for (unsigned k = i; k + 1 < j; ++k)
4828 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4829 // Not sorted, so sort the filters now. Doing an unstable sort would be
4830 // correct too but reordering filters pointlessly might confuse users.
4831 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4832 shorter_filter);
4833 MakeNewInstruction = true;
4834 break;
4835 }
4836
4837 // Look for the next batch of filters.
4838 i = j + 1;
4839 }
4840
4841 // If typeinfos matched if and only if equal, then the elements of a filter L
4842 // that occurs later than a filter F could be replaced by the intersection of
4843 // the elements of F and L. In reality two typeinfos can match without being
4844 // equal (for example if one represents a C++ class, and the other some class
4845 // derived from it) so it would be wrong to perform this transform in general.
4846 // However the transform is correct and useful if F is a subset of L. In that
4847 // case L can be replaced by F, and thus removed altogether since repeating a
4848 // filter is pointless. So here we look at all pairs of filters F and L where
4849 // L follows F in the list of clauses, and remove L if every element of F is
4850 // an element of L. This can occur when inlining C++ functions with exception
4851 // specifications.
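  // As an illustrative sketch (not from the original source; the typeinfo
  // names are made up), a landingpad with the clauses
  //   filter [1 x ptr] [ptr @_ZTIi]
  //   filter [2 x ptr] [ptr @_ZTIi, ptr @_ZTId]
  // can drop the second clause: any exception that would trigger the longer
  // filter also triggers the shorter one, and the shorter one is checked
  // first.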
4852 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4853 // Examine each filter in turn.
4854 Value *Filter = NewClauses[i];
4855 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4856 if (!FTy)
4857 // Not a filter - skip it.
4858 continue;
4859 unsigned FElts = FTy->getNumElements();
4860 // Examine each filter following this one. Doing this backwards means that
4861 // we don't have to worry about filters disappearing under us when removed.
4862 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4863 Value *LFilter = NewClauses[j];
4864 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4865 if (!LTy)
4866 // Not a filter - skip it.
4867 continue;
4868 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4869 // an element of LFilter, then discard LFilter.
4870 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4871 // If Filter is empty then it is a subset of LFilter.
4872 if (!FElts) {
4873 // Discard LFilter.
4874 NewClauses.erase(J);
4875 MakeNewInstruction = true;
4876 // Move on to the next filter.
4877 continue;
4878 }
4879 unsigned LElts = LTy->getNumElements();
4880 // If Filter is longer than LFilter then it cannot be a subset of it.
4881 if (FElts > LElts)
4882 // Move on to the next filter.
4883 continue;
4884 // At this point we know that LFilter has at least one element.
4885 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4886 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4887 // already know that Filter is not longer than LFilter).
4888 if (isa<ConstantAggregateZero>(Filter)) {
4889 assert(FElts <= LElts && "Should have handled this case earlier!");
4890 // Discard LFilter.
4891 NewClauses.erase(J);
4892 MakeNewInstruction = true;
4893 }
4894 // Move on to the next filter.
4895 continue;
4896 }
4897 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4898 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4899 // Since Filter is non-empty and contains only zeros, it is a subset of
4900 // LFilter iff LFilter contains a zero.
4901 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4902 for (unsigned l = 0; l != LElts; ++l)
4903 if (LArray->getOperand(l)->isNullValue()) {
4904 // LFilter contains a zero - discard it.
4905 NewClauses.erase(J);
4906 MakeNewInstruction = true;
4907 break;
4908 }
4909 // Move on to the next filter.
4910 continue;
4911 }
4912 // At this point we know that both filters are ConstantArrays. Loop over
4913 // operands to see whether every element of Filter is also an element of
4914 // LFilter. Since filters tend to be short this is probably faster than
4915 // using a method that scales nicely.
4916 ConstantArray *FArray = cast<ConstantArray>(Filter);
4917 bool AllFound = true;
4918 for (unsigned f = 0; f != FElts; ++f) {
4919 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4920 AllFound = false;
4921 for (unsigned l = 0; l != LElts; ++l) {
4922 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4923 if (LTypeInfo == FTypeInfo) {
4924 AllFound = true;
4925 break;
4926 }
4927 }
4928 if (!AllFound)
4929 break;
4930 }
4931 if (AllFound) {
4932 // Discard LFilter.
4933 NewClauses.erase(J);
4934 MakeNewInstruction = true;
4935 }
4936 // Move on to the next filter.
4937 }
4938 }
4939
4940 // If we changed any of the clauses, replace the old landingpad instruction
4941 // with a new one.
4942 if (MakeNewInstruction) {
4943 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4944 NewClauses.size());
4945 for (Constant *C : NewClauses)
4946 NLI->addClause(C);
4947 // A landing pad with no clauses must have the cleanup flag set. It is
4948 // theoretically possible, though highly unlikely, that we eliminated all
4949 // clauses. If so, force the cleanup flag to true.
4950 if (NewClauses.empty())
4951 CleanupFlag = true;
4952 NLI->setCleanup(CleanupFlag);
4953 return NLI;
4954 }
4955
4956 // Even if none of the clauses changed, we may nonetheless have understood
4957 // that the cleanup flag is pointless. Clear it if so.
4958 if (LI.isCleanup() != CleanupFlag) {
4959 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4960 LI.setCleanup(CleanupFlag);
4961 return &LI;
4962 }
4963
4964 return nullptr;
4965}
4966
4967Value *
4968InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4969 // Try to push freeze through instructions that propagate but don't produce
4970 // poison as far as possible. If an operand of freeze does not produce poison
4971 // then push the freeze through to the operands that are not guaranteed
4972 // non-poison. The actual transform is as follows.
4973 // Op1 = ... ; Op1 can be poison
4974 // Op0 = Inst(Op1, NonPoisonOps...)
4975 // ... = Freeze(Op0)
4976 // =>
4977 // Op1 = ...
4978 // Op1.fr = Freeze(Op1)
4979 // ... = Inst(Op1.fr, NonPoisonOps...)
4980
4981 auto CanPushFreeze = [](Value *V) {
4982 if (!isa<Instruction>(V) || isa<PHINode>(V))
4983 return false;
4984
4985 // We can't push the freeze through an instruction which can itself create
4986 // poison. If the only source of new poison is flags, we can simply
4987 // strip them (since we know the only use is the freeze and nothing can
4988 // benefit from them.)
4989 return !canCreateUndefOrPoison(cast<Operator>(V),
4990 /*ConsiderFlagsAndMetadata*/ false);
4991 };
4992
4993 // Pushing freezes up long instruction chains can be expensive. Instead,
4994 // we directly push the freeze all the way to the leaves. However, we leave
4995 // deduplication of freezes on the same value for freezeOtherUses().
4996 Use *OrigUse = &OrigFI.getOperandUse(0);
4997 SmallPtrSet<Instruction *, 8> Visited;
4998 SmallVector<Use *, 8> Worklist;
4999 Worklist.push_back(OrigUse);
5000 while (!Worklist.empty()) {
5001 auto *U = Worklist.pop_back_val();
5002 Value *V = U->get();
5003 if (!CanPushFreeze(V)) {
5004 // If we can't push through the original instruction, abort the transform.
5005 if (U == OrigUse)
5006 return nullptr;
5007
5008 auto *UserI = cast<Instruction>(U->getUser());
5009 Builder.SetInsertPoint(UserI);
5010 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5011 U->set(Frozen);
5012 continue;
5013 }
5014
5015 auto *I = cast<Instruction>(V);
5016 if (!Visited.insert(I).second)
5017 continue;
5018
5019 // reverse() to emit freezes in a more natural order.
5020 for (Use &Op : reverse(I->operands())) {
5021 Value *OpV = Op.get();
5022 if (isGuaranteedNotToBeUndefOrPoison(OpV))
5023 continue;
5024 Worklist.push_back(&Op);
5025 }
5026
5027 I->dropPoisonGeneratingAnnotations();
5028 this->Worklist.add(I);
5029 }
5030
5031 return OrigUse->get();
5032}
5033
5034Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5035 PHINode *PN) {
5036 // Detect whether this is a recurrence with a start value and some number of
5037 // backedge values. We'll check whether we can push the freeze through the
5038 // backedge values (possibly dropping poison flags along the way) until we
5039 // reach the phi again. In that case, we can move the freeze to the start
5040 // value.
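  // As an illustrative sketch (value names are made up), for a recurrence like
  //   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add nuw i32 %iv, 1
  //   %fr = freeze i32 %iv
  // the freeze can instead be applied to %start in the entry block, with the
  // nuw flag dropped from the add, so the backedge value introduces no new
  // poison.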
5041 Use *StartU = nullptr;
5042 SmallVector<Value *> Worklist;
5043 for (Use &U : PN->incoming_values()) {
5044 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5045 // Add backedge value to worklist.
5046 Worklist.push_back(U.get());
5047 continue;
5048 }
5049
5050 // Don't bother handling multiple start values.
5051 if (StartU)
5052 return nullptr;
5053 StartU = &U;
5054 }
5055
5056 if (!StartU || Worklist.empty())
5057 return nullptr; // Not a recurrence.
5058
5059 Value *StartV = StartU->get();
5060 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5061 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5062 // We can't insert freeze if the start value is the result of the
5063 // terminator (e.g. an invoke).
5064 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5065 return nullptr;
5066
5067 SmallPtrSet<Value *, 32> Visited;
5068 SmallVector<Instruction *> DropFlags;
5069 while (!Worklist.empty()) {
5070 Value *V = Worklist.pop_back_val();
5071 if (!Visited.insert(V).second)
5072 continue;
5073
5074 if (Visited.size() > 32)
5075 return nullptr; // Limit the total number of values we inspect.
5076
5077 // Assume that PN is non-poison, because it will be after the transform.
5078 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5079 continue;
5080
5081 Instruction *I = dyn_cast<Instruction>(V);
5082 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5083 /*ConsiderFlagsAndMetadata*/ false))
5084 return nullptr;
5085
5086 DropFlags.push_back(I);
5087 append_range(Worklist, I->operands());
5088 }
5089
5090 for (Instruction *I : DropFlags)
5091 I->dropPoisonGeneratingAnnotations();
5092
5093 if (StartNeedsFreeze) {
5094 Builder.SetInsertPoint(StartBB->getTerminator());
5095 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5096 StartV->getName() + ".fr");
5097 replaceUse(*StartU, FrozenStartV);
5098 }
5099 return replaceInstUsesWith(FI, PN);
5100}
5101
5102bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5103 Value *Op = FI.getOperand(0);
5104
5105 if (isa<Constant>(Op) || Op->hasOneUse())
5106 return false;
5107
5108 // Move the freeze directly after the definition of its operand, so that
5109 // it dominates the maximum number of uses. Note that it may not dominate
5110 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5111 // the normal/default destination. This is why the domination check in the
5112 // replacement below is still necessary.
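  // As an illustrative sketch (names are made up): for
  //   %x = ...
  //   call void @use(i8 %x)
  //   %x.fr = freeze i8 %x
  // the freeze is moved to just after the definition of %x, and the earlier
  // use is rewritten to %x.fr as long as the freeze dominates it.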
5113 BasicBlock::iterator MoveBefore;
5114 if (isa<Argument>(Op)) {
5115 MoveBefore =
5116 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5117 } else {
5118 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5119 if (!MoveBeforeOpt)
5120 return false;
5121 MoveBefore = *MoveBeforeOpt;
5122 }
5123
5124 // Re-point iterator to come after any debug-info records.
5125 MoveBefore.setHeadBit(false);
5126
5127 bool Changed = false;
5128 if (&FI != &*MoveBefore) {
5129 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5130 Changed = true;
5131 }
5132
5133 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5134 bool Dominates = DT.dominates(&FI, U);
5135 Changed |= Dominates;
5136 return Dominates;
5137 });
5138
5139 return Changed;
5140}
5141
5142// Check if any direct or bitcast user of this value is a shuffle instruction.
5143static bool isUsedWithinShuffleVector(Value *V) {
5144 for (auto *U : V->users()) {
5145 if (isa<ShuffleVectorInst>(U))
5146 return true;
5147 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5148 return true;
5149 }
5150 return false;
5151}
5152
5153Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5154 Value *Op0 = I.getOperand(0);
5155
5156 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5157 return replaceInstUsesWith(I, V);
5158
5159 // freeze (phi const, x) --> phi const, (freeze x)
5160 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5161 if (Instruction *NV = foldOpIntoPhi(I, PN))
5162 return NV;
5163 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5164 return NV;
5165 }
5166
5167 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5168 return replaceInstUsesWith(I, NI);
5169
5170 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5171 // - or: pick -1
5172 // - select's condition: if the true value is constant, choose it by making
5173 // the condition true.
5174 // - phi: pick the common constant across operands
5175 // - default: pick 0
5176 //
5177 // Note that this transform is intentionally done here rather than
5178 // via an analysis in InstSimplify or at individual user sites. That is
5179 // because we must produce the same value for all uses of the freeze -
5180 // it's the reason "freeze" exists!
5181 //
5182 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5183 // duplicating logic for binops at least.
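  // As an illustrative sketch (names are made up):
  //   %f = freeze i32 undef
  //   %o = or i32 %f, %x
  // picking -1 for %f lets the 'or' simplify to -1; if %f only fed a select
  // condition with a constant true arm, true would be picked instead. When
  // the uses disagree on a preferred constant, zero is used.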
5184 auto getUndefReplacement = [&](Type *Ty) {
5185 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5186 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5187 // removed.
5188 Constant *BestValue = nullptr;
5189 for (Value *V : PN.incoming_values()) {
5190 if (match(V, m_Freeze(m_Undef())))
5191 continue;
5192
5193 Constant *C = dyn_cast<Constant>(V);
5194 if (!C)
5195 return nullptr;
5196
5197 if (!isGuaranteedNotToBeUndefOrPoison(C))
5198 return nullptr;
5199
5200 if (BestValue && BestValue != C)
5201 return nullptr;
5202
5203 BestValue = C;
5204 }
5205 return BestValue;
5206 };
5207
5208 Value *NullValue = Constant::getNullValue(Ty);
5209 Value *BestValue = nullptr;
5210 for (auto *U : I.users()) {
5211 Value *V = NullValue;
5212 if (match(U, m_Or(m_Value(), m_Value())))
5213 V = ConstantInt::getAllOnesValue(Ty);
5214 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5215 V = ConstantInt::getTrue(Ty);
5216 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5217 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5218 V = NullValue;
5219 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5220 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5221 V = MaybeV;
5222 }
5223
5224 if (!BestValue)
5225 BestValue = V;
5226 else if (BestValue != V)
5227 BestValue = NullValue;
5228 }
5229 assert(BestValue && "Must have at least one use");
5230 assert(BestValue != &I && "Cannot replace with itself");
5231 return BestValue;
5232 };
5233
5234 if (match(Op0, m_Undef())) {
5235 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5236 // a shuffle. This may improve codegen for shuffles that allow
5237 // unspecified inputs.
5238 if (isUsedWithinShuffleVector(&I))
5239 return nullptr;
5240 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5241 }
5242
5243 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5244 Type *Ty = C->getType();
5245 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5246 if (!VTy)
5247 return nullptr;
5248 unsigned NumElts = VTy->getNumElements();
5249 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5250 for (unsigned i = 0; i != NumElts; ++i) {
5251 Constant *EltC = C->getAggregateElement(i);
5252 if (EltC && !match(EltC, m_Undef())) {
5253 BestValue = EltC;
5254 break;
5255 }
5256 }
5257 return Constant::replaceUndefsWith(C, BestValue);
5258 };
5259
5260 Constant *C;
5261 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5262 !C->containsConstantExpression()) {
5263 if (Constant *Repl = getFreezeVectorReplacement(C))
5264 return replaceInstUsesWith(I, Repl);
5265 }
5266
5267 // Replace uses of Op with freeze(Op).
5268 if (freezeOtherUses(I))
5269 return &I;
5270
5271 return nullptr;
5272}
5273
5274/// Check for case where the call writes to an otherwise dead alloca. This
5275/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5276/// helper *only* analyzes the write; doesn't check any other legality aspect.
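/// As an illustrative sketch (the function name is made up), C code such as
///   int tmp;
///   get_value(&tmp);   // result never read
/// lowers to a call whose only write targets an otherwise dead alloca, which
/// is the situation this helper detects.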
5277static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5278 auto *CB = dyn_cast<CallBase>(I);
5279 if (!CB)
5280 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5281 // to allow reload along used path as described below. Otherwise, this
5282 // is simply a store to a dead allocation which will be removed.
5283 return false;
5284 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5285 if (!Dest)
5286 return false;
5287 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5288 if (!AI)
5289 // TODO: allow malloc?
5290 return false;
5291 // TODO: allow memory access dominated by move point? Note that since AI
5292 // could have a reference to itself captured by the call, we would need to
5293 // account for cycles in doing so.
5294 SmallVector<const User *> AllocaUsers;
5295 SmallPtrSet<const User *, 4> Visited;
5296 auto pushUsers = [&](const Instruction &I) {
5297 for (const User *U : I.users()) {
5298 if (Visited.insert(U).second)
5299 AllocaUsers.push_back(U);
5300 }
5301 };
5302 pushUsers(*AI);
5303 while (!AllocaUsers.empty()) {
5304 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5305 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5306 pushUsers(*UserI);
5307 continue;
5308 }
5309 if (UserI == CB)
5310 continue;
5311 // TODO: support lifetime.start/end here
5312 return false;
5313 }
5314 return true;
5315}
5316
5317/// Try to move the specified instruction from its current block into the
5318/// beginning of DestBlock, which can only happen if it's safe to move the
5319/// instruction past all of the instructions between it and the end of its
5320/// block.
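/// As an illustrative sketch (names are made up), a value like
///   %v = mul i32 %a, %b
/// whose only uses sit in a single successor block can be moved to the start
/// of that block, provided it is safe to move it past every instruction that
/// follows it in its current block.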
5321bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5322 BasicBlock *DestBlock) {
5323 BasicBlock *SrcBlock = I->getParent();
5324
5325 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5326 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5327 I->isTerminator())
5328 return false;
5329
5330 // Do not sink static or dynamic alloca instructions. Static allocas must
5331 // remain in the entry block, and dynamic allocas must not be sunk in between
5332 // a stacksave / stackrestore pair, which would incorrectly shorten its
5333 // lifetime.
5334 if (isa<AllocaInst>(I))
5335 return false;
5336
5337 // Do not sink into catchswitch blocks.
5338 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5339 return false;
5340
5341 // Do not sink convergent call instructions.
5342 if (auto *CI = dyn_cast<CallInst>(I)) {
5343 if (CI->isConvergent())
5344 return false;
5345 }
5346
5347 // Unless we can prove that the memory write isn't visible except on the
5348 // path we're sinking to, we must bail.
5349 if (I->mayWriteToMemory()) {
5350 if (!SoleWriteToDeadLocal(I, TLI))
5351 return false;
5352 }
5353
5354 // We can only sink load instructions if there is nothing between the load and
5355 // the end of block that could change the value.
5356 if (I->mayReadFromMemory() &&
5357 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5358 // We don't want to do any sophisticated alias analysis, so we only check
5359 // the instructions after I in I's parent block if we try to sink to its
5360 // successor block.
5361 if (DestBlock->getUniquePredecessor() != I->getParent())
5362 return false;
5363 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5364 E = I->getParent()->end();
5365 Scan != E; ++Scan)
5366 if (Scan->mayWriteToMemory())
5367 return false;
5368 }
5369
5370 I->dropDroppableUses([&](const Use *U) {
5371 auto *I = dyn_cast<Instruction>(U->getUser());
5372 if (I && I->getParent() != DestBlock) {
5373 Worklist.add(I);
5374 return true;
5375 }
5376 return false;
5377 });
5378 /// FIXME: We could remove droppable uses that are not dominated by
5379 /// the new position.
5380
5381 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5382 I->moveBefore(*DestBlock, InsertPos);
5383 ++NumSunkInst;
5384
5385 // Also sink all related debug uses from the source basic block. Otherwise we
5386 // get debug use before the def. Attempt to salvage debug uses first, to
5387 // maximise the range over which variables have a location. If we cannot salvage, then
5388 // mark the location undef: we know it was supposed to receive a new location
5389 // here, but that computation has been sunk.
5390 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5391 findDbgUsers(I, DbgVariableRecords);
5392 if (!DbgVariableRecords.empty())
5393 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5394 DbgVariableRecords);
5395
5396 // PS: there are numerous flaws with this behaviour, not least that right now
5397 // assignments can be re-ordered past other assignments to the same variable
5398 // if they use different Values. Creating more undef assignments can never be
5399 // undone. And salvaging all users outside of this block can unnecessarily
5400 // alter the lifetime of the live value that the variable refers to.
5401 // Some of these things can be resolved by tolerating debug use-before-defs in
5402 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5403 // being used for more architectures.
5404
5405 return true;
5406}
5407
5408void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5409 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5410 BasicBlock *DestBlock,
5411 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5412 // For all debug values in the destination block, the sunk instruction
5413 // will still be available, so they do not need to be dropped.
5414
5415 // Fetch all DbgVariableRecords not already in the destination.
5416 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5417 for (auto &DVR : DbgVariableRecords)
5418 if (DVR->getParent() != DestBlock)
5419 DbgVariableRecordsToSalvage.push_back(DVR);
5420
5421 // Fetch a second collection, of DbgVariableRecords in the source block that
5422 // we're going to sink.
5423 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5424 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5425 if (DVR->getParent() == SrcBlock)
5426 DbgVariableRecordsToSink.push_back(DVR);
5427
5428 // Sort DbgVariableRecords according to their position in the block. This is a
5429 // partial order: DbgVariableRecords attached to different instructions will
5430 // be ordered by the instruction order, but DbgVariableRecords attached to the
5431 // same instruction won't have an order.
5432 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5433 return B->getInstruction()->comesBefore(A->getInstruction());
5434 };
5435 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5436
5437 // If there are two assignments to the same variable attached to the same
5438 // instruction, the ordering between the two assignments is important. Scan
5439 // for this (rare) case and establish which is the last assignment.
5440 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5441 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5442 if (DbgVariableRecordsToSink.size() > 1) {
5443 SmallDenseMap<InstVarPair, unsigned> CountMap;
5444 // Count how many assignments to each variable there is per instruction.
5445 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5446 DebugVariable DbgUserVariable =
5447 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5448 DVR->getDebugLoc()->getInlinedAt());
5449 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5450 }
5451
5452 // If there are any instructions with two assignments, add them to the
5453 // FilterOutMap to record that they need extra filtering.
5454 SmallPtrSet<const Instruction *, 4> DupSet;
5455 for (auto It : CountMap) {
5456 if (It.second > 1) {
5457 FilterOutMap[It.first] = nullptr;
5458 DupSet.insert(It.first.first);
5459 }
5460 }
5461
5462 // For all instruction/variable pairs needing extra filtering, find the
5463 // latest assignment.
5464 for (const Instruction *Inst : DupSet) {
5465 for (DbgVariableRecord &DVR :
5466 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5467 DebugVariable DbgUserVariable =
5468 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5469 DVR.getDebugLoc()->getInlinedAt());
5470 auto FilterIt =
5471 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5472 if (FilterIt == FilterOutMap.end())
5473 continue;
5474 if (FilterIt->second != nullptr)
5475 continue;
5476 FilterIt->second = &DVR;
5477 }
5478 }
5479 }
5480
5481 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5482 // out any duplicate assignments identified above.
5483 SmallVector<DbgVariableRecord *, 2> DVRClones;
5484 SmallSet<DebugVariable, 4> SunkVariables;
5485 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5487 continue;
5488
5489 DebugVariable DbgUserVariable =
5490 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5491 DVR->getDebugLoc()->getInlinedAt());
5492
5493 // For any variable where there were multiple assignments in the same place,
5494 // ignore all but the last assignment.
5495 if (!FilterOutMap.empty()) {
5496 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5497 auto It = FilterOutMap.find(IVP);
5498
5499 // Filter out.
5500 if (It != FilterOutMap.end() && It->second != DVR)
5501 continue;
5502 }
5503
5504 if (!SunkVariables.insert(DbgUserVariable).second)
5505 continue;
5506
5507 if (DVR->isDbgAssign())
5508 continue;
5509
5510 DVRClones.emplace_back(DVR->clone());
5511 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5512 }
5513
5514 // Perform salvaging without the clones, then sink the clones.
5515 if (DVRClones.empty())
5516 return;
5517
5518 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5519
5520 // The clones are in reverse order of original appearance. Assert that the
5521 // head bit is set on the iterator as we _should_ have received it via
5522 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5523 // we'll repeatedly insert at the head, such as:
5524 // DVR-3 (third insertion goes here)
5525 // DVR-2 (second insertion goes here)
5526 // DVR-1 (first insertion goes here)
5527 // Any-Prior-DVRs
5528 // InsertPtInst
5529 assert(InsertPos.getHeadBit());
5530 for (DbgVariableRecord *DVRClone : DVRClones) {
5531 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5532 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5533 }
5534}
5535
5536bool InstCombinerImpl::run() {
5537 while (!Worklist.isEmpty()) {
5538 // Walk deferred instructions in reverse order, and push them to the
5539 // worklist, which means they'll end up popped from the worklist in-order.
5540 while (Instruction *I = Worklist.popDeferred()) {
5541 // Check to see if we can DCE the instruction. We do this already here to
5542 // reduce the number of uses and thus allow other folds to trigger.
5543 // Note that eraseInstFromFunction() may push additional instructions on
5544 // the deferred worklist, so this will DCE whole instruction chains.
5545 if (isInstructionTriviallyDead(I, &TLI)) {
5546 eraseInstFromFunction(*I);
5547 ++NumDeadInst;
5548 continue;
5549 }
5550
5551 Worklist.push(I);
5552 }
5553
5554 Instruction *I = Worklist.removeOne();
5555 if (I == nullptr) continue; // skip null values.
5556
5557 // Check to see if we can DCE the instruction.
5558 if (isInstructionTriviallyDead(I, &TLI)) {
5559 eraseInstFromFunction(*I);
5560 ++NumDeadInst;
5561 continue;
5562 }
5563
5564 if (!DebugCounter::shouldExecute(VisitCounter))
5565 continue;
5566
5567 // See if we can trivially sink this instruction to its user if we can
5568 // prove that the successor is not executed more frequently than our block.
5569 // Return the UserBlock if successful.
5570 auto getOptionalSinkBlockForInst =
5571 [this](Instruction *I) -> std::optional<BasicBlock *> {
5572 if (!EnableCodeSinking)
5573 return std::nullopt;
5574
5575 BasicBlock *BB = I->getParent();
5576 BasicBlock *UserParent = nullptr;
5577 unsigned NumUsers = 0;
5578
5579 for (Use &U : I->uses()) {
5580 User *User = U.getUser();
5581 if (User->isDroppable())
5582 continue;
5583 if (NumUsers > MaxSinkNumUsers)
5584 return std::nullopt;
5585
5586 Instruction *UserInst = cast<Instruction>(User);
5587 // Special handling for Phi nodes - get the block the use occurs in.
5588 BasicBlock *UserBB = UserInst->getParent();
5589 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5590 UserBB = PN->getIncomingBlock(U);
5591 // Bail out if we have uses in different blocks. We don't do any
5592 // sophisticated analysis (i.e. finding the NearestCommonDominator of these
5593 // use blocks).
5594 if (UserParent && UserParent != UserBB)
5595 return std::nullopt;
5596 UserParent = UserBB;
5597
5598 // Make sure these checks are done only once, naturally we do the checks
5599 // the first time we get the userparent, this will save compile time.
5600 if (NumUsers == 0) {
5601 // Try sinking to another block. If that block is unreachable, then do
5602 // not bother. SimplifyCFG should handle it.
5603 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5604 return std::nullopt;
5605
5606 auto *Term = UserParent->getTerminator();
5607 // See if the user is one of our successors that has only one
5608 // predecessor, so that we don't have to split the critical edge.
5609 // Another option where we can sink is a block that ends with a
5610 // terminator that does not pass control to other block (such as
5611 // return or unreachable or resume). In this case:
5612 // - I dominates the User (by SSA form);
5613 // - the User will be executed at most once.
5614 // So sinking I down to User is always profitable or neutral.
5615 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5616 return std::nullopt;
5617
5618 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5619 }
5620
5621 NumUsers++;
5622 }
5623
5624 // No users, or only droppable users.
5625 if (!UserParent)
5626 return std::nullopt;
5627
5628 return UserParent;
5629 };
5630
5631 auto OptBB = getOptionalSinkBlockForInst(I);
5632 if (OptBB) {
5633 auto *UserParent = *OptBB;
5634 // Okay, the CFG is simple enough, try to sink this instruction.
5635 if (tryToSinkInstruction(I, UserParent)) {
5636 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5637 MadeIRChange = true;
5638 // We'll add uses of the sunk instruction below, but since
5639 // sinking can expose opportunities for its *operands*, add
5640 // them to the worklist.
5641 for (Use &U : I->operands())
5642 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5643 Worklist.push(OpI);
5644 }
5645 }
5646
5647 // Now that we have an instruction, try combining it to simplify it.
5648 Builder.SetInsertPoint(I);
5649 Builder.CollectMetadataToCopy(
5650 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5651
5652#ifndef NDEBUG
5653 std::string OrigI;
5654#endif
5655 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5656 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5657
5658 if (Instruction *Result = visit(*I)) {
5659 ++NumCombined;
5660 // Should we replace the old instruction with a new one?
5661 if (Result != I) {
5662 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5663 << " New = " << *Result << '\n');
5664
5665 // We copy the old instruction's DebugLoc to the new instruction, unless
5666 // InstCombine already assigned a DebugLoc to it, in which case we
5667 // should trust the more specifically selected DebugLoc.
5668 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5669 // We also copy annotation metadata to the new instruction.
5670 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5671 // Everything uses the new instruction now.
5672 I->replaceAllUsesWith(Result);
5673
5674 // Move the name to the new instruction first.
5675 Result->takeName(I);
5676
5677 // Insert the new instruction into the basic block...
5678 BasicBlock *InstParent = I->getParent();
5679 BasicBlock::iterator InsertPos = I->getIterator();
5680
5681 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5682 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5683 // We need to fix up the insertion point.
5684 if (isa<PHINode>(I)) // PHI -> Non-PHI
5685 InsertPos = InstParent->getFirstInsertionPt();
5686 else // Non-PHI -> PHI
5687 InsertPos = InstParent->getFirstNonPHIIt();
5688 }
5689
5690 Result->insertInto(InstParent, InsertPos);
5691
5692 // Push the new instruction and any users onto the worklist.
5693 Worklist.pushUsersToWorkList(*Result);
5694 Worklist.push(Result);
5695
5696 eraseInstFromFunction(*I);
5697 } else {
5698 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5699 << " New = " << *I << '\n');
5700
5701 // If the instruction was modified, it's possible that it is now dead.
5702 // If so, remove it.
5703 if (isInstructionTriviallyDead(I, &TLI)) {
5704 eraseInstFromFunction(*I);
5705 } else {
5706 Worklist.pushUsersToWorkList(*I);
5707 Worklist.push(I);
5708 }
5709 }
5710 MadeIRChange = true;
5711 }
5712 }
5713
5714 Worklist.zap();
5715 return MadeIRChange;
5716}
5717
5718// Track the scopes used by !alias.scope and !noalias. In a function, a
5719// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5720// by both sets. If not, the declaration of the scope can be safely omitted.
5721// The MDNode of the scope can be omitted as well for the instructions that are
5722// part of this function. We do not do that at this point, as this might become
5723// too time consuming to do.
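// As an illustrative sketch (the metadata numbering is made up), a call such as
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// can be dropped unless scope !2 appears both in some instruction's
// !alias.scope list and in some instruction's !noalias list.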
5724class AliasScopeTracker {
5725 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5726 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5727
5728public:
5729 void analyse(Instruction *I) {
5730 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5731 if (!I->hasMetadataOtherThanDebugLoc())
5732 return;
5733
5734 auto Track = [](Metadata *ScopeList, auto &Container) {
5735 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5736 if (!MDScopeList || !Container.insert(MDScopeList).second)
5737 return;
5738 for (const auto &MDOperand : MDScopeList->operands())
5739 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5740 Container.insert(MDScope);
5741 };
5742
5743 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5744 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5745 }
5746
5747 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5748 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5749 if (!Decl)
5750 return false;
5751
5752 assert(Decl->use_empty() &&
5753 "llvm.experimental.noalias.scope.decl in use ?");
5754 const MDNode *MDSL = Decl->getScopeList();
5755 assert(MDSL->getNumOperands() == 1 &&
5756 "llvm.experimental.noalias.scope should refer to a single scope");
5757 auto &MDOperand = MDSL->getOperand(0);
5758 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5759 return !UsedAliasScopesAndLists.contains(MD) ||
5760 !UsedNoAliasScopesAndLists.contains(MD);
5761
5762 // Not an MDNode? Throw it away.
5763 return true;
5764 }
5765};
5766
5767/// Populate the IC worklist from a function, by walking it in reverse
5768/// post-order and adding all reachable code to the worklist.
5769///
5770/// This has a couple of tricks to make the code faster and more powerful. In
5771/// particular, we constant fold and DCE instructions as we go, to avoid adding
5772/// them to the worklist (this significantly speeds up instcombine on code where
5773/// many instructions are dead or constant). Additionally, if we find a branch
5774/// whose condition is a known constant, we only visit the reachable successors.
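/// As an illustrative sketch, a block ending in
///   br i1 true, label %then, label %else
/// only has %then queued; if %else has no other live predecessors, its
/// incoming PHI values are replaced with poison and its instructions are
/// later removed as unreachable.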
5775bool InstCombinerImpl::prepareWorklist(Function &F) {
5776 bool MadeIRChange = false;
5777 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5778 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5779 DenseMap<Constant *, Constant *> FoldedConstants;
5780 AliasScopeTracker SeenAliasScopes;
5781
5782 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5783 for (BasicBlock *Succ : successors(BB))
5784 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5785 for (PHINode &PN : Succ->phis())
5786 for (Use &U : PN.incoming_values())
5787 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5788 U.set(PoisonValue::get(PN.getType()));
5789 MadeIRChange = true;
5790 }
5791 };
5792
5793 for (BasicBlock *BB : RPOT) {
5794 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5795 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5796 })) {
5797 HandleOnlyLiveSuccessor(BB, nullptr);
5798 continue;
5799 }
5800 LiveBlocks.insert(BB);
5801
5802 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5803 // ConstantProp instruction if trivially constant.
5804 if (!Inst.use_empty() &&
5805 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5806 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5807 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5808 << '\n');
5809 Inst.replaceAllUsesWith(C);
5810 ++NumConstProp;
5811 if (isInstructionTriviallyDead(&Inst, &TLI))
5812 Inst.eraseFromParent();
5813 MadeIRChange = true;
5814 continue;
5815 }
5816
5817 // See if we can constant fold its operands.
5818 for (Use &U : Inst.operands()) {
5819 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5820 continue;
5821
5822 auto *C = cast<Constant>(U);
5823 Constant *&FoldRes = FoldedConstants[C];
5824 if (!FoldRes)
5825 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5826
5827 if (FoldRes != C) {
5828 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5829 << "\n Old = " << *C
5830 << "\n New = " << *FoldRes << '\n');
5831 U = FoldRes;
5832 MadeIRChange = true;
5833 }
5834 }
5835
5836 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5837 // these call instructions consumes non-trivial amount of time and
5838 // provides no value for the optimization.
5839 if (!Inst.isDebugOrPseudoInst()) {
5840 InstrsForInstructionWorklist.push_back(&Inst);
5841 SeenAliasScopes.analyse(&Inst);
5842 }
5843 }
5844
5845 // If this is a branch or switch on a constant, mark only the single
5846 // live successor. Otherwise assume all successors are live.
5847 Instruction *TI = BB->getTerminator();
5848 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5849 if (isa<UndefValue>(BI->getCondition())) {
5850 // Branch on undef is UB.
5851 HandleOnlyLiveSuccessor(BB, nullptr);
5852 continue;
5853 }
5854 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5855 bool CondVal = Cond->getZExtValue();
5856 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5857 continue;
5858 }
5859 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5860 if (isa<UndefValue>(SI->getCondition())) {
5861 // Switch on undef is UB.
5862 HandleOnlyLiveSuccessor(BB, nullptr);
5863 continue;
5864 }
5865 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5866 HandleOnlyLiveSuccessor(BB,
5867 SI->findCaseValue(Cond)->getCaseSuccessor());
5868 continue;
5869 }
5870 }
5871 }
5872
5873 // Remove instructions inside unreachable blocks. This prevents the
5874 // instcombine code from having to deal with some bad special cases, and
5875 // reduces use counts of instructions.
5876 for (BasicBlock &BB : F) {
5877 if (LiveBlocks.count(&BB))
5878 continue;
5879
5880 unsigned NumDeadInstInBB;
5881 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
5882
5883 MadeIRChange |= NumDeadInstInBB != 0;
5884 NumDeadInst += NumDeadInstInBB;
5885 }
5886
5887 // Once we've found all of the instructions to add to instcombine's worklist,
5888 // add them in reverse order. This way instcombine will visit from the top
5889 // of the function down. This jives well with the way that it adds all uses
5890 // of instructions to the worklist after doing a transformation, thus avoiding
5891 // some N^2 behavior in pathological cases.
5892 Worklist.reserve(InstrsForInstructionWorklist.size());
5893 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5894 // DCE instruction if trivially dead. As we iterate in reverse program
5895 // order here, we will clean up whole chains of dead instructions.
5896 if (isInstructionTriviallyDead(Inst, &TLI) ||
5897 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5898 ++NumDeadInst;
5899 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5900 salvageDebugInfo(*Inst);
5901 Inst->eraseFromParent();
5902 MadeIRChange = true;
5903 continue;
5904 }
5905
5906 Worklist.push(Inst);
5907 }
5908
5909 return MadeIRChange;
5910}
5911
5912void InstCombinerImpl::computeBackEdges() {
5913 // Collect backedges.
5914 SmallPtrSet<BasicBlock *, 16> Visited;
5915 for (BasicBlock *BB : RPOT) {
5916 Visited.insert(BB);
5917 for (BasicBlock *Succ : successors(BB))
5918 if (Visited.contains(Succ))
5919 BackEdges.insert({BB, Succ});
5920 }
5921 ComputedBackEdges = true;
5922}
5923
5924static bool combineInstructionsOverFunction(
5925 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5926 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5927 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5928 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5929 const InstCombineOptions &Opts) {
5930 auto &DL = F.getDataLayout();
5931 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5932 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5933
5934 /// Builder - This is an IRBuilder that automatically inserts new
5935 /// instructions into the worklist when they are created.
5936 InstCombiner::BuilderTy Builder(
5937 F.getContext(), TargetFolder(DL),
5938 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5939 Worklist.add(I);
5940 if (auto *Assume = dyn_cast<AssumeInst>(I))
5941 AC.registerAssumption(Assume);
5942 }));
5943
5944 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5945
5946 // Lower dbg.declare intrinsics otherwise their value may be clobbered
5947 // by instcombiner.
5948 bool MadeIRChange = false;
5949 if (ShouldLowerDbgDeclare)
5950 MadeIRChange = LowerDbgDeclare(F);
5951
5952 // Iterate while there is work to do.
5953 unsigned Iteration = 0;
5954 while (true) {
5955 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
5956 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5957 << " on " << F.getName()
5958 << " reached; stopping without verifying fixpoint\n");
5959 break;
5960 }
5961
5962 ++Iteration;
5963 ++NumWorklistIterations;
5964 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5965 << F.getName() << "\n");
5966
5967 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
5968 BPI, PSI, DL, RPOT);
5969 IC.MaxArraySizeForCombine = MaxArraySize;
5970 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5971 MadeChangeInThisIteration |= IC.run();
5972 if (!MadeChangeInThisIteration)
5973 break;
5974
5975 MadeIRChange = true;
5976 if (Iteration > Opts.MaxIterations) {
5978 "Instruction Combining on " + Twine(F.getName()) +
5979 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5980 " iterations. " +
5981 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5982 "'instcombine-no-verify-fixpoint' to suppress this error.");
5983 }
5984 }
5985
5986 if (Iteration == 1)
5987 ++NumOneIteration;
5988 else if (Iteration == 2)
5989 ++NumTwoIterations;
5990 else if (Iteration == 3)
5991 ++NumThreeIterations;
5992 else
5993 ++NumFourOrMoreIterations;
5994
5995 return MadeIRChange;
5996}
5997
5999
6000void InstCombinePass::printPipeline(
6001 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6002 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6003 OS, MapClassName2PassName);
6004 OS << '<';
6005 OS << "max-iterations=" << Options.MaxIterations << ";";
6006 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6007 OS << '>';
6008}
6009
6010char InstCombinePass::ID = 0;
6011
6012PreservedAnalyses InstCombinePass::run(Function &F,
6013 FunctionAnalysisManager &AM) {
6014 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6015 // No changes since last InstCombine pass, exit early.
6016 if (LRT.shouldSkip(&ID))
6017 return PreservedAnalyses::all();
6018
6019 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6020 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6021 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6022 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6023 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6024
6025 auto *AA = &AM.getResult<AAManager>(F);
6026 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6027 ProfileSummaryInfo *PSI =
6028 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6029 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6030 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6031 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6032
6033 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6034 BFI, BPI, PSI, Options)) {
6035 // No changes, all analyses are preserved.
6036 LRT.update(&ID, /*Changed=*/false);
6037 return PreservedAnalyses::all();
6038 }
6039
6040 // Mark all the analyses that instcombine updates as preserved.
6041 PreservedAnalyses PA;
6042 LRT.update(&ID, /*Changed=*/true);
6043 PA.preserveSet<CFGAnalyses>();
6044 PA.preserve<LastRunTrackingAnalysis>();
6045 return PA;
6046}
6047
6063
6064bool InstructionCombiningPass::runOnFunction(Function &F) {
6065 if (skipFunction(F))
6066 return false;
6067
6068 // Required analyses.
6069 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6070 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6071 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6072 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6073 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6074 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6075
6076 // Optional analyses.
6077 ProfileSummaryInfo *PSI =
6078 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6079 BlockFrequencyInfo *BFI =
6080 (PSI && PSI->hasProfileSummary()) ?
6081 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6082 nullptr;
6083 BranchProbabilityInfo *BPI = nullptr;
6084 if (auto *WrapperPass =
6085 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6086 BPI = &WrapperPass->getBPI();
6087
6088 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6089 BFI, BPI, PSI, InstCombineOptions());
6090}
6091
6093
6097
6099 "Combine redundant instructions", false, false)
6110 "Combine redundant instructions", false, false)
6111
6112// Initialization Routines
6116
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define T
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:423
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1890
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:827
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1960
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:851
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
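A hypothetical usage sketch of the constructor helpers above: CreateNUW builds a free-standing binary instruction (here an add nuw) that still needs to be inserted into a basic block.

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Hypothetical helper: create `add nuw A, B` without inserting it anywhere yet.
BinaryOperator *makeNUWAdd(Value *A, Value *B) {
  return BinaryOperator::CreateNUW(Instruction::Add, A, B, "sum");
}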
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
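A small sketch of the predicate helpers above (function name hypothetical): swapping the operands of icmp ugt X, Y corresponds to ICMP_ULT, while negating its result corresponds to ICMP_ULE.

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void predicateExamples() {
  // Operand swap: ugt becomes ult.
  CmpInst::Predicate Swapped = CmpInst::getSwappedPredicate(CmpInst::ICMP_UGT);
  // Logical negation of the result: ugt becomes ule.
  CmpInst::Predicate Inverse = CmpInst::getInversePredicate(CmpInst::ICMP_UGT);
  (void)Swapped;
  (void)Inverse;
}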
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:776
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
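Illustrative only (helper name hypothetical): the static ConstantExpr builders above can be combined algebraically, for example negating an integer constant via the identity -C == (~C) + 1.

#include "llvm/IR/Constants.h"
using namespace llvm;

Constant *negateViaNotPlusOne(Constant *C) {
  Constant *One = ConstantInt::get(C->getType(), 1);
  return ConstantExpr::getAdd(ConstantExpr::getNot(C), One); // -C == ~C + 1
}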
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
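A sketch of the ConstantRange helpers above (function name hypothetical): makeExactICmpRegion builds the set of values satisfying a predicate against a constant, and contains tests membership.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// True iff X satisfies `icmp ult X, 8` at X's bit width.
bool isUnsignedLessThan8(const APInt &X) {
  ConstantRange CR = ConstantRange::makeExactICmpRegion(
      CmpInst::ICMP_ULT, APInt(X.getBitWidth(), 8));
  return CR.contains(X);
}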
Constant Vector Declarations.
Definition Constants.h:517
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2783
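An assumed-context sketch (the builder is presumed to be positioned at a valid insertion point; names are hypothetical): CreatePtrAdd emits an i8-element GEP that adds a byte offset to a pointer.

#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *emitByteOffset(IRBuilderBase &B, Value *Ptr, Value *ByteOffset) {
  // Emits: getelementptr inbounds i8, ptr %Ptr, <ty> %ByteOffset
  return B.CreatePtrAdd(Ptr, ByteOffset, "byte.ptr", GEPNoWrapFlags::inBounds());
}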
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Tracking metadata reference owned by Metadata.
Definition Metadata.h:900
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1468
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
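A common new-pass-manager idiom, shown here as a sketch rather than this pass's exact code: a transform that rewrites instructions but never alters the CFG can preserve all CFG-only analyses.

#include "llvm/IR/Analysis.h"
using namespace llvm;

static PreservedAnalyses reportResult(bool MadeChange) {
  if (!MadeChange)
    return PreservedAnalyses::all();  // nothing changed: everything is preserved
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();      // instruction rewrites keep the CFG intact
  return PA;
}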
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:107
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:279
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
op_iterator op_begin()
Definition User.h:284
const Use & getOperandUse(unsigned i) const
Definition User.h:245
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:115
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
bool use_empty() const
Definition Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:881
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
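A minimal sketch of the PatternMatch helpers above (function name hypothetical): match binds sub-patterns against a value, capturing operands and splat constants as it goes.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True iff V is `add X, C` for some value X and integer (or splat) constant C.
bool matchAddOfConstant(Value *V, Value *&X, const APInt *&C) {
  return match(V, m_Add(m_Value(X), m_APInt(C)));
}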
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2038
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:256
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2452
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1725
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1555
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2468
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition bit.h:186
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into an appropriate set of dbg.value records.
Definition Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:325
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
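A hedged sketch of simplifyBinOp above (helper name hypothetical; a populated SimplifyQuery is assumed): it returns an existing equivalent value when the binary operator folds away, or nullptr otherwise.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

Value *trySimplify(BinaryOperator &BO, const SimplifyQuery &Q) {
  // nullptr means no simplification was found; the caller keeps BO as-is.
  return simplifyBinOp(BO.getOpcode(), BO.getOperand(0), BO.getOperand(1), Q);
}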
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but without using information from the instruction's non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:1941
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2068
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:304
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:324
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:251
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const