1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
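// As a quick illustration of canonicalizations 5 and 6 above (an example added
// for exposition, not an exhaustive description of the pass):
//    %a = add i32 %X, %X        ; becomes  %a = shl i32 %X, 1
//    %b = mul i32 %X, 8         ; becomes  %b = shl i32 %X, 3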
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
111#include "llvm/Transforms/Utils/InstructionWorklist.h"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool>
136EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143static cl::opt<unsigned>
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
147// FIXME: Remove this flag when it is no longer necessary to convert
148// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
149// increases variable availability at the cost of accuracy. Variables that
150// cannot be promoted by mem2reg or SROA will be described as living in memory
151// for their entire lifetime. However, passes like DSE and instcombine can
152// delete stores to the alloca, leading to misleading and inaccurate debug
153// information. This flag can be removed when those passes are fixed.
154static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
155 cl::Hidden, cl::init(true));
156
157std::optional<Instruction *>
158InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
159 // Handle target specific intrinsics
160 if (II.getCalledFunction()->isTargetIntrinsic()) {
161 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
162 }
163 return std::nullopt;
164}
165
166std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
167 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
168 bool &KnownBitsComputed) {
169 // Handle target specific intrinsics
170 if (II.getCalledFunction()->isTargetIntrinsic()) {
171 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
172 *this, II, DemandedMask, Known, KnownBitsComputed);
173 }
174 return std::nullopt;
175}
176
177std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
178 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
179 APInt &PoisonElts2, APInt &PoisonElts3,
180 std::function<void(Instruction *, unsigned, APInt, APInt &)>
181 SimplifyAndSetOp) {
182 // Handle target specific intrinsics
183 if (II.getCalledFunction()->isTargetIntrinsic()) {
184 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
185 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
186 SimplifyAndSetOp);
187 }
188 return std::nullopt;
189}
190
191bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
192 // Approved exception for TTI use: This queries a legality property of the
193 // target, not a profitability heuristic. Ideally this should be part of
194 // DataLayout instead.
195 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
196}
197
198Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
199 if (!RewriteGEP)
200 return llvm::emitGEPOffset(&Builder, DL, GEP);
201
202 IRBuilderBase::InsertPointGuard Guard(Builder);
203 auto *Inst = dyn_cast<Instruction>(GEP);
204 if (Inst)
205 Builder.SetInsertPoint(Inst);
206
207 Value *Offset = EmitGEPOffset(GEP);
208 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
209 if (Inst && !GEP->hasAllConstantIndices() &&
210 !GEP->getSourceElementType()->isIntegerTy(8)) {
211 replaceInstUsesWith(
212 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
213 Offset, "", GEP->getNoWrapFlags()));
214 eraseInstFromFunction(*Inst);
215 }
216 return Offset;
217}
218
219Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
220 GEPNoWrapFlags NW, Type *IdxTy,
221 bool RewriteGEPs) {
222 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
223 if (Sum)
224 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
225 NW.isInBounds());
226 else
227 return Offset;
228 };
229
230 Value *Sum = nullptr;
231 Value *OneUseSum = nullptr;
232 Value *OneUseBase = nullptr;
233 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
234 for (GEPOperator *GEP : reverse(GEPs)) {
235 Value *Offset;
236 {
237 // Expand the offset at the point of the previous GEP to enable rewriting.
238 // However, use the original insertion point for calculating Sum.
239 IRBuilderBase::InsertPointGuard Guard(Builder);
240 auto *Inst = dyn_cast<Instruction>(GEP);
241 if (RewriteGEPs && Inst)
242 Builder.SetInsertPoint(Inst);
243
244 Offset = EmitGEPOffset(GEP);
245 if (Offset->getType() != IdxTy)
246 Offset = Builder.CreateVectorSplat(
247 cast<VectorType>(IdxTy)->getElementCount(), Offset);
248 if (GEP->hasOneUse()) {
249 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
250 OneUseSum = Add(OneUseSum, Offset);
251 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
252 if (!OneUseBase)
253 OneUseBase = GEP->getPointerOperand();
254 continue;
255 }
256
257 if (OneUseSum)
258 Offset = Add(OneUseSum, Offset);
259
260 // Rewrite the GEP to reuse the computed offset. This also includes
261 // offsets from preceding one-use GEPs.
262 if (RewriteGEPs && Inst &&
263 !(GEP->getSourceElementType()->isIntegerTy(8) &&
264 GEP->getOperand(1) == Offset)) {
265 replaceInstUsesWith(
266 *Inst,
267 Builder.CreateGEP(Builder.getInt8Ty(),
268 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
269 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
270 eraseInstFromFunction(*Inst);
271 }
272 }
273
274 Sum = Add(Sum, Offset);
275 OneUseSum = OneUseBase = nullptr;
276 OneUseFlags = GEPNoWrapFlags::all();
277 }
278 if (OneUseSum)
279 Sum = Add(Sum, OneUseSum);
280 if (!Sum)
281 return Constant::getNullValue(IdxTy);
282 return Sum;
283}
284
285/// Legal integers and common types are considered desirable. This is used to
286/// avoid creating instructions with types that may not be supported well by
287/// the backend.
288/// NOTE: This treats i8, i16 and i32 specially because they are common
289/// types in frontend languages.
290bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
291 switch (BitWidth) {
292 case 8:
293 case 16:
294 case 32:
295 return true;
296 default:
297 return DL.isLegalInteger(BitWidth);
298 }
299}
300
301/// Return true if it is desirable to convert an integer computation from a
302/// given bit width to a new bit width.
303/// We don't want to convert from a legal or desirable type (like i8) to an
304/// illegal type or from a smaller to a larger illegal type. A width of '1'
305/// is always treated as a desirable type because i1 is a fundamental type in
306/// IR, and there are many specialized optimizations for i1 types.
307/// Common/desirable widths are equally treated as legal to convert to, in
308/// order to open up more combining opportunities.
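// For example, assuming a typical datalayout where i32 and i64 are legal (an
// assumption for this illustration): i64 -> i32 and i64 -> i16 are accepted
// (shrinking to a desirable width), while i64 -> i128 is rejected (growing to
// an illegal width).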
309bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
310 unsigned ToWidth) const {
311 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
312 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
313
314 // Convert to desirable widths even if they are not legal types.
315 // Only shrink types, to prevent infinite loops.
316 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
317 return true;
318
319 // If this is a legal or desirable integer source type, and the result would be
320 // an illegal type, don't do the transformation.
321 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
322 return false;
323
324 // Otherwise, if both are illegal, do not increase the size of the result. We
325 // do allow things like i160 -> i64, but not i64 -> i160.
326 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
327 return false;
328
329 return true;
330}
331
332/// Return true if it is desirable to convert a computation from 'From' to 'To'.
333/// We don't want to convert from a legal to an illegal type or from a smaller
334/// to a larger illegal type. i1 is always treated as a legal type because it is
335/// a fundamental type in IR, and there are many specialized optimizations for
336/// i1 types.
337bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
338 // TODO: This could be extended to allow vectors. Datalayout changes might be
339 // needed to properly support that.
340 if (!From->isIntegerTy() || !To->isIntegerTy())
341 return false;
342
343 unsigned FromWidth = From->getPrimitiveSizeInBits();
344 unsigned ToWidth = To->getPrimitiveSizeInBits();
345 return shouldChangeType(FromWidth, ToWidth);
346}
347
348// Return true if No Signed Wrap should be maintained for I.
349// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
350// where both B and C should be ConstantInts, results in a constant that does
351// not overflow. This function only handles the Add/Sub/Mul opcodes. For
352// all other opcodes, the function conservatively returns false.
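// A hypothetical example: for "%r = add nsw i8 (add nsw i8 %X, 100), 20",
// reassociation needs "100 + 20", which fits in i8 (120), so nsw can be kept;
// with constants 100 and 30 the folded value would overflow i8 and nsw must be
// dropped.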
353static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
354 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
355 if (!OBO || !OBO->hasNoSignedWrap())
356 return false;
357
358 const APInt *BVal, *CVal;
359 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
360 return false;
361
362 // We reason about Add/Sub/Mul Only.
363 bool Overflow = false;
364 switch (I.getOpcode()) {
365 case Instruction::Add:
366 (void)BVal->sadd_ov(*CVal, Overflow);
367 break;
368 case Instruction::Sub:
369 (void)BVal->ssub_ov(*CVal, Overflow);
370 break;
371 case Instruction::Mul:
372 (void)BVal->smul_ov(*CVal, Overflow);
373 break;
374 default:
375 // Conservatively return false for other opcodes.
376 return false;
377 }
378 return !Overflow;
379}
380
381static bool hasNoUnsignedWrap(BinaryOperator &I) {
382 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
383 return OBO && OBO->hasNoUnsignedWrap();
384}
385
386static bool hasNoSignedWrap(BinaryOperator &I) {
387 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
388 return OBO && OBO->hasNoSignedWrap();
389}
390
391/// Conservatively clears subclassOptionalData after a reassociation or
392/// commutation. We preserve fast-math flags when applicable.
394static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
395 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
396 if (!FPMO) {
397 I.clearSubclassOptionalData();
398 return;
399 }
400
401 FastMathFlags FMF = I.getFastMathFlags();
402 I.clearSubclassOptionalData();
403 I.setFastMathFlags(FMF);
404}
405
406/// Combine constant operands of associative operations either before or after a
407/// cast to eliminate one of the associative operations:
408/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
409/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
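// An illustrative instance of the zext case (constants chosen for this
// sketch, not taken from the source):
//   %t = or i8 %x, 3
//   %z = zext i8 %t to i32
//   %r = or i32 %z, 8
// becomes
//   %z = zext i8 %x to i32
//   %r = or i32 %z, 11        ; 8 | zext(3) == 11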
410static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
411 InstCombinerImpl &IC) {
412 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
413 if (!Cast || !Cast->hasOneUse())
414 return false;
415
416 // TODO: Enhance logic for other casts and remove this check.
417 auto CastOpcode = Cast->getOpcode();
418 if (CastOpcode != Instruction::ZExt)
419 return false;
420
421 // TODO: Enhance logic for other BinOps and remove this check.
422 if (!BinOp1->isBitwiseLogicOp())
423 return false;
424
425 auto AssocOpcode = BinOp1->getOpcode();
426 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
427 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
428 return false;
429
430 Constant *C1, *C2;
431 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
432 !match(BinOp2->getOperand(1), m_Constant(C2)))
433 return false;
434
435 // TODO: This assumes a zext cast.
436 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
437 // to the destination type might lose bits.
438
439 // Fold the constants together in the destination type:
440 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
441 const DataLayout &DL = IC.getDataLayout();
442 Type *DestTy = C1->getType();
443 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
444 if (!CastC2)
445 return false;
446 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
447 if (!FoldedC)
448 return false;
449
450 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
451 IC.replaceOperand(*BinOp1, 1, FoldedC);
452 BinOp2->dropPoisonGeneratingFlags();
453 Cast->dropPoisonGeneratingFlags();
454 return true;
455}
456
457// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
458// inttoptr ( ptrtoint (x) ) --> x
459Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
460 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
461 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
462 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
463 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
464 Type *CastTy = IntToPtr->getDestTy();
465 if (PtrToInt &&
466 CastTy->getPointerAddressSpace() ==
467 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
468 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
469 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
470 return PtrToInt->getOperand(0);
471 }
472 return nullptr;
473}
474
475/// This performs a few simplifications for operators that are associative or
476/// commutative:
477///
478/// Commutative operators:
479///
480/// 1. Order operands such that they are listed from right (least complex) to
481/// left (most complex). This puts constants before unary operators before
482/// binary operators.
483///
484/// Associative operators:
485///
486/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
487/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
488///
489/// Associative and commutative operators:
490///
491/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
492/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
493/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
494/// if C1 and C2 are constants.
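// For example (an illustration of transform 2 only): "(and (and %x, 12), 4)"
// reassociates because "12 & 4" simplifies to 4, yielding "(and %x, 4)".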
495bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
496 Instruction::BinaryOps Opcode = I.getOpcode();
497 bool Changed = false;
498
499 do {
500 // Order operands such that they are listed from right (least complex) to
501 // left (most complex). This puts constants before unary operators before
502 // binary operators.
503 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
504 getComplexity(I.getOperand(1)))
505 Changed = !I.swapOperands();
506
507 if (I.isCommutative()) {
508 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
509 replaceOperand(I, 0, Pair->first);
510 replaceOperand(I, 1, Pair->second);
511 Changed = true;
512 }
513 }
514
515 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
516 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
517
518 if (I.isAssociative()) {
519 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
520 if (Op0 && Op0->getOpcode() == Opcode) {
521 Value *A = Op0->getOperand(0);
522 Value *B = Op0->getOperand(1);
523 Value *C = I.getOperand(1);
524
525 // Does "B op C" simplify?
526 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
527 // It simplifies to V. Form "A op V".
528 replaceOperand(I, 0, A);
529 replaceOperand(I, 1, V);
530 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
531 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
532
533 // Conservatively clear all optional flags since they may not be
534 // preserved by the reassociation. Reset nsw/nuw based on the above
535 // analysis.
536 ClearSubclassDataAfterReassociation(I);
537
538 // Note: this is only valid because SimplifyBinOp doesn't look at
539 // the operands to Op0.
540 if (IsNUW)
541 I.setHasNoUnsignedWrap(true);
542
543 if (IsNSW)
544 I.setHasNoSignedWrap(true);
545
546 Changed = true;
547 ++NumReassoc;
548 continue;
549 }
550 }
551
552 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
553 if (Op1 && Op1->getOpcode() == Opcode) {
554 Value *A = I.getOperand(0);
555 Value *B = Op1->getOperand(0);
556 Value *C = Op1->getOperand(1);
557
558 // Does "A op B" simplify?
559 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
560 // It simplifies to V. Form "V op C".
561 replaceOperand(I, 0, V);
562 replaceOperand(I, 1, C);
563 // Conservatively clear the optional flags, since they may not be
564 // preserved by the reassociation.
565 ClearSubclassDataAfterReassociation(I);
566 Changed = true;
567 ++NumReassoc;
568 continue;
569 }
570 }
571 }
572
573 if (I.isAssociative() && I.isCommutative()) {
574 if (simplifyAssocCastAssoc(&I, *this)) {
575 Changed = true;
576 ++NumReassoc;
577 continue;
578 }
579
580 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
581 if (Op0 && Op0->getOpcode() == Opcode) {
582 Value *A = Op0->getOperand(0);
583 Value *B = Op0->getOperand(1);
584 Value *C = I.getOperand(1);
585
586 // Does "C op A" simplify?
587 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
588 // It simplifies to V. Form "V op B".
589 replaceOperand(I, 0, V);
590 replaceOperand(I, 1, B);
591 // Conservatively clear the optional flags, since they may not be
592 // preserved by the reassociation.
593 ClearSubclassDataAfterReassociation(I);
594 Changed = true;
595 ++NumReassoc;
596 continue;
597 }
598 }
599
600 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
601 if (Op1 && Op1->getOpcode() == Opcode) {
602 Value *A = I.getOperand(0);
603 Value *B = Op1->getOperand(0);
604 Value *C = Op1->getOperand(1);
605
606 // Does "C op A" simplify?
607 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
608 // It simplifies to V. Form "B op V".
609 replaceOperand(I, 0, B);
610 replaceOperand(I, 1, V);
611 // Conservatively clear the optional flags, since they may not be
612 // preserved by the reassociation.
613 ClearSubclassDataAfterReassociation(I);
614 Changed = true;
615 ++NumReassoc;
616 continue;
617 }
618 }
619
620 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
621 // if C1 and C2 are constants.
622 Value *A, *B;
623 Constant *C1, *C2, *CRes;
624 if (Op0 && Op1 &&
625 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
626 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
627 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
628 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
629 bool IsNUW = hasNoUnsignedWrap(I) &&
630 hasNoUnsignedWrap(*Op0) &&
631 hasNoUnsignedWrap(*Op1);
632 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
633 BinaryOperator::CreateNUW(Opcode, A, B) :
634 BinaryOperator::Create(Opcode, A, B);
635
636 if (isa<FPMathOperator>(NewBO)) {
637 FastMathFlags Flags = I.getFastMathFlags() &
638 Op0->getFastMathFlags() &
639 Op1->getFastMathFlags();
640 NewBO->setFastMathFlags(Flags);
641 }
642 InsertNewInstWith(NewBO, I.getIterator());
643 NewBO->takeName(Op1);
644 replaceOperand(I, 0, NewBO);
645 replaceOperand(I, 1, CRes);
646 // Conservatively clear the optional flags, since they may not be
647 // preserved by the reassociation.
648 ClearSubclassDataAfterReassociation(I);
649 if (IsNUW)
650 I.setHasNoUnsignedWrap(true);
651
652 Changed = true;
653 continue;
654 }
655 }
656
657 // No further simplifications.
658 return Changed;
659 } while (true);
660}
661
662/// Return whether "X LOp (Y ROp Z)" is always equal to
663/// "(X LOp Y) ROp (X LOp Z)".
664static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
665 Instruction::BinaryOps ROp) {
666 // X & (Y | Z) <--> (X & Y) | (X & Z)
667 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
668 if (LOp == Instruction::And)
669 return ROp == Instruction::Or || ROp == Instruction::Xor;
670
671 // X | (Y & Z) <--> (X | Y) & (X | Z)
672 if (LOp == Instruction::Or)
673 return ROp == Instruction::And;
674
675 // X * (Y + Z) <--> (X * Y) + (X * Z)
676 // X * (Y - Z) <--> (X * Y) - (X * Z)
677 if (LOp == Instruction::Mul)
678 return ROp == Instruction::Add || ROp == Instruction::Sub;
679
680 return false;
681}
682
683/// Return whether "(X LOp Y) ROp Z" is always equal to
684/// "(X ROp Z) LOp (Y ROp Z)".
685static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
686 Instruction::BinaryOps ROp) {
687 if (Instruction::isCommutative(ROp))
688 return leftDistributesOverRight(ROp, LOp);
689
690 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
691 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
692
693 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
694 // but this requires knowing that the addition does not overflow and other
695 // such subtleties.
696}
697
698/// This function returns identity value for given opcode, which can be used to
699/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
700static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
701 if (isa<Constant>(V))
702 return nullptr;
703
704 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
705}
706
707/// This function predicates factorization using distributive laws. By default,
708/// it just returns the 'Op' inputs. But for special-cases like
709/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
710/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
711/// allow more factorization opportunities.
712static Instruction::BinaryOps
713getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
714 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
715 assert(Op && "Expected a binary operator");
716 LHS = Op->getOperand(0);
717 RHS = Op->getOperand(1);
718 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
719 Constant *C;
720 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
721 // X << C --> X * (1 << C)
722 RHS = ConstantFoldBinaryInstruction(
723 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
724 assert(RHS && "Constant folding of immediate constants failed");
725 return Instruction::Mul;
726 }
727 // TODO: We can add other conversions e.g. shr => div etc.
728 }
729 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
730 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
731 match(Op, m_LShr(m_NonNegative(), m_Value()))) {
732 // lshr nneg C, X --> ashr nneg C, X
733 return Instruction::AShr;
734 }
735 }
736 return Op->getOpcode();
737}
738
739/// This tries to simplify binary operations by factorizing out common terms
740/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
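// A small worked example (illustrative only): "(shl %x, 2) + %x" is treated,
// via getBinOpsForFactorization, as "(mul %x, 4) + (mul %x, 1)" and factors to
// "mul %x, 5".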
741static Value *tryFactorization(BinaryOperator &I, SimplifyQuery SQ,
742 InstCombiner::BuilderTy &Builder,
743 Instruction::BinaryOps InnerOpcode, Value *A,
744 Value *B, Value *C, Value *D) {
745 assert(A && B && C && D && "All values must be provided");
746
747 Value *V = nullptr;
748 Value *RetVal = nullptr;
749 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
750 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
751
752 // Does "X op' Y" always equal "Y op' X"?
753 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
754
755 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
756 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
757 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
758 // commutative case, "(A op' B) op (C op' A)"?
759 if (A == C || (InnerCommutative && A == D)) {
760 if (A != C)
761 std::swap(C, D);
762 // Consider forming "A op' (B op D)".
763 // If "B op D" simplifies then it can be formed with no cost.
764 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
765
766 // If "B op D" doesn't simplify then only go on if one of the existing
767 // operations "A op' B" and "C op' D" will be zapped as no longer used.
768 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
769 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
770 if (V)
771 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
772 }
773 }
774
775 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
776 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
777 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
778 // commutative case, "(A op' B) op (B op' D)"?
779 if (B == D || (InnerCommutative && B == C)) {
780 if (B != D)
781 std::swap(C, D);
782 // Consider forming "(A op C) op' B".
783 // If "A op C" simplifies then it can be formed with no cost.
784 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
785
786 // If "A op C" doesn't simplify then only go on if one of the existing
787 // operations "A op' B" and "C op' D" will be zapped as no longer used.
788 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
789 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
790 if (V)
791 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
792 }
793 }
794
795 if (!RetVal)
796 return nullptr;
797
798 ++NumFactor;
799 RetVal->takeName(&I);
800
801 // Try to add no-overflow flags to the final value.
802 if (isa<BinaryOperator>(RetVal)) {
803 bool HasNSW = false;
804 bool HasNUW = false;
805 if (isa<OverflowingBinaryOperator>(&I)) {
806 HasNSW = I.hasNoSignedWrap();
807 HasNUW = I.hasNoUnsignedWrap();
808 }
809 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
810 HasNSW &= LOBO->hasNoSignedWrap();
811 HasNUW &= LOBO->hasNoUnsignedWrap();
812 }
813
814 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
815 HasNSW &= ROBO->hasNoSignedWrap();
816 HasNUW &= ROBO->hasNoUnsignedWrap();
817 }
818
819 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
820 // We can propagate 'nsw' if we know that
821 // %Y = mul nsw i16 %X, C
822 // %Z = add nsw i16 %Y, %X
823 // =>
824 // %Z = mul nsw i16 %X, C+1
825 //
826 // iff C+1 isn't INT_MIN
827 const APInt *CInt;
828 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
829 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
830
831 // nuw can be propagated with any constant or nuw value.
832 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
833 }
834 }
835 return RetVal;
836}
837
838// If `I` has one Const operand and the other matches `(ctpop (not x))`,
839// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
840// This is only useful if the new subtract can fold, so we only handle the
841// following cases:
842// 1) (add/sub/disjoint_or C, (ctpop (not x))
843// -> (add/sub/disjoint_or C', (ctpop x))
844// 2) (cmp pred C, (ctpop (not x))
845// -> (cmp pred C', (ctpop x))
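// Illustrative example for case 1 (i32, constants chosen for this sketch):
//   %n = xor i32 %x, -1
//   %c = call i32 @llvm.ctpop.i32(i32 %n)
//   %r = add i32 %c, 10
// becomes "%r = sub i32 42, %cx" with %cx = ctpop(%x), since
// ctpop(~x) == 32 - ctpop(x) and 10 + 32 == 42.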
846Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
847 unsigned Opc = I->getOpcode();
848 unsigned ConstIdx = 1;
849 switch (Opc) {
850 default:
851 return nullptr;
852 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
853 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
854 // is constant.
855 case Instruction::Sub:
856 ConstIdx = 0;
857 break;
858 case Instruction::ICmp:
859 // Signed predicates aren't correct in some edge cases (e.g. for i2 types).
860 // As well, since (ctpop x) is known to be in [0, log2(BitWidth(x))], almost
861 // all signed comparisons against it are simplified to unsigned.
862 if (cast<ICmpInst>(I)->isSigned())
863 return nullptr;
864 break;
865 case Instruction::Or:
866 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
867 return nullptr;
868 [[fallthrough]];
869 case Instruction::Add:
870 break;
871 }
872
873 Value *Op;
874 // Find ctpop.
875 if (!match(I->getOperand(1 - ConstIdx),
876 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
877 return nullptr;
878
879 Constant *C;
880 // Check other operand is ImmConstant.
881 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
882 return nullptr;
883
884 Type *Ty = Op->getType();
885 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
886 // Need extra check for icmp. Note if this check is true, it generally means
887 // the icmp will simplify to true/false.
888 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
889 Constant *Cmp =
890 ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
891 if (!Cmp || !Cmp->isZeroValue())
892 return nullptr;
893 }
894
895 // Check we can invert `(not x)` for free.
896 bool Consumes = false;
897 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
898 return nullptr;
899 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
900 assert(NotOp != nullptr &&
901 "Desync between isFreeToInvert and getFreelyInverted");
902
903 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
904
905 Value *R = nullptr;
906
907 // Do the transformation here to avoid potentially introducing an infinite
908 // loop.
909 switch (Opc) {
910 case Instruction::Sub:
911 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
912 break;
913 case Instruction::Or:
914 case Instruction::Add:
915 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
916 break;
917 case Instruction::ICmp:
918 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
919 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
920 break;
921 default:
922 llvm_unreachable("Unhandled Opcode");
923 }
924 assert(R != nullptr);
925 return replaceInstUsesWith(*I, R);
926}
927
928// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
929// IFF
930// 1) the logic_shifts match
931// 2) either both binops are binops and one is `and` or
932// BinOp1 is `and`
933// (logic_shift (inv_logic_shift C1, C), C) == C1 or
934//
935// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
936//
937// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
938// IFF
939// 1) the logic_shifts match
940// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
941//
942// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
943//
944// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
945// IFF
946// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
947// 2) Binop2 is `not`
948//
949// -> (arithmetic_shift Binop1((not X), Y), Amt)
950
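// One concrete instance of the first pattern (illustrative, with BinOp1 ==
// `and` so no mask check is needed):
//   (and (or (lshr %x, 4), 1), (lshr %y, 4))
//     -> (lshr (and (or %x, 16), %y), 4)
// because lshr distributes over the bitwise ops and "shl 1, 4" == 16 is the
// mask shifted back up.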
951Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
952 const DataLayout &DL = I.getDataLayout();
953 auto IsValidBinOpc = [](unsigned Opc) {
954 switch (Opc) {
955 default:
956 return false;
957 case Instruction::And:
958 case Instruction::Or:
959 case Instruction::Xor:
960 case Instruction::Add:
961 // Skip Sub as we only match constant masks which will canonicalize to use
962 // add.
963 return true;
964 }
965 };
966
967 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
968 // constraints.
969 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
970 unsigned ShOpc) {
971 assert(ShOpc != Instruction::AShr);
972 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
973 ShOpc == Instruction::Shl;
974 };
975
976 auto GetInvShift = [](unsigned ShOpc) {
977 assert(ShOpc != Instruction::AShr);
978 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
979 };
980
981 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
982 unsigned ShOpc, Constant *CMask,
983 Constant *CShift) {
984 // If the BinOp1 is `and` we don't need to check the mask.
985 if (BinOpc1 == Instruction::And)
986 return true;
987
988 // For all other possible transfers we need complete distributable
989 // binop/shift (anything but `add` + `lshr`).
990 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
991 return false;
992
993 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
994 // vecs, otherwise the mask will be simplified and the following check will
995 // handle it).
996 if (BinOpc2 == Instruction::And)
997 return true;
998
999 // Otherwise, need mask that meets the below requirement.
1000 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1001 Constant *MaskInvShift =
1002 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1003 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1004 CMask;
1005 };
1006
1007 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1008 Constant *CMask, *CShift;
1009 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1010 if (!match(I.getOperand(ShOpnum),
1011 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1012 return nullptr;
1013 if (!match(I.getOperand(1 - ShOpnum),
1014 m_c_BinOp(m_CombineAnd(
1015 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1016 m_Value(ShiftedX)),
1017 m_Value(Mask))))
1018 return nullptr;
1019 // Make sure we are matching instruction shifts and not ConstantExpr
1020 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1021 auto *IX = dyn_cast<Instruction>(ShiftedX);
1022 if (!IY || !IX)
1023 return nullptr;
1024
1025 // LHS and RHS need same shift opcode
1026 unsigned ShOpc = IY->getOpcode();
1027 if (ShOpc != IX->getOpcode())
1028 return nullptr;
1029
1030 // Make sure binop is real instruction and not ConstantExpr
1031 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1032 if (!BO2)
1033 return nullptr;
1034
1035 unsigned BinOpc = BO2->getOpcode();
1036 // Make sure we have valid binops.
1037 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1038 return nullptr;
1039
1040 if (ShOpc == Instruction::AShr) {
1041 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1042 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1043 Value *NotX = Builder.CreateNot(X);
1044 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1045 return BinaryOperator::Create(
1046 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1047 }
1048
1049 return nullptr;
1050 }
1051
1052 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1053 // distribute to drop the shift irrelevant of constants.
1054 if (BinOpc == I.getOpcode() &&
1055 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1056 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1057 Value *NewBinOp1 = Builder.CreateBinOp(
1058 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1059 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1060 }
1061
1062 // Otherwise we can only distribute by constant shifting the mask, so
1063 // ensure we have constants.
1064 if (!match(Shift, m_ImmConstant(CShift)))
1065 return nullptr;
1066 if (!match(Mask, m_ImmConstant(CMask)))
1067 return nullptr;
1068
1069 // Check if we can distribute the binops.
1070 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1071 return nullptr;
1072
1073 Constant *NewCMask =
1074 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1075 Value *NewBinOp2 = Builder.CreateBinOp(
1076 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1077 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1078 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1079 NewBinOp1, CShift);
1080 };
1081
1082 if (Instruction *R = MatchBinOp(0))
1083 return R;
1084 return MatchBinOp(1);
1085}
1086
1087// (Binop (zext C), (select C, T, F))
1088// -> (select C, (binop 1, T), (binop 0, F))
1089//
1090// (Binop (sext C), (select C, T, F))
1091// -> (select C, (binop -1, T), (binop 0, F))
1092//
1093// Attempt to simplify binary operations into a select with folded args, when
1094// one operand of the binop is a select instruction and the other operand is a
1095// zext/sext extension, whose value is the select condition.
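// For example (an illustration, not from the source):
//   %e = zext i1 %c to i32
//   %s = select i1 %c, i32 4, i32 8
//   %r = add i32 %e, %s
// folds to "%r = select i1 %c, i32 5, i32 8", since %e is 1 in the true arm
// and 0 in the false arm.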
1096Instruction *
1097InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
1098 // TODO: this simplification may be extended to any speculatable instruction,
1099 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1100 Instruction::BinaryOps Opc = I.getOpcode();
1101 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1102 Value *A, *CondVal, *TrueVal, *FalseVal;
1103 Value *CastOp;
1104
1105 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1106 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1107 A->getType()->getScalarSizeInBits() == 1 &&
1108 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1109 m_Value(FalseVal)));
1110 };
1111
1112 // Make sure one side of the binop is a select instruction, and the other is a
1113 // zero/sign extension operating on an i1.
1114 if (MatchSelectAndCast(LHS, RHS))
1115 CastOp = LHS;
1116 else if (MatchSelectAndCast(RHS, LHS))
1117 CastOp = RHS;
1118 else
1119 return nullptr;
1120
1121 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1122 bool IsCastOpRHS = (CastOp == RHS);
1123 bool IsZExt = isa<ZExtInst>(CastOp);
1124 Constant *C;
1125
1126 if (IsTrueArm) {
1127 C = Constant::getNullValue(V->getType());
1128 } else if (IsZExt) {
1129 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1130 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1131 } else {
1132 C = Constant::getAllOnesValue(V->getType());
1133 }
1134
1135 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1136 : Builder.CreateBinOp(Opc, C, V);
1137 };
1138
1139 // If the value used in the zext/sext is the select condition, or the negated
1140 // of the select condition, the binop can be simplified.
1141 if (CondVal == A) {
1142 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1143 return SelectInst::Create(CondVal, NewTrueVal,
1144 NewFoldedConst(true, FalseVal));
1145 }
1146
1147 if (match(A, m_Not(m_Specific(CondVal)))) {
1148 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1149 return SelectInst::Create(CondVal, NewTrueVal,
1150 NewFoldedConst(false, FalseVal));
1151 }
1152
1153 return nullptr;
1154}
1155
1156Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1157 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1158 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1159 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1160 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1161 Value *A, *B, *C, *D;
1162 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1163
1164 if (Op0)
1165 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1166 if (Op1)
1167 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1168
1169 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1170 // a common term.
1171 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1172 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1173 return V;
1174
1175 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1176 // term.
1177 if (Op0)
1178 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1179 if (Value *V =
1180 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1181 return V;
1182
1183 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1184 // term.
1185 if (Op1)
1186 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1187 if (Value *V =
1188 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1189 return V;
1190
1191 return nullptr;
1192}
1193
1194/// This tries to simplify binary operations which some other binary operation
1195/// distributes over either by factorizing out common terms
1196/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1197/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1198/// Returns the simplified value, or null if it didn't simplify.
1199Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
1200 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1201 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1202 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1203 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1204
1205 // Factorization.
1206 if (Value *R = tryFactorizationFolds(I))
1207 return R;
1208
1209 // Expansion.
1210 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1211 // The instruction has the form "(A op' B) op C". See if expanding it out
1212 // to "(A op C) op' (B op C)" results in simplifications.
1213 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1214 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1215
1216 // Disable the use of undef because it's not safe to distribute undef.
1217 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1218 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1219 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1220
1221 // Do "A op C" and "B op C" both simplify?
1222 if (L && R) {
1223 // They do! Return "L op' R".
1224 ++NumExpand;
1225 C = Builder.CreateBinOp(InnerOpcode, L, R);
1226 C->takeName(&I);
1227 return C;
1228 }
1229
1230 // Does "A op C" simplify to the identity value for the inner opcode?
1231 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1232 // They do! Return "B op C".
1233 ++NumExpand;
1234 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1235 C->takeName(&I);
1236 return C;
1237 }
1238
1239 // Does "B op C" simplify to the identity value for the inner opcode?
1240 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1241 // They do! Return "A op C".
1242 ++NumExpand;
1243 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1244 C->takeName(&I);
1245 return C;
1246 }
1247 }
1248
1249 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1250 // The instruction has the form "A op (B op' C)". See if expanding it out
1251 // to "(A op B) op' (A op C)" results in simplifications.
1252 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1253 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1254
1255 // Disable the use of undef because it's not safe to distribute undef.
1256 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1257 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1258 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1259
1260 // Do "A op B" and "A op C" both simplify?
1261 if (L && R) {
1262 // They do! Return "L op' R".
1263 ++NumExpand;
1264 A = Builder.CreateBinOp(InnerOpcode, L, R);
1265 A->takeName(&I);
1266 return A;
1267 }
1268
1269 // Does "A op B" simplify to the identity value for the inner opcode?
1270 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1271 // They do! Return "A op C".
1272 ++NumExpand;
1273 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1274 A->takeName(&I);
1275 return A;
1276 }
1277
1278 // Does "A op C" simplify to the identity value for the inner opcode?
1279 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1280 // They do! Return "A op B".
1281 ++NumExpand;
1282 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1283 A->takeName(&I);
1284 return A;
1285 }
1286 }
1287
1288 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1289}
1290
1291static std::optional<std::pair<Value *, Value *>>
1292matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1293 if (LHS->getParent() != RHS->getParent())
1294 return std::nullopt;
1295
1296 if (LHS->getNumIncomingValues() < 2)
1297 return std::nullopt;
1298
1299 if (!equal(LHS->blocks(), RHS->blocks()))
1300 return std::nullopt;
1301
1302 Value *L0 = LHS->getIncomingValue(0);
1303 Value *R0 = RHS->getIncomingValue(0);
1304
1305 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1306 Value *L1 = LHS->getIncomingValue(I);
1307 Value *R1 = RHS->getIncomingValue(I);
1308
1309 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1310 continue;
1311
1312 return std::nullopt;
1313 }
1314
1315 return std::optional(std::pair(L0, R0));
1316}
1317
1318std::optional<std::pair<Value *, Value *>>
1319InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1320 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1321 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1322 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1323 return std::nullopt;
1324 switch (LHSInst->getOpcode()) {
1325 case Instruction::PHI:
1326 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1327 case Instruction::Select: {
1328 Value *Cond = LHSInst->getOperand(0);
1329 Value *TrueVal = LHSInst->getOperand(1);
1330 Value *FalseVal = LHSInst->getOperand(2);
1331 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1332 FalseVal == RHSInst->getOperand(1))
1333 return std::pair(TrueVal, FalseVal);
1334 return std::nullopt;
1335 }
1336 case Instruction::Call: {
1337 // Match min(a, b) and max(a, b)
1338 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1339 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1340 if (LHSMinMax && RHSMinMax &&
1341 LHSMinMax->getPredicate() ==
1342 ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
1343 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1344 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1345 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1346 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1347 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1348 return std::nullopt;
1349 }
1350 default:
1351 return std::nullopt;
1352 }
1353}
1354
1355Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1356 Value *LHS,
1357 Value *RHS) {
1358 Value *A, *B, *C, *D, *E, *F;
1359 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1360 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1361 if (!LHSIsSelect && !RHSIsSelect)
1362 return nullptr;
1363
1364 FastMathFlags FMF;
1365 BuilderTy::FastMathFlagGuard Guard(Builder);
1366 if (isa<FPMathOperator>(&I)) {
1367 FMF = I.getFastMathFlags();
1368 Builder.setFastMathFlags(FMF);
1369 }
1370
1371 Instruction::BinaryOps Opcode = I.getOpcode();
1372 SimplifyQuery Q = SQ.getWithInstruction(&I);
1373
1374 Value *Cond, *True = nullptr, *False = nullptr;
1375
1376 // Special-case for add/negate combination. Replace the zero in the negation
1377 // with the trailing add operand:
1378 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1379 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1380 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1381 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1382 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1383 return nullptr;
1384
1385 Value *N;
1386 if (True && match(FVal, m_Neg(m_Value(N)))) {
1387 Value *Sub = Builder.CreateSub(Z, N);
1388 return Builder.CreateSelect(Cond, True, Sub, I.getName());
1389 }
1390 if (False && match(TVal, m_Neg(m_Value(N)))) {
1391 Value *Sub = Builder.CreateSub(Z, N);
1392 return Builder.CreateSelect(Cond, Sub, False, I.getName());
1393 }
1394 return nullptr;
1395 };
1396
1397 if (LHSIsSelect && RHSIsSelect && A == D) {
1398 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1399 Cond = A;
1400 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1401 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1402
1403 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1404 if (False && !True)
1405 True = Builder.CreateBinOp(Opcode, B, E);
1406 else if (True && !False)
1407 False = Builder.CreateBinOp(Opcode, C, F);
1408 }
1409 } else if (LHSIsSelect && LHS->hasOneUse()) {
1410 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1411 Cond = A;
1412 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1413 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1414 if (Value *NewSel = foldAddNegate(B, C, RHS))
1415 return NewSel;
1416 } else if (RHSIsSelect && RHS->hasOneUse()) {
1417 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1418 Cond = D;
1419 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1420 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1421 if (Value *NewSel = foldAddNegate(E, F, LHS))
1422 return NewSel;
1423 }
1424
1425 if (!True || !False)
1426 return nullptr;
1427
1428 Value *SI = Builder.CreateSelect(Cond, True, False);
1429 SI->takeName(&I);
1430 return SI;
1431}
1432
1433/// Freely adapt every user of V as if V was changed to !V.
1434/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1435void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
1436 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1437 for (User *U : make_early_inc_range(I->users())) {
1438 if (U == IgnoredUser)
1439 continue; // Don't consider this user.
1440 switch (cast<Instruction>(U)->getOpcode()) {
1441 case Instruction::Select: {
1442 auto *SI = cast<SelectInst>(U);
1443 SI->swapValues();
1444 SI->swapProfMetadata();
1445 break;
1446 }
1447 case Instruction::Br: {
1448 BranchInst *BI = cast<BranchInst>(U);
1449 BI->swapSuccessors(); // swaps prof metadata too
1450 if (BPI)
1451 BPI->swapSuccEdgesProbabilities(BI->getParent());
1452 break;
1453 }
1454 case Instruction::Xor:
1455 replaceInstUsesWith(cast<Instruction>(*U), I);
1456 // Add to worklist for DCE.
1457 addToWorklist(cast<Instruction>(U));
1458 break;
1459 default:
1460 llvm_unreachable("Got unexpected user - out of sync with "
1461 "canFreelyInvertAllUsersOf() ?");
1462 }
1463 }
1464
1465 // Update pre-existing debug value uses.
1466 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1467 llvm::findDbgValues(I, DbgVariableRecords);
1468
1469 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1470 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1471 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1472 Idx != End; ++Idx)
1473 if (DbgVal->getVariableLocationOp(Idx) == I)
1474 DbgVal->setExpression(
1475 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1476 }
1477}
1478
1479/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1480/// constant zero (which is the 'negate' form).
1481Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1482 Value *NegV;
1483 if (match(V, m_Neg(m_Value(NegV))))
1484 return NegV;
1485
1486 // Constants can be considered to be negated values if they can be folded.
1487 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1488 return ConstantExpr::getNeg(C);
1489
1490 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1491 if (C->getType()->getElementType()->isIntegerTy())
1492 return ConstantExpr::getNeg(C);
1493
1494 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1495 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1496 Constant *Elt = CV->getAggregateElement(i);
1497 if (!Elt)
1498 return nullptr;
1499
1500 if (isa<UndefValue>(Elt))
1501 continue;
1502
1503 if (!isa<ConstantInt>(Elt))
1504 return nullptr;
1505 }
1506 return ConstantExpr::getNeg(CV);
1507 }
1508
1509 // Negate integer vector splats.
1510 if (auto *CV = dyn_cast<Constant>(V))
1511 if (CV->getType()->isVectorTy() &&
1512 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1513 return ConstantExpr::getNeg(CV);
1514
1515 return nullptr;
1516}
1517
1518// Try to fold:
1519// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1520// -> ({s|u}itofp (int_binop x, y))
1521// 2) (fp_binop ({s|u}itofp x), FpC)
1522// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1523//
1524// Assuming the sign of the cast for x/y is `OpsFromSigned`.
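// A worked example under assumed known bits (illustrative only):
//   %a = and i64 %p, 255
//   %b = and i64 %q, 255
//   %fa = uitofp i64 %a to float
//   %fb = uitofp i64 %b to float
//   %r = fadd float %fa, %fb
// Both casts are exact (the values fit in float's 24-bit precision) and the
// i64 add cannot overflow, so this can become:
//   %s = add nuw i64 %a, %b
//   %r = uitofp i64 %s to float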
1525Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1526 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1527 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1528
1529 Type *FPTy = BO.getType();
1530 Type *IntTy = IntOps[0]->getType();
1531
1532 unsigned IntSz = IntTy->getScalarSizeInBits();
1533 // This is the maximum number of in-use bits in the integer for which the
1534 // int -> fp casts are exact.
1535 unsigned MaxRepresentableBits =
1536 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1537
1538 // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1539 // checks later on.
1540 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1541
1542 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1543 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1544 auto IsNonZero = [&](unsigned OpNo) -> bool {
1545 if (OpsKnown[OpNo].hasKnownBits() &&
1546 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1547 return true;
1548 return isKnownNonZero(IntOps[OpNo], SQ);
1549 };
1550
1551 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1552 // NB: This matches the impl in ValueTracking, we just try to use cached
1553 // knownbits here. If we ever start supporting WithCache for
1554 // `isKnownNonNegative`, change this to an explicit call.
1555 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1556 };
1557
1558 // Check if we know for certain that ({s|u}itofp op) is exact.
1559 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1560 // Can we treat this operand as the desired sign?
1561 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1562 !IsNonNeg(OpNo))
1563 return false;
1564
1565 // If fp precision >= bitwidth(op) then it's exact.
1566 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1567 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1568 // handled specially. We can't, however, increase the bound arbitrarily for
1569 // `sitofp` as for larger sizes, it won't sign extend.
1570 if (MaxRepresentableBits < IntSz) {
1571 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1572 // numSignBits(op).
1573 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1574 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1575 if (OpsFromSigned)
1576 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1577 // Finally for unsigned check that fp precision >= bitwidth(op) -
1578 // numLeadingZeros(op).
1579 else {
1580 NumUsedLeadingBits[OpNo] =
1581 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1582 }
1583 }
1584 // NB: We could also check if op is known to be a power of 2 or zero (which
1585 // will always be representable). It's unlikely, however, that if we are
1586 // unable to bound op in any way, we will be able to pass the overflow checks
1587 // later on.
1588
1589 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1590 return false;
1591 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1592 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1593 IsNonZero(OpNo);
1594 };
1595
1596 // If we have a constant rhs, see if we can losslessly convert it to an int.
1597 if (Op1FpC != nullptr) {
1598 // Signed + Mul req non-zero
1599 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1600 !match(Op1FpC, m_NonZeroFP()))
1601 return nullptr;
1602
1603 Constant *Op1IntC = ConstantFoldCastOperand(
1604 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1605 IntTy, DL);
1606 if (Op1IntC == nullptr)
1607 return nullptr;
1608 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1609 : Instruction::UIToFP,
1610 Op1IntC, FPTy, DL) != Op1FpC)
1611 return nullptr;
1612
1613 // First try to keep sign of cast the same.
1614 IntOps[1] = Op1IntC;
1615 }
1616
1617 // Ensure lhs/rhs integer types match.
1618 if (IntTy != IntOps[1]->getType())
1619 return nullptr;
1620
1621 if (Op1FpC == nullptr) {
1622 if (!IsValidPromotion(1))
1623 return nullptr;
1624 }
1625 if (!IsValidPromotion(0))
1626 return nullptr;
1627
1628 // Finally, we check that the integer version of the binop will not overflow.
1629 Instruction::BinaryOps IntOpc;
1630 // Because of the precision check, we can often rule out overflows.
1631 bool NeedsOverflowCheck = true;
1632 // Try to conservatively rule out overflow based on the already done precision
1633 // checks.
1634 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1635 unsigned OverflowMaxCurBits =
1636 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1637 bool OutputSigned = OpsFromSigned;
1638 switch (BO.getOpcode()) {
1639 case Instruction::FAdd:
1640 IntOpc = Instruction::Add;
1641 OverflowMaxOutputBits += OverflowMaxCurBits;
1642 break;
1643 case Instruction::FSub:
1644 IntOpc = Instruction::Sub;
1645 OverflowMaxOutputBits += OverflowMaxCurBits;
1646 break;
1647 case Instruction::FMul:
1648 IntOpc = Instruction::Mul;
1649 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1650 break;
1651 default:
1652 llvm_unreachable("Unsupported binop");
1653 }
1654 // The precision check may have already ruled out overflow.
1655 if (OverflowMaxOutputBits < IntSz) {
1656 NeedsOverflowCheck = false;
1657 // We can bound the unsigned overflow of the sub to an in-range signed value
1658 // (this is what allows us to avoid the overflow check for sub).
1659 if (IntOpc == Instruction::Sub)
1660 OutputSigned = true;
1661 }
1662
1663 // Precision check did not rule out overflow, so need to check.
1664 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1665 // `IntOps[...]` arguments to `KnownOps[...]`.
1666 if (NeedsOverflowCheck &&
1667 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1668 return nullptr;
1669
1670 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1671 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1672 IntBO->setHasNoSignedWrap(OutputSigned);
1673 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1674 }
1675 if (OutputSigned)
1676 return new SIToFPInst(IntBinOp, FPTy);
1677 return new UIToFPInst(IntBinOp, FPTy);
1678}
1679
1680// Try to fold:
1681// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1682// -> ({s|u}itofp (int_binop x, y))
1683// 2) (fp_binop ({s|u}itofp x), FpC)
1684// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1685Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1686 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1687 Constant *Op1FpC = nullptr;
1688 // Check for:
1689 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1690 // 2) (binop ({s|u}itofp x), FpC)
1691 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1692 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1693 return nullptr;
1694
1695 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1696 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1697 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1698 return nullptr;
1699
1700 // Cache KnownBits a bit to potentially save some analysis.
1701 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1702
1703 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1704 // different constraints depending on the sign of the cast.
1705 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1706 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1707 IntOps, Op1FpC, OpsKnown))
1708 return R;
1709 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1710 Op1FpC, OpsKnown);
1711}
1712
1713/// A binop with a constant operand and a sign-extended boolean operand may be
1714/// converted into a select of constants by applying the binary operation to
1715/// the constant with the two possible values of the extended boolean (0 or -1).
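/// For example (illustrative):
///   %s = sext i1 %b to i32
///   %r = add i32 %s, 42
/// -->
///   %r = select i1 %b, i32 41, i32 42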
1716Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1717 // TODO: Handle non-commutative binop (constant is operand 0).
1718 // TODO: Handle zext.
1719 // TODO: Peek through 'not' of cast.
1720 Value *BO0 = BO.getOperand(0);
1721 Value *BO1 = BO.getOperand(1);
1722 Value *X;
1723 Constant *C;
1724 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1725 !X->getType()->isIntOrIntVectorTy(1))
1726 return nullptr;
1727
1728 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1729 Constant *Ones = Constant::getAllOnesValue(BO.getType());
1730 Constant *Zero = Constant::getNullValue(BO.getType());
1731 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1732 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1733 return SelectInst::Create(X, TVal, FVal);
1734}
1735
1736 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1737 bool IsTrueArm) {
1738 SmallVector<Value *> Ops;
1739 for (Value *Op : I.operands()) {
1740 Value *V = nullptr;
1741 if (Op == SI) {
1742 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1743 } else if (match(SI->getCondition(),
1746 m_Specific(Op), m_Value(V))) &&
1748 // Pass
1749 } else {
1750 V = Op;
1751 }
1752 Ops.push_back(V);
1753 }
1754
1755 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1756}
1757
1758 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1759 Value *NewOp, InstCombiner &IC) {
1760 Instruction *Clone = I.clone();
1761 Clone->replaceUsesOfWith(SI, NewOp);
1763 IC.InsertNewInstBefore(Clone, I.getIterator());
1764 return Clone;
1765}
1766
1767 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1768 bool FoldWithMultiUse) {
1769 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1770 if (!SI->hasOneUse() && !FoldWithMultiUse)
1771 return nullptr;
1772
1773 Value *TV = SI->getTrueValue();
1774 Value *FV = SI->getFalseValue();
1775
1776 // Bool selects with constant operands can be folded to logical ops.
1777 if (SI->getType()->isIntOrIntVectorTy(1))
1778 return nullptr;
1779
1780 // Avoid breaking min/max reduction pattern,
1781 // which is necessary for vectorization later.
1782 if (isa<MinMaxIntrinsic>(&Op))
1783 for (Value *IntrinOp : Op.operands())
1784 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1785 for (Value *PhiOp : PN->operands())
1786 if (PhiOp == &Op)
1787 return nullptr;
1788
1789 // Test if a FCmpInst instruction is used exclusively by a select as
1790 // part of a minimum or maximum operation. If so, refrain from doing
1791 // any other folding. This helps out other analyses which understand
1792 // non-obfuscated minimum and maximum idioms. And in this case, at
1793 // least one of the comparison operands has at least one user besides
1794 // the compare (the select), which would often largely negate the
1795 // benefit of folding anyway.
1796 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1797 if (CI->hasOneUse()) {
1798 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1799 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1800 !CI->isCommutative())
1801 return nullptr;
1802 }
1803 }
1804
1805 // Make sure that one of the select arms folds successfully.
1806 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1807 Value *NewFV =
1808 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1809 if (!NewTV && !NewFV)
1810 return nullptr;
1811
1812 // Create an instruction for the arm that did not fold.
1813 if (!NewTV)
1814 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1815 if (!NewFV)
1816 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1817 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1818}
1819
1820 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1821 Value *InValue, BasicBlock *InBB,
1822 const DataLayout &DL,
1823 const SimplifyQuery SQ) {
1824 // NB: It is a precondition of this transform that the operands be
1825 // phi translatable!
1826 SmallVector<Value *> Ops;
1827 for (Value *Op : I.operands()) {
1828 if (Op == PN)
1829 Ops.push_back(InValue);
1830 else
1831 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1832 }
1833
1834 // Don't consider the simplification successful if we get back a constant
1835 // expression. That's just an instruction in hiding.
1836 // Also reject the case where we simplify back to the phi node. We wouldn't
1837 // be able to remove it in that case.
1838 Value *NewVal = simplifyInstructionWithOperands(
1839 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1840 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1841 return NewVal;
1842
1843 // Check if incoming PHI value can be replaced with constant
1844 // based on implied condition.
1845 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1846 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1847 if (TerminatorBI && TerminatorBI->isConditional() &&
1848 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1849 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1850 std::optional<bool> ImpliedCond = isImpliedCondition(
1851 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1852 DL, LHSIsTrue);
1853 if (ImpliedCond)
1854 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1855 }
1856
1857 return nullptr;
1858}
1859
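// Illustrative example of the basic single-use fold (names are placeholders);
// the non-simplified arm is cloned into its predecessor, which must branch
// here unconditionally:
//   %phi = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r   = add i32 %phi, 4
// -->
//   %x.add = add i32 %x, 4                    ; emitted in %bb1
//   %r     = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]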
1860 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1861 bool AllowMultipleUses) {
1862 unsigned NumPHIValues = PN->getNumIncomingValues();
1863 if (NumPHIValues == 0)
1864 return nullptr;
1865
1866 // We normally only transform phis with a single use. However, if a PHI has
1867 // multiple uses and they are all the same operation, we can fold *all* of the
1868 // uses into the PHI.
1869 bool OneUse = PN->hasOneUse();
1870 bool IdenticalUsers = false;
1871 if (!AllowMultipleUses && !OneUse) {
1872 // Walk the use list for the instruction, comparing them to I.
1873 for (User *U : PN->users()) {
1874 Instruction *UI = cast<Instruction>(U);
1875 if (UI != &I && !I.isIdenticalTo(UI))
1876 return nullptr;
1877 }
1878 // Otherwise, we can replace *all* users with the new PHI we form.
1879 IdenticalUsers = true;
1880 }
1881
1882 // Check that all operands are phi-translatable.
1883 for (Value *Op : I.operands()) {
1884 if (Op == PN)
1885 continue;
1886
1887 // Non-instructions never require phi-translation.
1888 auto *I = dyn_cast<Instruction>(Op);
1889 if (!I)
1890 continue;
1891
1892 // Phi-translate can handle phi nodes in the same block.
1893 if (isa<PHINode>(I))
1894 if (I->getParent() == PN->getParent())
1895 continue;
1896
1897 // Operand dominates the block, no phi-translation necessary.
1898 if (DT.dominates(I, PN->getParent()))
1899 continue;
1900
1901 // Not phi-translatable, bail out.
1902 return nullptr;
1903 }
1904
1905 // Check to see whether the instruction can be folded into each phi operand.
1906 // If there is one operand that does not fold, remember the BB it is in.
1907 SmallVector<Value *> NewPhiValues;
1908 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1909 bool SeenNonSimplifiedInVal = false;
1910 for (unsigned i = 0; i != NumPHIValues; ++i) {
1911 Value *InVal = PN->getIncomingValue(i);
1912 BasicBlock *InBB = PN->getIncomingBlock(i);
1913
1914 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1915 NewPhiValues.push_back(NewVal);
1916 continue;
1917 }
1918
1919 // Handle some cases that can't be fully simplified, but where we know that
1920 // the two instructions will fold into one.
1921 auto WillFold = [&]() {
1922 if (!InVal->hasUseList() || !InVal->hasOneUser())
1923 return false;
1924
1925 // icmp of ucmp/scmp with constant will fold to icmp.
1926 const APInt *Ignored;
1927 if (isa<CmpIntrinsic>(InVal) &&
1928 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1929 return true;
1930
1931 // icmp eq zext(bool), 0 will fold to !bool.
1932 if (isa<ZExtInst>(InVal) &&
1933 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1934 match(&I,
1935 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_Zero())))
1936 return true;
1937
1938 return false;
1939 };
1940
1941 if (WillFold()) {
1942 OpsToMoveUseToIncomingBB.push_back(i);
1943 NewPhiValues.push_back(nullptr);
1944 continue;
1945 }
1946
1947 if (!OneUse && !IdenticalUsers)
1948 return nullptr;
1949
1950 if (SeenNonSimplifiedInVal)
1951 return nullptr; // More than one non-simplified value.
1952 SeenNonSimplifiedInVal = true;
1953
1954 // If there is exactly one non-simplified value, we can insert a copy of the
1955 // operation in that block. However, if this is a critical edge, we would
1956 // be inserting the computation on some other paths (e.g. inside a loop).
1957 // Only do this if the pred block is unconditionally branching into the phi
1958 // block. Also, make sure that the pred block is not dead code.
1959 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1960 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1961 return nullptr;
1962
1963 NewPhiValues.push_back(nullptr);
1964 OpsToMoveUseToIncomingBB.push_back(i);
1965
1966 // If the InVal is an invoke at the end of the pred block, then we can't
1967 // insert a computation after it without breaking the edge.
1968 if (isa<InvokeInst>(InVal))
1969 if (cast<Instruction>(InVal)->getParent() == InBB)
1970 return nullptr;
1971
1972 // Do not push the operation across a loop backedge. This could result in
1973 // an infinite combine loop, and is generally non-profitable (especially
1974 // if the operation was originally outside the loop).
1975 if (isBackEdge(InBB, PN->getParent()))
1976 return nullptr;
1977 }
1978
1979 // Clone the instruction that uses the phi node and move it into the incoming
1980 // BB because we know that the next iteration of InstCombine will simplify it.
1981 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1982 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1983 Value *Op = PN->getIncomingValue(OpIndex);
1984 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1985
1986 Instruction *Clone = Clones.lookup(OpBB);
1987 if (!Clone) {
1988 Clone = I.clone();
1989 for (Use &U : Clone->operands()) {
1990 if (U == PN)
1991 U = Op;
1992 else
1993 U = U->DoPHITranslation(PN->getParent(), OpBB);
1994 }
1995 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1996 Clones.insert({OpBB, Clone});
1997 // We may have speculated the instruction.
1998 Clone->dropUBImplyingAttrsAndMetadata();
1999 }
2000
2001 NewPhiValues[OpIndex] = Clone;
2002 }
2003
2004 // Okay, we can do the transformation: create the new PHI node.
2005 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2006 InsertNewInstBefore(NewPN, PN->getIterator());
2007 NewPN->takeName(PN);
2008 NewPN->setDebugLoc(PN->getDebugLoc());
2009
2010 for (unsigned i = 0; i != NumPHIValues; ++i)
2011 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2012
2013 if (IdenticalUsers) {
2014 // Collect and deduplicate users up-front to avoid iterator invalidation.
2015 SmallSetVector<Instruction *, 4> ToReplace;
2016 for (User *U : PN->users()) {
2017 Instruction *User = cast<Instruction>(U);
2018 if (User == &I)
2019 continue;
2020 ToReplace.insert(User);
2021 }
2022 for (Instruction *I : ToReplace) {
2023 replaceInstUsesWith(*I, NewPN);
2024 eraseInstFromFunction(*I);
2025 }
2026 OneUse = true;
2027 }
2028
2029 if (OneUse) {
2030 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
2031 const_cast<PHINode &>(*NewPN),
2032 const_cast<PHINode &>(*PN), DT);
2033 }
2034 return replaceInstUsesWith(I, NewPN);
2035}
2036
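// Illustrative example (a sketch) of folding two interleaved recurrences that
// feed the same associative binop:
//   loop:
//     %p0 = phi i32 [ 0, %entry ], [ %a0, %loop ]
//     %p1 = phi i32 [ 8, %entry ], [ %a1, %loop ]
//     %a0 = add i32 %p0, 2
//     %a1 = add i32 %p1, 4
//     %r  = add i32 %a0, %a1
// -->
//   loop:
//     %p = phi i32 [ 8, %entry ], [ %r, %loop ]
//     %r = add i32 %p, 6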
2037 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2038 if (!BO.isAssociative())
2039 return nullptr;
2040
2041 // Find the interleaved binary ops.
2042 auto Opc = BO.getOpcode();
2043 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2044 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2045 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2046 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2047 !BO0->isAssociative() || !BO1->isAssociative() ||
2048 BO0->getParent() != BO1->getParent())
2049 return nullptr;
2050
2051 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2052 "Expected commutative instructions!");
2053
2054 // Find the matching phis, forming the recurrences.
2055 PHINode *PN0, *PN1;
2056 Value *Start0, *Step0, *Start1, *Step1;
2057 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2058 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2059 PN0->getParent() != PN1->getParent())
2060 return nullptr;
2061
2062 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2063 "Expected PHIs with two incoming values!");
2064
2065 // Convert the start and step values to constants.
2066 auto *Init0 = dyn_cast<Constant>(Start0);
2067 auto *Init1 = dyn_cast<Constant>(Start1);
2068 auto *C0 = dyn_cast<Constant>(Step0);
2069 auto *C1 = dyn_cast<Constant>(Step1);
2070 if (!Init0 || !Init1 || !C0 || !C1)
2071 return nullptr;
2072
2073 // Fold the recurrence constants.
2074 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2075 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2076 if (!Init || !C)
2077 return nullptr;
2078
2079 // Create the reduced PHI.
2080 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2081 "reduced.phi");
2082
2083 // Create the new binary op.
2084 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2085 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2086 // Intersect FMF flags for FADD and FMUL.
2087 FastMathFlags Intersect = BO0->getFastMathFlags() &
2088 BO1->getFastMathFlags() & BO.getFastMathFlags();
2089 NewBO->setFastMathFlags(Intersect);
2090 } else {
2091 OverflowTracking Flags;
2092 Flags.AllKnownNonNegative = false;
2093 Flags.AllKnownNonZero = false;
2094 Flags.mergeFlags(*BO0);
2095 Flags.mergeFlags(*BO1);
2096 Flags.mergeFlags(BO);
2097 Flags.applyFlags(*NewBO);
2098 }
2099 NewBO->takeName(&BO);
2100
2101 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2102 auto *V = PN0->getIncomingValue(I);
2103 auto *BB = PN0->getIncomingBlock(I);
2104 if (V == Init0) {
2105 assert(((PN1->getIncomingValue(0) == Init1 &&
2106 PN1->getIncomingBlock(0) == BB) ||
2107 (PN1->getIncomingValue(1) == Init1 &&
2108 PN1->getIncomingBlock(1) == BB)) &&
2109 "Invalid incoming block!");
2110 NewPN->addIncoming(Init, BB);
2111 } else if (V == BO0) {
2112 assert(((PN1->getIncomingValue(0) == BO1 &&
2113 PN1->getIncomingBlock(0) == BB) ||
2114 (PN1->getIncomingValue(1) == BO1 &&
2115 PN1->getIncomingBlock(1) == BB)) &&
2116 "Invalid incoming block!");
2117 NewPN->addIncoming(NewBO, BB);
2118 } else
2119 llvm_unreachable("Unexpected incoming value!");
2120 }
2121
2122 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2123 << "\n with " << *PN1 << "\n " << *BO1
2124 << '\n');
2125
2126 // Insert the new recurrence and remove the old (dead) ones.
2127 InsertNewInstWith(NewPN, PN0->getIterator());
2128 InsertNewInstWith(NewBO, BO0->getIterator());
2129
2136
2137 return replaceInstUsesWith(BO, NewBO);
2138}
2139
2140 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2141 // Attempt to fold binary operators whose operands are simple recurrences.
2142 if (auto *NewBO = foldBinopWithRecurrence(BO))
2143 return NewBO;
2144
2145 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2146 // we are guarding against replicating the binop in >1 predecessor.
2147 // This could miss matching a phi with 2 constant incoming values.
2148 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2149 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2150 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2151 Phi0->getNumOperands() != Phi1->getNumOperands())
2152 return nullptr;
2153
2154 // TODO: Remove the restriction for binop being in the same block as the phis.
2155 if (BO.getParent() != Phi0->getParent() ||
2156 BO.getParent() != Phi1->getParent())
2157 return nullptr;
2158
2159 // Fold if, in every predecessor, either phi0's or phi1's incoming value is
2160 // the identity constant of the binary operator (e.g. 0 for add); the result
2161 // for that predecessor is then simply the other phi's incoming value.
2162 // For example:
2163 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2164 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2165 // %add = add i32 %phi0, %phi1
2166 // ==>
2167 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2168 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2169 /*AllowRHSConstant*/ false);
2170 if (C) {
2171 SmallVector<Value *, 4> NewIncomingValues;
2172 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2173 auto &Phi0Use = std::get<0>(T);
2174 auto &Phi1Use = std::get<1>(T);
2175 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2176 return false;
2177 Value *Phi0UseV = Phi0Use.get();
2178 Value *Phi1UseV = Phi1Use.get();
2179 if (Phi0UseV == C)
2180 NewIncomingValues.push_back(Phi1UseV);
2181 else if (Phi1UseV == C)
2182 NewIncomingValues.push_back(Phi0UseV);
2183 else
2184 return false;
2185 return true;
2186 };
2187
2188 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2189 CanFoldIncomingValuePair)) {
2190 PHINode *NewPhi =
2191 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2192 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2193 "The number of collected incoming values should equal the number "
2194 "of the original PHINode operands!");
2195 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2196 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2197 return NewPhi;
2198 }
2199 }
2200
2201 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2202 return nullptr;
2203
2204 // Match a pair of incoming constants for one of the predecessor blocks.
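// Illustrative example of the case handled below (assuming %other.bb ends in
// an unconditional branch to this block):
//   %phi0 = phi i32 [ 7, %const.bb ], [ %a, %other.bb ]
//   %phi1 = phi i32 [ 5, %const.bb ], [ %b, %other.bb ]
//   %r    = add i32 %phi0, %phi1
// -->
//   %new  = add i32 %a, %b                    ; emitted in %other.bb
//   %r    = phi i32 [ %new, %other.bb ], [ 12, %const.bb ]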
2205 BasicBlock *ConstBB, *OtherBB;
2206 Constant *C0, *C1;
2207 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2208 ConstBB = Phi0->getIncomingBlock(0);
2209 OtherBB = Phi0->getIncomingBlock(1);
2210 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2211 ConstBB = Phi0->getIncomingBlock(1);
2212 OtherBB = Phi0->getIncomingBlock(0);
2213 } else {
2214 return nullptr;
2215 }
2216 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2217 return nullptr;
2218
2219 // The block that we are hoisting to must reach here unconditionally.
2220 // Otherwise, we could be speculatively executing an expensive or
2221 // non-speculative op.
2222 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2223 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2224 !DT.isReachableFromEntry(OtherBB))
2225 return nullptr;
2226
2227 // TODO: This check could be tightened to only apply to binops (div/rem) that
2228 // are not safe to speculatively execute. But that could allow hoisting
2229 // potentially expensive instructions (fdiv for example).
2230 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2231 if (BBIter->mayHaveSideEffects())
2232 return nullptr;
2233
2234 // Fold constants for the predecessor block with constant incoming values.
2235 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2236 if (!NewC)
2237 return nullptr;
2238
2239 // Make a new binop in the predecessor block with the non-constant incoming
2240 // values.
2241 Builder.SetInsertPoint(PredBlockBranch);
2242 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2243 Phi0->getIncomingValueForBlock(OtherBB),
2244 Phi1->getIncomingValueForBlock(OtherBB));
2245 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2246 NotFoldedNewBO->copyIRFlags(&BO);
2247
2248 // Replace the binop with a phi of the new values. The old phis are dead.
2249 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2250 NewPhi->addIncoming(NewBO, OtherBB);
2251 NewPhi->addIncoming(NewC, ConstBB);
2252 return NewPhi;
2253}
2254
2255 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2256 if (!isa<Constant>(I.getOperand(1)))
2257 return nullptr;
2258
2259 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2260 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2261 return NewSel;
2262 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2263 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2264 return NewPhi;
2265 }
2266 return nullptr;
2267}
2268
2269 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2270 // If this GEP has only 0 indices, it is the same pointer as
2271 // Src. If Src is not a trivial GEP too, don't combine
2272 // the indices.
2273 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2274 !Src.hasOneUse())
2275 return false;
2276 return true;
2277}
2278
2279/// Find a constant NewC that has property:
2280/// shuffle(NewC, ShMask) = C
2281/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2282///
2283/// A 1-to-1 mapping is not required. Example:
2284/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2285 static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2286 VectorType *NewCTy) {
2287 if (isa<ScalableVectorType>(NewCTy)) {
2288 Constant *Splat = C->getSplatValue();
2289 if (!Splat)
2290 return nullptr;
2291 return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2292 }
2293
2294 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2295 cast<FixedVectorType>(C->getType())->getNumElements())
2296 return nullptr;
2297
2298 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2299 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2300 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2301 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2302 for (unsigned I = 0; I < NumElts; ++I) {
2303 Constant *CElt = C->getAggregateElement(I);
2304 if (ShMask[I] >= 0) {
2305 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2306 Constant *NewCElt = NewVecC[ShMask[I]];
2307 // Bail out if:
2308 // 1. The constant vector contains a constant expression.
2309 // 2. The shuffle needs an element of the constant vector that can't
2310 // be mapped to a new constant vector.
2311 // 3. This is a widening shuffle that copies elements of V1 into the
2312 // extended elements (extending with poison is allowed).
2313 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2314 I >= NewCNumElts)
2315 return nullptr;
2316 NewVecC[ShMask[I]] = CElt;
2317 }
2318 }
2319 return ConstantVector::get(NewVecC);
2320}
2321
2322 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2323 if (!isa<VectorType>(Inst.getType()))
2324 return nullptr;
2325
2326 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2327 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2328 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2329 cast<VectorType>(Inst.getType())->getElementCount());
2330 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2331 cast<VectorType>(Inst.getType())->getElementCount());
2332
2333 // If both operands of the binop are vector concatenations, then perform the
2334 // narrow binop on each pair of the source operands followed by concatenation
2335 // of the results.
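// Illustrative example (a concat shuffle of two <2 x i32> halves):
//   %lhs = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %rhs = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %r   = add <4 x i32> %lhs, %rhs
// -->
//   %r0  = add <2 x i32> %a, %c
//   %r1  = add <2 x i32> %b, %d
//   %r   = shufflevector <2 x i32> %r0, <2 x i32> %r1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>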
2336 Value *L0, *L1, *R0, *R1;
2337 ArrayRef<int> Mask;
2338 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2339 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2340 LHS->hasOneUse() && RHS->hasOneUse() &&
2341 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2342 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2343 // This transform does not have the speculative execution constraint as
2344 // below because the shuffle is a concatenation. The new binops are
2345 // operating on exactly the same elements as the existing binop.
2346 // TODO: We could ease the mask requirement to allow different undef lanes,
2347 // but that requires an analysis of the binop-with-undef output value.
2348 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2349 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2350 BO->copyIRFlags(&Inst);
2351 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2352 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2353 BO->copyIRFlags(&Inst);
2354 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2355 }
2356
2357 auto createBinOpReverse = [&](Value *X, Value *Y) {
2358 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2359 if (auto *BO = dyn_cast<BinaryOperator>(V))
2360 BO->copyIRFlags(&Inst);
2361 Module *M = Inst.getModule();
2362 Function *F = Intrinsic::getOrInsertDeclaration(
2363 M, Intrinsic::vector_reverse, V->getType());
2364 return CallInst::Create(F, V);
2365 };
2366
2367 // NOTE: Reverse shuffles don't require the speculative execution protection
2368 // below because they don't affect which lanes take part in the computation.
2369
2370 Value *V1, *V2;
2371 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2372 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2373 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2374 (LHS->hasOneUse() || RHS->hasOneUse() ||
2375 (LHS == RHS && LHS->hasNUses(2))))
2376 return createBinOpReverse(V1, V2);
2377
2378 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2379 if (LHS->hasOneUse() && isSplatValue(RHS))
2380 return createBinOpReverse(V1, RHS);
2381 }
2382 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2383 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2384 return createBinOpReverse(LHS, V2);
2385
2386 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2387 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2388 if (auto *BO = dyn_cast<BinaryOperator>(V))
2389 BO->copyIRFlags(&Inst);
2390
2391 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2392 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2393 Module *M = Inst.getModule();
2394 Function *F = Intrinsic::getOrInsertDeclaration(
2395 M, Intrinsic::experimental_vp_reverse, V->getType());
2396 return CallInst::Create(F, {V, AllTrueMask, EVL});
2397 };
2398
2399 Value *EVL;
2400 if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2401 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2402 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2403 if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2404 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2405 (LHS->hasOneUse() || RHS->hasOneUse() ||
2406 (LHS == RHS && LHS->hasNUses(2))))
2407 return createBinOpVPReverse(V1, V2, EVL);
2408
2409 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2410 if (LHS->hasOneUse() && isSplatValue(RHS))
2411 return createBinOpVPReverse(V1, RHS, EVL);
2412 }
2413 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2414 else if (isSplatValue(LHS) &&
2415 match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2416 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2417 return createBinOpVPReverse(LHS, V2, EVL);
2418
2419 // It may not be safe to reorder shuffles and things like div, urem, etc.
2420 // because we may trap when executing those ops on unknown vector elements.
2421 // See PR20059.
2422 if (!isSafeToSpeculativelyExecute(&Inst))
2423 return nullptr;
2424
2425 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2426 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2427 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2428 BO->copyIRFlags(&Inst);
2429 return new ShuffleVectorInst(XY, M);
2430 };
2431
2432 // If both arguments of the binary operation are shuffles that use the same
2433 // mask and shuffle within a single vector, move the shuffle after the binop.
2434 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2435 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2436 V1->getType() == V2->getType() &&
2437 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2438 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2439 return createBinOpShuffle(V1, V2, Mask);
2440 }
2441
2442 // If both arguments of a commutative binop are select-shuffles that use the
2443 // same mask with commuted operands, the shuffles are unnecessary.
2444 if (Inst.isCommutative() &&
2445 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2446 match(RHS,
2447 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2448 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2449 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2450 // TODO: Allow shuffles that contain undefs in the mask?
2451 // That is legal, but it reduces undef knowledge.
2452 // TODO: Allow arbitrary shuffles by shuffling after binop?
2453 // That might be legal, but we have to deal with poison.
2454 if (LShuf->isSelect() &&
2455 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2456 RShuf->isSelect() &&
2457 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2458 // Example:
2459 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2460 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2461 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2462 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2463 NewBO->copyIRFlags(&Inst);
2464 return NewBO;
2465 }
2466 }
2467
2468 // If one argument is a shuffle within one vector and the other is a constant,
2469 // try moving the shuffle after the binary operation. This canonicalization
2470 // intends to move shuffles closer to other shuffles and binops closer to
2471 // other binops, so they can be folded. It may also enable demanded elements
2472 // transforms.
2473 Constant *C;
2474 if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2475 m_Mask(Mask))),
2476 m_ImmConstant(C)))) {
2477 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2478 "Shuffle should not change scalar type");
2479
2480 bool ConstOp1 = isa<Constant>(RHS);
2481 if (Constant *NewC =
2482 unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2483 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2484 // which will cause UB for div/rem. Mask them with a safe constant.
2485 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2486 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2487
2488 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2489 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2490 Value *NewLHS = ConstOp1 ? V1 : NewC;
2491 Value *NewRHS = ConstOp1 ? NewC : V1;
2492 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2493 }
2494 }
2495
2496 // Try to reassociate to sink a splat shuffle after a binary operation.
2497 if (Inst.isAssociative() && Inst.isCommutative()) {
2498 // Canonicalize shuffle operand as LHS.
2499 if (isa<ShuffleVectorInst>(RHS))
2500 std::swap(LHS, RHS);
2501
2502 Value *X;
2503 ArrayRef<int> MaskC;
2504 int SplatIndex;
2505 Value *Y, *OtherOp;
2506 if (!match(LHS,
2507 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2508 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2509 X->getType() != Inst.getType() ||
2510 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2511 return nullptr;
2512
2513 // FIXME: This may not be safe if the analysis allows undef elements. By
2514 // moving 'Y' before the splat shuffle, we are implicitly assuming
2515 // that it is not undef/poison at the splat index.
2516 if (isSplatValue(OtherOp, SplatIndex)) {
2517 std::swap(Y, OtherOp);
2518 } else if (!isSplatValue(Y, SplatIndex)) {
2519 return nullptr;
2520 }
2521
2522 // X and Y are splatted values, so perform the binary operation on those
2523 // values followed by a splat followed by the 2nd binary operation:
2524 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2525 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2526 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2527 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2528 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2529
2530 // Intersect FMF on both new binops. Other (poison-generating) flags are
2531 // dropped to be safe.
2532 if (isa<FPMathOperator>(R)) {
2533 R->copyFastMathFlags(&Inst);
2534 R->andIRFlags(RHS);
2535 }
2536 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2537 NewInstBO->copyIRFlags(R);
2538 return R;
2539 }
2540
2541 return nullptr;
2542}
2543
2544/// Try to narrow the width of a binop if at least 1 operand is an extend of
2545 /// a value. This requires a potentially expensive known bits check to make
2546 /// sure the narrow op does not overflow.
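/// Illustrative example (a sketch; the narrow add provably cannot overflow
/// because %x was masked to [0, 15]):
///   %x = and i8 %a, 15
///   %w = zext i8 %x to i32
///   %r = add i32 %w, 20
/// -->
///   %n = add nuw i8 %x, 20
///   %r = zext i8 %n to i32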
2546/// sure the narrow op does not overflow.
2547Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2548 // We need at least one extended operand.
2549 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2550
2551 // If this is a sub, we swap the operands since we always want an extension
2552 // on the RHS. The LHS can be an extension or a constant.
2553 if (BO.getOpcode() == Instruction::Sub)
2554 std::swap(Op0, Op1);
2555
2556 Value *X;
2557 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2558 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2559 return nullptr;
2560
2561 // If both operands are the same extension from the same source type and we
2562 // can eliminate at least one (hasOneUse), this might work.
2563 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2564 Value *Y;
2565 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2566 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2567 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2568 // If that did not match, see if we have a suitable constant operand.
2569 // Truncating and extending must produce the same constant.
2570 Constant *WideC;
2571 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2572 return nullptr;
2573 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2574 if (!NarrowC)
2575 return nullptr;
2576 Y = NarrowC;
2577 }
2578
2579 // Swap back now that we found our operands.
2580 if (BO.getOpcode() == Instruction::Sub)
2581 std::swap(X, Y);
2582
2583 // Both operands have narrow versions. Last step: the math must not overflow
2584 // in the narrow width.
2585 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2586 return nullptr;
2587
2588 // bo (ext X), (ext Y) --> ext (bo X, Y)
2589 // bo (ext X), C --> ext (bo X, C')
2590 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2591 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2592 if (IsSext)
2593 NewBinOp->setHasNoSignedWrap();
2594 else
2595 NewBinOp->setHasNoUnsignedWrap();
2596 }
2597 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2598}
2599
2600/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2601/// transform.
2602 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2603 GEPOperator &GEP2) {
2604 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2605 }
2606
2607/// Thread a GEP operation with constant indices through the constant true/false
2608/// arms of a select.
2609 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2610 InstCombiner::BuilderTy &Builder) {
2611 if (!GEP.hasAllConstantIndices())
2612 return nullptr;
2613
2614 Instruction *Sel;
2615 Value *Cond;
2616 Constant *TrueC, *FalseC;
2617 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2618 !match(Sel,
2619 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2620 return nullptr;
2621
2622 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2623 // Propagate 'inbounds' and metadata from existing instructions.
2624 // Note: using IRBuilder to create the constants for efficiency.
2625 SmallVector<Value *, 4> IndexC(GEP.indices());
2626 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2627 Type *Ty = GEP.getSourceElementType();
2628 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2629 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2630 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2631}
2632
2633// Canonicalization:
2634// gep T, (gep i8, base, C1), (Index + C2) into
2635// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
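// Illustrative example with T = i32 (so sizeof(T) == 4):
//   %p = getelementptr i8, ptr %base, i64 8
//   %i.off = add i64 %i, 3
//   %r = getelementptr i32, ptr %p, i64 %i.off
// -->
//   %p2 = getelementptr i8, ptr %base, i64 20   ; 8 + 3 * 4
//   %r  = getelementptr i32, ptr %p2, i64 %i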
2636 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2637 GEPOperator *Src,
2638 InstCombinerImpl &IC) {
2639 if (GEP.getNumIndices() != 1)
2640 return nullptr;
2641 auto &DL = IC.getDataLayout();
2642 Value *Base;
2643 const APInt *C1;
2644 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2645 return nullptr;
2646 Value *VarIndex;
2647 const APInt *C2;
2648 Type *PtrTy = Src->getType()->getScalarType();
2649 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2650 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2651 return nullptr;
2652 if (C1->getBitWidth() != IndexSizeInBits ||
2653 C2->getBitWidth() != IndexSizeInBits)
2654 return nullptr;
2655 Type *BaseType = GEP.getSourceElementType();
2656 if (isa<ScalableVectorType>(BaseType))
2657 return nullptr;
2658 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2659 APInt NewOffset = TypeSize * *C2 + *C1;
2660 if (NewOffset.isZero() ||
2661 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2662 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2663 if (GEP.hasNoUnsignedWrap() &&
2664 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2665 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2666 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2667 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2668 Flags |= GEPNoWrapFlags::inBounds();
2669 }
2670
2671 Value *GEPConst =
2672 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2673 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2674 }
2675
2676 return nullptr;
2677}
2678
2679 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2680 GEPOperator *Src) {
2681 // Combine Indices - If the source pointer to this getelementptr instruction
2682 // is a getelementptr instruction with matching element type, combine the
2683 // indices of the two getelementptr instructions into a single instruction.
2684 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2685 return nullptr;
2686
2687 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2688 return I;
2689
2690 // For constant GEPs, use a more general offset-based folding approach.
2691 Type *PtrTy = Src->getType()->getScalarType();
2692 if (GEP.hasAllConstantIndices() &&
2693 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2694 // Split Src into a variable part and a constant suffix.
2695 gep_type_iterator GTI = gep_type_begin(*Src);
2696 Type *BaseType = GTI.getIndexedType();
2697 bool IsFirstType = true;
2698 unsigned NumVarIndices = 0;
2699 for (auto Pair : enumerate(Src->indices())) {
2700 if (!isa<ConstantInt>(Pair.value())) {
2701 BaseType = GTI.getIndexedType();
2702 IsFirstType = false;
2703 NumVarIndices = Pair.index() + 1;
2704 }
2705 ++GTI;
2706 }
2707
2708 // Determine the offset for the constant suffix of Src.
2709 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2710 if (NumVarIndices != Src->getNumIndices()) {
2711 // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2712 if (BaseType->isScalableTy())
2713 return nullptr;
2714
2715 SmallVector<Value *> ConstantIndices;
2716 if (!IsFirstType)
2717 ConstantIndices.push_back(
2718 Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
2719 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2720 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2721 }
2722
2723 // Add the offset for GEP (which is fully constant).
2724 if (!GEP.accumulateConstantOffset(DL, Offset))
2725 return nullptr;
2726
2727 // Convert the total offset back into indices.
2728 SmallVector<APInt> ConstIndices =
2729 DL.getGEPIndicesForOffset(BaseType, Offset);
2730 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2731 return nullptr;
2732
2733 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2734 SmallVector<Value *> Indices(
2735 drop_end(Src->indices(), Src->getNumIndices() - NumVarIndices));
2736 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2737 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2738 // Even if the total offset is inbounds, we may end up representing it
2739 // by first performing a larger negative offset, and then a smaller
2740 // positive one. The large negative offset might go out of bounds. Only
2741 // preserve inbounds if all signs are the same.
2742 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2744 if (!Idx.isNonNegative())
2745 NW = NW.withoutNoUnsignedWrap();
2746 }
2747
2748 return replaceInstUsesWith(
2749 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2750 Indices, "", NW));
2751 }
2752
2753 if (Src->getResultElementType() != GEP.getSourceElementType())
2754 return nullptr;
2755
2756 SmallVector<Value*, 8> Indices;
2757
2758 // Find out whether the last index in the source GEP is a sequential idx.
2759 bool EndsWithSequential = false;
2760 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2761 I != E; ++I)
2762 EndsWithSequential = I.isSequential();
2763
2764 // Can we combine the two pointer arithmetics offsets?
2765 if (EndsWithSequential) {
2766 // Replace: gep (gep %P, long B), long A, ...
2767 // With: T = long A+B; gep %P, T, ...
2768 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2769 Value *GO1 = GEP.getOperand(1);
2770
2771 // If they aren't the same type, then the input hasn't been processed
2772 // by the loop above yet (which canonicalizes sequential index types to
2773 // intptr_t). Just avoid transforming this until the input has been
2774 // normalized.
2775 if (SO1->getType() != GO1->getType())
2776 return nullptr;
2777
2778 Value *Sum =
2779 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2780 // Only do the combine when we are sure the cost after the
2781 // merge is never more than that before the merge.
2782 if (Sum == nullptr)
2783 return nullptr;
2784
2785 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2786 Indices.push_back(Sum);
2787 Indices.append(GEP.op_begin()+2, GEP.op_end());
2788 } else if (isa<Constant>(*GEP.idx_begin()) &&
2789 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2790 Src->getNumOperands() != 1) {
2791 // Otherwise we can do the fold if the first index of the GEP is a zero
2792 Indices.append(Src->op_begin()+1, Src->op_end());
2793 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2794 }
2795
2796 // Don't create GEPs with more than one variable index.
2797 unsigned NumVarIndices =
2798 count_if(Indices, [](Value *Idx) { return !isa<Constant>(Idx); });
2799 if (NumVarIndices > 1)
2800 return nullptr;
2801
2802 if (!Indices.empty())
2803 return replaceInstUsesWith(
2804 GEP, Builder.CreateGEP(
2805 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2806 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2807
2808 return nullptr;
2809}
2810
2811 Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2812 BuilderTy *Builder,
2813 bool &DoesConsume, unsigned Depth) {
2814 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2815 // ~(~(X)) -> X.
2816 Value *A, *B;
2817 if (match(V, m_Not(m_Value(A)))) {
2818 DoesConsume = true;
2819 return A;
2820 }
2821
2822 Constant *C;
2823 // Constants can be considered to be not'ed values.
2824 if (match(V, m_ImmConstant(C)))
2825 return ConstantExpr::getNot(C);
2826
2827 if (Depth++ >= MaxAnalysisRecursionDepth)
2828 return nullptr;
2829
2830 // The rest of the cases require that we invert all uses so don't bother
2831 // doing the analysis if we know we can't use the result.
2832 if (!WillInvertAllUses)
2833 return nullptr;
2834
2835 // Compares can be inverted if all of their uses are being modified to use
2836 // the ~V.
2837 if (auto *I = dyn_cast<CmpInst>(V)) {
2838 if (Builder != nullptr)
2839 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2840 I->getOperand(1));
2841 return NonNull;
2842 }
2843
2844 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2845 // `(-1 - B) - A` if we are willing to invert all of the uses.
2846 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2847 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2848 DoesConsume, Depth))
2849 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2850 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2851 DoesConsume, Depth))
2852 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2853 return nullptr;
2854 }
2855
2856 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2857 // into `A ^ B` if we are willing to invert all of the uses.
2858 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2859 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2860 DoesConsume, Depth))
2861 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2862 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2863 DoesConsume, Depth))
2864 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2865 return nullptr;
2866 }
2867
2868 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2869 // `A + (-1 - B)` if we are willing to invert all of the uses.
2870 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2871 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2872 DoesConsume, Depth))
2873 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2874 return nullptr;
2875 }
2876
2877 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2878 // into `A s>> B` if we are willing to invert all of the uses.
2879 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2880 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2881 DoesConsume, Depth))
2882 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2883 return nullptr;
2884 }
2885
2886 Value *Cond;
2887 // LogicOps are special in that we canonicalize them at the cost of an
2888 // instruction.
2889 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2890 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2891 // Selects/min/max with invertible operands are freely invertible
2892 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2893 bool LocalDoesConsume = DoesConsume;
2894 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2895 LocalDoesConsume, Depth))
2896 return nullptr;
2897 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2898 LocalDoesConsume, Depth)) {
2899 DoesConsume = LocalDoesConsume;
2900 if (Builder != nullptr) {
2901 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2902 DoesConsume, Depth);
2903 assert(NotB != nullptr &&
2904 "Unable to build inverted value for known freely invertable op");
2905 if (auto *II = dyn_cast<IntrinsicInst>(V))
2906 return Builder->CreateBinaryIntrinsic(
2907 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2908 return Builder->CreateSelect(Cond, NotA, NotB);
2909 }
2910 return NonNull;
2911 }
2912 }
2913
2914 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2915 bool LocalDoesConsume = DoesConsume;
2916 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2917 for (Use &U : PN->operands()) {
2918 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2919 Value *NewIncomingVal = getFreelyInvertedImpl(
2920 U.get(), /*WillInvertAllUses=*/false,
2921 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2922 if (NewIncomingVal == nullptr)
2923 return nullptr;
2924 // Make sure that we can safely erase the original PHI node.
2925 if (NewIncomingVal == V)
2926 return nullptr;
2927 if (Builder != nullptr)
2928 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2929 }
2930
2931 DoesConsume = LocalDoesConsume;
2932 if (Builder != nullptr) {
2935 PHINode *NewPN =
2936 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2937 for (auto [Val, Pred] : IncomingValues)
2938 NewPN->addIncoming(Val, Pred);
2939 return NewPN;
2940 }
2941 return NonNull;
2942 }
2943
2944 if (match(V, m_SExtLike(m_Value(A)))) {
2945 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2946 DoesConsume, Depth))
2947 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2948 return nullptr;
2949 }
2950
2951 if (match(V, m_Trunc(m_Value(A)))) {
2952 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2953 DoesConsume, Depth))
2954 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2955 return nullptr;
2956 }
2957
2958 // De Morgan's Laws:
2959 // (~(A | B)) -> (~A & ~B)
2960 // (~(A & B)) -> (~A | ~B)
2961 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2962 bool IsLogical, Value *A,
2963 Value *B) -> Value * {
2964 bool LocalDoesConsume = DoesConsume;
2965 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2966 LocalDoesConsume, Depth))
2967 return nullptr;
2968 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2969 LocalDoesConsume, Depth)) {
2970 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2971 LocalDoesConsume, Depth);
2972 DoesConsume = LocalDoesConsume;
2973 if (IsLogical)
2974 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2975 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2976 }
2977
2978 return nullptr;
2979 };
2980
2981 if (match(V, m_Or(m_Value(A), m_Value(B))))
2982 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2983 B);
2984
2985 if (match(V, m_And(m_Value(A), m_Value(B))))
2986 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2987 B);
2988
2989 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2990 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2991 B);
2992
2993 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2994 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2995 B);
2996
2997 return nullptr;
2998}
2999
3000/// Return true if we should canonicalize the gep to an i8 ptradd.
3001 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
3002 Value *PtrOp = GEP.getOperand(0);
3003 Type *GEPEltType = GEP.getSourceElementType();
3004 if (GEPEltType->isIntegerTy(8))
3005 return false;
3006
3007 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3008 // intrinsic. This has better support in BasicAA.
3009 if (GEPEltType->isScalableTy())
3010 return true;
3011
3012 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3013 // together.
3014 if (GEP.getNumIndices() == 1 &&
3015 match(GEP.getOperand(1),
3016 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3017 m_Shl(m_Value(), m_ConstantInt())))))
3018 return true;
3019
3020 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3021 // possibly be merged together.
3022 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3023 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3024 any_of(GEP.indices(), [](Value *V) {
3025 const APInt *C;
3026 return match(V, m_APInt(C)) && !C->isZero();
3027 });
3028}
3029
3030 static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3031 IRBuilderBase &Builder) {
3032 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3033 if (!Op1)
3034 return nullptr;
3035
3036 // Don't fold a GEP into itself through a PHI node. This can only happen
3037 // through the back-edge of a loop. Folding a GEP into itself means that
3038 // the value of the previous iteration needs to be stored in the meantime,
3039 // thus requiring an additional register variable to be live, but not
3040 // actually achieving anything (the GEP still needs to be executed once per
3041 // loop iteration).
3042 if (Op1 == &GEP)
3043 return nullptr;
3044 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3045
3046 int DI = -1;
3047
3048 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3049 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3050 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3051 Op1->getSourceElementType() != Op2->getSourceElementType())
3052 return nullptr;
3053
3054 // As for Op1 above, don't try to fold a GEP into itself.
3055 if (Op2 == &GEP)
3056 return nullptr;
3057
3058 // Keep track of the type as we walk the GEP.
3059 Type *CurTy = nullptr;
3060
3061 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3062 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3063 return nullptr;
3064
3065 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3066 if (DI == -1) {
3067 // We have not seen any differences yet in the GEPs feeding the
3068 // PHI yet, so we record this one if it is allowed to be a
3069 // variable.
3070
3071 // The first two arguments can vary for any GEP, the rest have to be
3072 // static for struct slots
3073 if (J > 1) {
3074 assert(CurTy && "No current type?");
3075 if (CurTy->isStructTy())
3076 return nullptr;
3077 }
3078
3079 DI = J;
3080 } else {
3081 // The GEP is different by more than one input. While this could be
3082 // extended to support GEPs that vary by more than one variable it
3083 // doesn't make sense since it greatly increases the complexity and
3084 // would result in an R+R+R addressing mode which no backend
3085 // directly supports and would need to be broken into several
3086 // simpler instructions anyway.
3087 return nullptr;
3088 }
3089 }
3090
3091 // Sink down a layer of the type for the next iteration.
3092 if (J > 0) {
3093 if (J == 1) {
3094 CurTy = Op1->getSourceElementType();
3095 } else {
3096 CurTy =
3097 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3098 }
3099 }
3100 }
3101
3102 NW &= Op2->getNoWrapFlags();
3103 }
3104
3105 // If not all GEPs are identical we'll have to create a new PHI node.
3106 // Check that the old PHI node has only one use so that it will get
3107 // removed.
3108 if (DI != -1 && !PN->hasOneUse())
3109 return nullptr;
3110
3111 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3112 NewGEP->setNoWrapFlags(NW);
3113
3114 if (DI == -1) {
3115 // All the GEPs feeding the PHI are identical. Clone one down into our
3116 // BB so that it can be merged with the current GEP.
3117 } else {
3118 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3119 // into the current block so it can be merged, and create a new PHI to
3120 // set that index.
3121 PHINode *NewPN;
3122 {
3123 IRBuilderBase::InsertPointGuard Guard(Builder);
3124 Builder.SetInsertPoint(PN);
3125 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3126 PN->getNumOperands());
3127 }
3128
3129 for (auto &I : PN->operands())
3130 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3131 PN->getIncomingBlock(I));
3132
3133 NewGEP->setOperand(DI, NewPN);
3134 }
3135
3136 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3137 return NewGEP;
3138}
3139
3140 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3141 Value *PtrOp = GEP.getOperand(0);
3142 SmallVector<Value *, 8> Indices(GEP.indices());
3143 Type *GEPType = GEP.getType();
3144 Type *GEPEltType = GEP.getSourceElementType();
3145 if (Value *V =
3146 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3147 SQ.getWithInstruction(&GEP)))
3148 return replaceInstUsesWith(GEP, V);
3149
3150 // For vector geps, use the generic demanded vector support.
3151 // Skip if GEP return type is scalable. The number of elements is unknown at
3152 // compile-time.
3153 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3154 auto VWidth = GEPFVTy->getNumElements();
3155 APInt PoisonElts(VWidth, 0);
3156 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3157 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3158 PoisonElts)) {
3159 if (V != &GEP)
3160 return replaceInstUsesWith(GEP, V);
3161 return &GEP;
3162 }
3163 }
3164
3165 // Eliminate unneeded casts for indices, and replace indices which displace
3166 // by multiples of a zero size type with zero.
3167 bool MadeChange = false;
3168
3169 // Index width may not be the same width as pointer width.
3170 // Data layout chooses the right type based on supported integer types.
3171 Type *NewScalarIndexTy =
3172 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3173
3174 gep_type_iterator GTI = gep_type_begin(GEP);
3175 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3176 ++I, ++GTI) {
3177 // Skip indices into struct types.
3178 if (GTI.isStruct())
3179 continue;
3180
3181 Type *IndexTy = (*I)->getType();
3182 Type *NewIndexType =
3183 IndexTy->isVectorTy()
3184 ? VectorType::get(NewScalarIndexTy,
3185 cast<VectorType>(IndexTy)->getElementCount())
3186 : NewScalarIndexTy;
3187
3188 // If the element type has zero size then any index over it is equivalent
3189 // to an index of zero, so replace it with zero if it is not zero already.
3190 Type *EltTy = GTI.getIndexedType();
3191 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3192 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3193 *I = Constant::getNullValue(NewIndexType);
3194 MadeChange = true;
3195 }
3196
3197 if (IndexTy != NewIndexType) {
3198 // If we are using a wider index than needed for this platform, shrink
3199 // it to what we need. If narrower, sign-extend it to what we need.
3200 // This explicit cast can make subsequent optimizations more obvious.
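      // For example (illustrative), on a target with 64-bit index width:
      //   getelementptr i32, ptr %p, i16 %i
      // becomes
      //   %i.ext = sext i16 %i to i64
      //   getelementptr i32, ptr %p, i64 %i.ext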
3201 if (IndexTy->getScalarSizeInBits() <
3202 NewIndexType->getScalarSizeInBits()) {
3203 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3204 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3205 else
3206 *I = Builder.CreateSExt(*I, NewIndexType);
3207 } else {
3208 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3209 GEP.hasNoUnsignedSignedWrap());
3210 }
3211 MadeChange = true;
3212 }
3213 }
3214 if (MadeChange)
3215 return &GEP;
3216
3217 // Canonicalize constant GEPs to i8 type.
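  // For example (illustrative):
  //   getelementptr i32, ptr %p, i64 3
  // becomes
  //   getelementptr i8, ptr %p, i64 12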
3218 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3219 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3220 if (GEP.accumulateConstantOffset(DL, Offset))
3221 return replaceInstUsesWith(
3222 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3223 GEP.getNoWrapFlags()));
3224 }
3225
3227 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3228 Value *NewGEP =
3229 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3230 return replaceInstUsesWith(GEP, NewGEP);
3231 }
3232
3233 // Strip trailing zero indices.
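  // For example (illustrative):
  //   getelementptr [4 x i32], ptr %p, i64 %i, i64 0
  // becomes
  //   getelementptr [4 x i32], ptr %p, i64 %i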
3234 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3235 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3236 return replaceInstUsesWith(
3237 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3238 drop_end(Indices), "", GEP.getNoWrapFlags()));
3239 }
3240
3241 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3242 // Note that this loses information about undef lanes; we run it after
3243 // demanded bits to partially mitigate that loss.
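  // For example (illustrative): a gep whose vector operands are all splats,
  //   getelementptr i32, <2 x ptr> %ps, <2 x i64> splat (i64 1)
  // is rewritten as a scalar gep over the splatted scalars, followed by a
  // vector splat of the scalar result.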
3244 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3245 return Op->getType()->isVectorTy() && getSplatValue(Op);
3246 })) {
3247 SmallVector<Value *> NewOps;
3248 for (auto &Op : GEP.operands()) {
3249 if (Op->getType()->isVectorTy())
3250 if (Value *Scalar = getSplatValue(Op)) {
3251 NewOps.push_back(Scalar);
3252 continue;
3253 }
3254 NewOps.push_back(Op);
3255 }
3256
3257 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3258 ArrayRef(NewOps).drop_front(), GEP.getName(),
3259 GEP.getNoWrapFlags());
3260 if (!Res->getType()->isVectorTy()) {
3261 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3262 Res = Builder.CreateVectorSplat(EC, Res);
3263 }
3264 return replaceInstUsesWith(GEP, Res);
3265 }
3266
3267 bool SeenVarIndex = false;
3268 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3269 if (isa<Constant>(Idx))
3270 continue;
3271
3272 if (!SeenVarIndex) {
3273 SeenVarIndex = true;
3274 continue;
3275 }
3276
3277 // GEP has multiple variable indices: Split it.
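    // For example (illustrative):
    //   getelementptr [8 x [8 x i32]], ptr %p, i64 %i, i64 %j
    // becomes
    //   %p.split = getelementptr [8 x [8 x i32]], ptr %p, i64 %i
    //   getelementptr [8 x i32], ptr %p.split, i64 0, i64 %j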
3278 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3279 Value *FrontGEP =
3280 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3281 GEP.getName() + ".split", GEP.getNoWrapFlags());
3282
3283 SmallVector<Value *> BackIndices;
3284 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3285 append_range(BackIndices, drop_begin(Indices, IdxNum));
3286 return GetElementPtrInst::Create(
3287 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3288 BackIndices, GEP.getNoWrapFlags());
3289 }
3290
3291 // Check to see if the inputs to the PHI node are getelementptr instructions.
3292 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3293 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3294 return replaceOperand(GEP, 0, NewPtrOp);
3295 }
3296
3297 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3298 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3299 return I;
3300
3301 if (GEP.getNumIndices() == 1) {
3302 unsigned AS = GEP.getPointerAddressSpace();
3303 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3304 DL.getIndexSizeInBits(AS)) {
3305 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3306
3307 if (TyAllocSize == 1) {
3308 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3309 // but only if the result pointer is only used as if it were an integer,
3310 // or both point to the same underlying object (otherwise provenance is
3311 // not necessarily retained).
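        // For example (illustrative):
        //   %xi = ptrtoint ptr %x to i64
        //   %yi = ptrtoint ptr %y to i64
        //   %d  = sub i64 %yi, %xi
        //   %g  = getelementptr i8, ptr %x, i64 %d
        // Uses of %g that only care about the address (icmp/ptrtoint), or all
        // uses when %x and %y share an underlying object, are replaced by %y.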
3312 Value *X = GEP.getPointerOperand();
3313 Value *Y;
3314 if (match(GEP.getOperand(1),
3315 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3316 GEPType == Y->getType()) {
3317 bool HasSameUnderlyingObject =
3318 getUnderlyingObject(X) == getUnderlyingObject(Y);
3319 bool Changed = false;
3320 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3321 bool ShouldReplace = HasSameUnderlyingObject ||
3322 isa<ICmpInst>(U.getUser()) ||
3323 isa<PtrToIntInst>(U.getUser());
3324 Changed |= ShouldReplace;
3325 return ShouldReplace;
3326 });
3327 return Changed ? &GEP : nullptr;
3328 }
3329 } else if (auto *ExactIns =
3330 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3331 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
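        // For example (illustrative), with a 4-byte element type:
        //   %idx = lshr exact i64 %v, 2
        //   %g   = getelementptr i32, ptr %x, i64 %idx
        // becomes
        //   %g   = getelementptr i8, ptr %x, i64 %v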
3332 Value *V;
3333 if (ExactIns->isExact()) {
3334 if ((has_single_bit(TyAllocSize) &&
3335 match(GEP.getOperand(1),
3336 m_Shr(m_Value(V),
3337 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3338 match(GEP.getOperand(1),
3339 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3340 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3341 GEP.getPointerOperand(), V,
3342 GEP.getNoWrapFlags());
3343 }
3344 }
3345 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3346 // Try to canonicalize non-i8 element type to i8 if the index is an
3347 // exact instruction. If the index is an exact instruction (div/shr)
3348 // with a constant RHS, we can fold the non-i8 element scale into the
3349 // div/shr (similar to the mul case, just inverted).
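          // For example (illustrative), with a 4-byte element type:
          //   %idx = lshr exact i64 %v, 4
          //   %g   = getelementptr i32, ptr %p, i64 %idx
          // becomes
          //   %idx2 = lshr exact i64 %v, 2
          //   %g    = getelementptr i8, ptr %p, i64 %idx2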
3350 const APInt *C;
3351 std::optional<APInt> NewC;
3352 if (has_single_bit(TyAllocSize) &&
3353 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3354 C->uge(countr_zero(TyAllocSize)))
3355 NewC = *C - countr_zero(TyAllocSize);
3356 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3357 APInt Quot;
3358 uint64_t Rem;
3359 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3360 if (Rem == 0)
3361 NewC = Quot;
3362 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3363 APInt Quot;
3364 int64_t Rem;
3365 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3366 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3367 if (!Quot.isAllOnes() && Rem == 0)
3368 NewC = Quot;
3369 }
3370
3371 if (NewC.has_value()) {
3372 Value *NewOp = Builder.CreateBinOp(
3373 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3374 ConstantInt::get(V->getType(), *NewC));
3375 cast<BinaryOperator>(NewOp)->setIsExact();
3376 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3377 GEP.getPointerOperand(), NewOp,
3378 GEP.getNoWrapFlags());
3379 }
3380 }
3381 }
3382 }
3383 }
3384 // We do not handle pointer-vector geps here.
3385 if (GEPType->isVectorTy())
3386 return nullptr;
3387
3388 if (!GEP.isInBounds()) {
3389 unsigned IdxWidth =
3390 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3391 APInt BasePtrOffset(IdxWidth, 0);
3392 Value *UnderlyingPtrOp =
3393 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3394 bool CanBeNull, CanBeFreed;
3395 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3396 DL, CanBeNull, CanBeFreed);
3397 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3398 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3399 BasePtrOffset.isNonNegative()) {
3400 APInt AllocSize(IdxWidth, DerefBytes);
3401 if (BasePtrOffset.ule(AllocSize)) {
3402 return GetElementPtrInst::CreateInBounds(
3403 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3404 }
3405 }
3406 }
3407 }
3408
3409 // nusw + nneg -> nuw
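  // For example (illustrative): if %i is known non-negative,
  //   getelementptr nusw i8, ptr %p, i64 %i
  // becomes
  //   getelementptr nusw nuw i8, ptr %p, i64 %i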
3410 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3411 all_of(GEP.indices(), [&](Value *Idx) {
3412 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3413 })) {
3414 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3415 return &GEP;
3416 }
3417
3418 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3419 // to do this after having tried to derive "nuw" above.
3420 if (GEP.getNumIndices() == 1) {
3421 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3422 // geps if transforming into (gep (gep p, x), y).
3423 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3424 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3425 // that x + y does not have unsigned wrap.
3426 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3427 return GEP.getNoWrapFlags();
3428 return GEPNoWrapFlags::none();
3429 };
3430
3431 // Try to replace ADD + GEP with GEP + GEP.
3432 Value *Idx1, *Idx2;
3433 if (match(GEP.getOperand(1),
3434 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3435 // %idx = add i64 %idx1, %idx2
3436 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3437 // as:
3438 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3439 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3440 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3441 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3442 auto *NewPtr =
3443 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3444 Idx1, "", NWFlags);
3445 return replaceInstUsesWith(GEP,
3446 Builder.CreateGEP(GEP.getSourceElementType(),
3447 NewPtr, Idx2, "", NWFlags));
3448 }
3449 ConstantInt *C;
3450 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3451 m_Value(Idx1), m_ConstantInt(C))))))) {
3452 // %add = add nsw i32 %idx1, idx2
3453 // %sidx = sext i32 %add to i64
3454 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3455 // as:
3456 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3457 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3458 bool NUW = match(GEP.getOperand(1),
3460 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3461 auto *NewPtr = Builder.CreateGEP(
3462 GEP.getSourceElementType(), GEP.getPointerOperand(),
3463 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3464 return replaceInstUsesWith(
3465 GEP,
3466 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3467 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3468 "", NWFlags));
3469 }
3470 }
3471
3473 return R;
3474
3475 return nullptr;
3476}
3477
3478static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3479 Instruction *AI) {
3480 if (isa<ConstantPointerNull>(V))
3481 return true;
3482 if (auto *LI = dyn_cast<LoadInst>(V))
3483 return isa<GlobalVariable>(LI->getPointerOperand());
3484 // Two distinct allocations will never be equal.
3485 return isAllocLikeFn(V, &TLI) && V != AI;
3486}
3487
3488/// Given a call CB which uses an address UsedV, return true if we can prove the
3489 /// call's only possible effect is storing to UsedV.
3490static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3491 const TargetLibraryInfo &TLI) {
3492 if (!CB.use_empty())
3493 // TODO: add recursion if returned attribute is present
3494 return false;
3495
3496 if (CB.isTerminator())
3497 // TODO: remove implementation restriction
3498 return false;
3499
3500 if (!CB.willReturn() || !CB.doesNotThrow())
3501 return false;
3502
3503 // If the only possible side effect of the call is writing to the alloca,
3504 // and the result isn't used, we can safely remove any reads implied by the
3505 // call including those which might read the alloca itself.
3506 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3507 return Dest && Dest->Ptr == UsedV;
3508}
3509
3510static std::optional<ModRefInfo>
3511isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3512 const TargetLibraryInfo &TLI, bool KnowInit) {
3513 SmallVector<Instruction *, 4> Worklist;
3514 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3515 Worklist.push_back(AI);
3516 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
3517
3518 do {
3519 Instruction *PI = Worklist.pop_back_val();
3520 for (User *U : PI->users()) {
3521 Instruction *I = cast<Instruction>(U);
3522 switch (I->getOpcode()) {
3523 default:
3524 // Give up the moment we see something we can't handle.
3525 return std::nullopt;
3526
3527 case Instruction::AddrSpaceCast:
3528 case Instruction::BitCast:
3529 case Instruction::GetElementPtr:
3530 Users.emplace_back(I);
3531 Worklist.push_back(I);
3532 continue;
3533
3534 case Instruction::ICmp: {
3535 ICmpInst *ICI = cast<ICmpInst>(I);
3536 // We can fold eq/ne comparisons with null to false/true, respectively.
3537 // We also fold comparisons in some conditions provided the alloc has
3538 // not escaped (see isNeverEqualToUnescapedAlloc).
3539 if (!ICI->isEquality())
3540 return std::nullopt;
3541 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3542 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3543 return std::nullopt;
3544
3545 // Do not fold compares to aligned_alloc calls, as they may have to
3546 // return null in case the required alignment cannot be satisfied,
3547 // unless we can prove that both alignment and size are valid.
3548 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3549 // Check if alignment and size of a call to aligned_alloc is valid,
3550 // that is alignment is a power-of-2 and the size is a multiple of the
3551 // alignment.
3552 const APInt *Alignment;
3553 const APInt *Size;
3554 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3555 match(CB->getArgOperand(1), m_APInt(Size)) &&
3556 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3557 };
3558 auto *CB = dyn_cast<CallBase>(AI);
3559 LibFunc TheLibFunc;
3560 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3561 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3562 !AlignmentAndSizeKnownValid(CB))
3563 return std::nullopt;
3564 Users.emplace_back(I);
3565 continue;
3566 }
3567
3568 case Instruction::Call:
3569 // Ignore no-op and store intrinsics.
3570 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3571 switch (II->getIntrinsicID()) {
3572 default:
3573 return std::nullopt;
3574
3575 case Intrinsic::memmove:
3576 case Intrinsic::memcpy:
3577 case Intrinsic::memset: {
3578 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3579 if (MI->isVolatile())
3580 return std::nullopt;
3581 // Note: this could also be ModRef, but we can still interpret that
3582 // as just Mod in that case.
3583 ModRefInfo NewAccess =
3584 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3585 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3586 return std::nullopt;
3587 Access |= NewAccess;
3588 [[fallthrough]];
3589 }
3590 case Intrinsic::assume:
3591 case Intrinsic::invariant_start:
3592 case Intrinsic::invariant_end:
3593 case Intrinsic::lifetime_start:
3594 case Intrinsic::lifetime_end:
3595 case Intrinsic::objectsize:
3596 Users.emplace_back(I);
3597 continue;
3598 case Intrinsic::launder_invariant_group:
3599 case Intrinsic::strip_invariant_group:
3600 Users.emplace_back(I);
3601 Worklist.push_back(I);
3602 continue;
3603 }
3604 }
3605
3606 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3607 getAllocationFamily(I, &TLI) == Family) {
3608 Users.emplace_back(I);
3609 continue;
3610 }
3611
3612 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3613 getAllocationFamily(I, &TLI) == Family) {
3614 Users.emplace_back(I);
3615 Worklist.push_back(I);
3616 continue;
3617 }
3618
3619 if (!isRefSet(Access) &&
3620 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3622 Users.emplace_back(I);
3623 continue;
3624 }
3625
3626 return std::nullopt;
3627
3628 case Instruction::Store: {
3629 StoreInst *SI = cast<StoreInst>(I);
3630 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3631 return std::nullopt;
3632 if (isRefSet(Access))
3633 return std::nullopt;
3635 Users.emplace_back(I);
3636 continue;
3637 }
3638
3639 case Instruction::Load: {
3640 LoadInst *LI = cast<LoadInst>(I);
3641 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3642 return std::nullopt;
3643 if (isModSet(Access))
3644 return std::nullopt;
3646 Users.emplace_back(I);
3647 continue;
3648 }
3649 }
3650 llvm_unreachable("missing a return?");
3651 }
3652 } while (!Worklist.empty());
3653
3655 return Access;
3656}
3657
3658Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3659 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3660
3661 // If we have a malloc call which is only used in any amount of comparisons to
3662 // null and free calls, delete the calls and replace the comparisons with true
3663 // or false as appropriate.
3664
3665 // This is based on the principle that we can substitute our own allocation
3666 // function (which will never return null) rather than knowledge of the
3667 // specific function being called. In some sense this can change the permitted
3668 // outputs of a program (when we convert a malloc to an alloca, the fact that
3669 // the allocation is now on the stack is potentially visible, for example),
3670 // but we believe it does so in a permissible manner.
3672
3673 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3674 // before each store.
3676 std::unique_ptr<DIBuilder> DIB;
3677 if (isa<AllocaInst>(MI)) {
3678 findDbgUsers(&MI, DVRs);
3679 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3680 }
3681
3682 // Determine what getInitialValueOfAllocation would return without actually
3683 // allocating the result.
3684 bool KnowInitUndef = false;
3685 bool KnowInitZero = false;
3686 Constant *Init =
3688 if (Init) {
3689 if (isa<UndefValue>(Init))
3690 KnowInitUndef = true;
3691 else if (Init->isNullValue())
3692 KnowInitZero = true;
3693 }
3694 // The various sanitizers don't actually return undef memory, but rather
3695 // memory initialized with special forms of runtime poison
3696 auto &F = *MI.getFunction();
3697 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3698 F.hasFnAttribute(Attribute::SanitizeAddress))
3699 KnowInitUndef = false;
3700
3701 auto Removable =
3702 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3703 if (Removable) {
3704 for (WeakTrackingVH &User : Users) {
3705 // Lowering all @llvm.objectsize and MTI calls first because they may use
3706 // a bitcast/GEP of the alloca we are removing.
3707 if (!User)
3708 continue;
3709
3710 Instruction *I = cast<Instruction>(&*User);
3711
3712 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3713 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3714 SmallVector<Instruction *> InsertedInstructions;
3715 Value *Result = lowerObjectSizeCall(
3716 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3717 for (Instruction *Inserted : InsertedInstructions)
3718 Worklist.add(Inserted);
3719 replaceInstUsesWith(*I, Result);
3721 User = nullptr; // Skip examining in the next loop.
3722 continue;
3723 }
3724 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3725 if (KnowInitZero && isRefSet(*Removable)) {
3728 auto *M = Builder.CreateMemSet(
3729 MTI->getRawDest(),
3730 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3731 MTI->getLength(), MTI->getDestAlign());
3732 M->copyMetadata(*MTI);
3733 }
3734 }
3735 }
3736 }
3737 for (WeakTrackingVH &User : Users) {
3738 if (!User)
3739 continue;
3740
3741 Instruction *I = cast<Instruction>(&*User);
3742
3743 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3745 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3746 C->isFalseWhenEqual()));
3747 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3748 for (auto *DVR : DVRs)
3749 if (DVR->isAddressOfVariable())
3750 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3751 } else {
3752 // Casts, GEP, or anything else: we're about to delete this instruction,
3753 // so it can not have any valid uses.
3754 Constant *Replace;
3755 if (isa<LoadInst>(I)) {
3756 assert(KnowInitZero || KnowInitUndef);
3757 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3758 : Constant::getNullValue(I->getType());
3759 } else
3760 Replace = PoisonValue::get(I->getType());
3761 replaceInstUsesWith(*I, Replace);
3762 }
3764 }
3765
3766 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3767 // Replace invoke with a NOP intrinsic to maintain the original CFG
3768 Module *M = II->getModule();
3769 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3770 auto *NewII = InvokeInst::Create(
3771 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3772 NewII->setDebugLoc(II->getDebugLoc());
3773 }
3774
3775 // Remove debug intrinsics which describe the value contained within the
3776 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3777 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3778 //
3779 // ```
3780 // define void @foo(i32 %0) {
3781 // %a = alloca i32 ; Deleted.
3782 // store i32 %0, i32* %a
3783 // dbg.value(i32 %0, "arg0") ; Not deleted.
3784 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3785 // call void @trivially_inlinable_no_op(i32* %a)
3786 // ret void
3787 // }
3788 // ```
3789 //
3790 // This may not be required if we stop describing the contents of allocas
3791 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3792 // the LowerDbgDeclare utility.
3793 //
3794 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3795 // "arg0" dbg.value may be stale after the call. However, failing to remove
3796 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3797 //
3798 // FIXME: the Assignment Tracking project has now likely made this
3799 // redundant (and it's sometimes harmful).
3800 for (auto *DVR : DVRs)
3801 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3802 DVR->eraseFromParent();
3803
3804 return eraseInstFromFunction(MI);
3805 }
3806 return nullptr;
3807}
3808
3809/// Move the call to free before a NULL test.
3810///
3811 /// Check if this free is accessed after its argument has been tested
3812/// against NULL (property 0).
3813/// If yes, it is legal to move this call in its predecessor block.
3814///
3815/// The move is performed only if the block containing the call to free
3816/// will be removed, i.e.:
3817/// 1. it has only one predecessor P, and P has two successors
3818/// 2. it contains the call, noops, and an unconditional branch
3819/// 3. its successor is the same as its predecessor's successor
3820///
3821 /// Profitability is not a concern here, and this function should
3822/// be called only if the caller knows this transformation would be
3823/// profitable (e.g., for code size).
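///
/// A rough sketch of the shape being matched (illustrative):
///   entry:
///     %c = icmp eq ptr %p, null
///     br i1 %c, label %exit, label %free.bb
///   free.bb:
///     call void @free(ptr %p)
///     br label %exit
/// After the transform, the call to free is hoisted into %entry so that the
/// now-empty block can be removed by later passes.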
3824static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3825 const DataLayout &DL) {
3826 Value *Op = FI.getArgOperand(0);
3827 BasicBlock *FreeInstrBB = FI.getParent();
3828 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3829
3830 // Validate part of constraint #1: Only one predecessor
3831 // FIXME: We can extend the number of predecessors, but in that case, we
3832 // would duplicate the call to free in each predecessor and it may
3833 // not be profitable even for code size.
3834 if (!PredBB)
3835 return nullptr;
3836
3837 // Validate constraint #2: Does this block contain only the call to
3838 // free, noops, and an unconditional branch?
3839 BasicBlock *SuccBB;
3840 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3841 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3842 return nullptr;
3843
3844 // If there are only 2 instructions in the block, at this point,
3845 // they are the call to free and the unconditional branch.
3846 // If there are more than 2 instructions, check that they are noops
3847 // i.e., they won't hurt the performance of the generated code.
3848 if (FreeInstrBB->size() != 2) {
3849 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3850 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3851 continue;
3852 auto *Cast = dyn_cast<CastInst>(&Inst);
3853 if (!Cast || !Cast->isNoopCast(DL))
3854 return nullptr;
3855 }
3856 }
3857 // Validate the rest of constraint #1 by matching on the pred branch.
3858 Instruction *TI = PredBB->getTerminator();
3859 BasicBlock *TrueBB, *FalseBB;
3860 CmpPredicate Pred;
3861 if (!match(TI, m_Br(m_ICmp(Pred,
3863 m_Specific(Op->stripPointerCasts())),
3864 m_Zero()),
3865 TrueBB, FalseBB)))
3866 return nullptr;
3867 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3868 return nullptr;
3869
3870 // Validate constraint #3: Ensure the null case just falls through.
3871 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3872 return nullptr;
3873 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3874 "Broken CFG: missing edge from predecessor to successor");
3875
3876 // At this point, we know that everything in FreeInstrBB can be moved
3877 // before TI.
3878 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3879 if (&Instr == FreeInstrBBTerminator)
3880 break;
3881 Instr.moveBeforePreserving(TI->getIterator());
3882 }
3883 assert(FreeInstrBB->size() == 1 &&
3884 "Only the branch instruction should remain");
3885
3886 // Now that we've moved the call to free before the NULL check, we have to
3887 // remove any attributes on its parameter that imply it's non-null, because
3888 // those attributes might have only been valid because of the NULL check, and
3889 // we can get miscompiles if we keep them. This is conservative if non-null is
3890 // also implied by something other than the NULL check, but it's guaranteed to
3891 // be correct, and the conservativeness won't matter in practice, since the
3892 // attributes are irrelevant for the call to free itself and the pointer
3893 // shouldn't be used after the call.
3894 AttributeList Attrs = FI.getAttributes();
3895 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3896 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3897 if (Dereferenceable.isValid()) {
3898 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3899 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3900 Attribute::Dereferenceable);
3901 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3902 }
3903 FI.setAttributes(Attrs);
3904
3905 return &FI;
3906}
3907
3908Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3909 // free undef -> unreachable.
3910 if (isa<UndefValue>(Op)) {
3911 // Leave a marker since we can't modify the CFG here.
3913 return eraseInstFromFunction(FI);
3914 }
3915
3916 // If we have 'free null' delete the instruction. This can happen in stl code
3917 // when lots of inlining happens.
3918 if (isa<ConstantPointerNull>(Op))
3919 return eraseInstFromFunction(FI);
3920
3921 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3922 // realloc() entirely.
3923 CallInst *CI = dyn_cast<CallInst>(Op);
3924 if (CI && CI->hasOneUse())
3925 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3926 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3927
3928 // If we optimize for code size, try to move the call to free before the null
3929 // test so that simplify cfg can remove the empty block and dead code
3930 // elimination can remove the branch. I.e., this helps to turn something like:
3931 // if (foo) free(foo);
3932 // into
3933 // free(foo);
3934 //
3935 // Note that we can only do this for 'free' and not for any flavor of
3936 // 'operator delete'; there is no 'operator delete' symbol for which we are
3937 // permitted to invent a call, even if we're passing in a null pointer.
3938 if (MinimizeSize) {
3939 LibFunc Func;
3940 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3942 return I;
3943 }
3944
3945 return nullptr;
3946}
3947
3948Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3949 Value *RetVal = RI.getReturnValue();
3950 if (!RetVal)
3951 return nullptr;
3952
3953 Function *F = RI.getFunction();
3954 Type *RetTy = RetVal->getType();
3955 if (RetTy->isPointerTy()) {
3956 bool HasDereferenceable =
3957 F->getAttributes().getRetDereferenceableBytes() > 0;
3958 if (F->hasRetAttribute(Attribute::NonNull) ||
3959 (HasDereferenceable &&
3960 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
3961 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3962 return replaceOperand(RI, 0, V);
3963 }
3964 }
3965
3967 return nullptr;
3968
3969 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3970 if (ReturnClass == fcNone)
3971 return nullptr;
3972
3973 KnownFPClass KnownClass;
3974 Value *Simplified =
3975 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
3976 if (!Simplified)
3977 return nullptr;
3978
3979 return ReturnInst::Create(RI.getContext(), Simplified);
3980}
3981
3982// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3983bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3984 // Try to remove the previous instruction if it must lead to unreachable.
3985 // This includes instructions like stores and "llvm.assume" that may not get
3986 // removed by simple dead code elimination.
3987 bool Changed = false;
3988 while (Instruction *Prev = I.getPrevNode()) {
3989 // While we theoretically can erase EH, that would result in a block that
3990 // used to start with an EH no longer starting with EH, which is invalid.
3991 // To make it valid, we'd need to fixup predecessors to no longer refer to
3992 // this block, but that changes CFG, which is not allowed in InstCombine.
3993 if (Prev->isEHPad())
3994 break; // Can not drop any more instructions. We're done here.
3995
3997 break; // Can not drop any more instructions. We're done here.
3998 // Otherwise, this instruction can be freely erased,
3999 // even if it is not side-effect free.
4000
4001 // A value may still have uses before we process it here (for example, in
4002 // another unreachable block), so convert those to poison.
4003 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4004 eraseInstFromFunction(*Prev);
4005 Changed = true;
4006 }
4007 return Changed;
4008}
4009
4010Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4011 removeInstructionsBeforeUnreachable(I);
4012 return nullptr;
4013}
4014
4015Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4016 assert(BI.isUnconditional() && "Only for unconditional branches.");
4017
4018 // If this store is the second-to-last instruction in the basic block
4019 // (excluding debug info) and if the block ends with
4020 // an unconditional branch, try to move the store to the successor block.
4021
4022 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4023 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4024 do {
4025 if (BBI != FirstInstr)
4026 --BBI;
4027 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4028
4029 return dyn_cast<StoreInst>(BBI);
4030 };
4031
4032 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4033 if (mergeStoreIntoSuccessor(*SI))
4034 return &BI;
4035
4036 return nullptr;
4037}
4038
4041 if (!DeadEdges.insert({From, To}).second)
4042 return;
4043
4044 // Replace phi node operands in successor with poison.
4045 for (PHINode &PN : To->phis())
4046 for (Use &U : PN.incoming_values())
4047 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4048 replaceUse(U, PoisonValue::get(PN.getType()));
4049 addToWorklist(&PN);
4050 MadeIRChange = true;
4051 }
4052
4053 Worklist.push_back(To);
4054}
4055
4056// Under the assumption that I is unreachable, remove it and following
4057// instructions. Changes are reported directly to MadeIRChange.
4060 BasicBlock *BB = I->getParent();
4061 for (Instruction &Inst : make_early_inc_range(
4062 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4063 std::next(I->getReverseIterator())))) {
4064 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4065 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4066 MadeIRChange = true;
4067 }
4068 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4069 continue;
4070 // RemoveDIs: erase debug-info on this instruction manually.
4071 Inst.dropDbgRecords();
4073 MadeIRChange = true;
4074 }
4075
4076 SmallVector<Value *> Changed;
4077 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4078 MadeIRChange = true;
4079 for (Value *V : Changed)
4080 addToWorklist(cast<Instruction>(V));
4081 }
4082
4083 // Handle potentially dead successors.
4084 for (BasicBlock *Succ : successors(BB))
4085 addDeadEdge(BB, Succ, Worklist);
4086}
4087
4090 while (!Worklist.empty()) {
4091 BasicBlock *BB = Worklist.pop_back_val();
4092 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4093 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4094 }))
4095 continue;
4096
4098 }
4099}
4100
4102 BasicBlock *LiveSucc) {
4104 for (BasicBlock *Succ : successors(BB)) {
4105 // The live successor isn't dead.
4106 if (Succ == LiveSucc)
4107 continue;
4108
4109 addDeadEdge(BB, Succ, Worklist);
4110 }
4111
4113}
4114
4116 if (BI.isUnconditional())
4118
4119 // Change br (not X), label True, label False to: br X, label False, True
4120 Value *Cond = BI.getCondition();
4121 Value *X;
4122 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4123 // Swap Destinations and condition...
4124 BI.swapSuccessors();
4125 if (BPI)
4127 return replaceOperand(BI, 0, X);
4128 }
4129
4130 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4131 // This is done by inverting the condition and swapping successors:
4132 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4133 Value *Y;
4134 if (isa<SelectInst>(Cond) &&
4135 match(Cond,
4137 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4138 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4139 BI.swapSuccessors();
4140 if (BPI)
4142 return replaceOperand(BI, 0, Or);
4143 }
4144
4145 // If the condition is irrelevant, remove the use so that other
4146 // transforms on the condition become more effective.
4147 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4148 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4149
4150 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4151 CmpPredicate Pred;
4152 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4153 !isCanonicalPredicate(Pred)) {
4154 // Swap destinations and condition.
4155 auto *Cmp = cast<CmpInst>(Cond);
4156 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4157 BI.swapSuccessors();
4158 if (BPI)
4160 Worklist.push(Cmp);
4161 return &BI;
4162 }
4163
4164 if (isa<UndefValue>(Cond)) {
4165 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4166 return nullptr;
4167 }
4168 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4170 BI.getSuccessor(!CI->getZExtValue()));
4171 return nullptr;
4172 }
4173
4174 // Replace all dominated uses of the condition with true/false
4175 // Ignore constant expressions to avoid iterating over uses on other
4176 // functions.
4177 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4178 for (auto &U : make_early_inc_range(Cond->uses())) {
4179 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4180 if (DT.dominates(Edge0, U)) {
4181 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4182 addToWorklist(cast<Instruction>(U.getUser()));
4183 continue;
4184 }
4185 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4186 if (DT.dominates(Edge1, U)) {
4187 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4188 addToWorklist(cast<Instruction>(U.getUser()));
4189 }
4190 }
4191 }
4192
4193 DC.registerBranch(&BI);
4194 return nullptr;
4195}
4196
4197// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4198// we can prove that both (switch C) and (switch X) go to the default when cond
4199// is false/true.
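// For example (illustrative):
//   %sel = select i1 (icmp ult i32 %x, 4), i32 %x, i32 7
//   switch i32 %sel, label %default [ cases for 0..3 only ]
// There is no case for 7 (it falls through to the default), and every case
// value lies in the range where the select returns %x, so the switch can
// simply use %x.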
4202 bool IsTrueArm) {
4203 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4204 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4205 if (!C)
4206 return nullptr;
4207
4208 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4209 if (CstBB != SI.getDefaultDest())
4210 return nullptr;
4211 Value *X = Select->getOperand(3 - CstOpIdx);
4212 CmpPredicate Pred;
4213 const APInt *RHSC;
4214 if (!match(Select->getCondition(),
4215 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4216 return nullptr;
4217 if (IsTrueArm)
4218 Pred = ICmpInst::getInversePredicate(Pred);
4219
4220 // See whether we can replace the select with X
4222 for (auto Case : SI.cases())
4223 if (!CR.contains(Case.getCaseValue()->getValue()))
4224 return nullptr;
4225
4226 return X;
4227}
4228
4230 Value *Cond = SI.getCondition();
4231 Value *Op0;
4232 ConstantInt *AddRHS;
4233 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
4234 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
4235 for (auto Case : SI.cases()) {
4236 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
4237 assert(isa<ConstantInt>(NewCase) &&
4238 "Result of expression should be constant");
4239 Case.setValue(cast<ConstantInt>(NewCase));
4240 }
4241 return replaceOperand(SI, 0, Op0);
4242 }
4243
4244 ConstantInt *SubLHS;
4245 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
4246 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
4247 for (auto Case : SI.cases()) {
4248 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
4249 assert(isa<ConstantInt>(NewCase) &&
4250 "Result of expression should be constant");
4251 Case.setValue(cast<ConstantInt>(NewCase));
4252 }
4253 return replaceOperand(SI, 0, Op0);
4254 }
4255
4256 uint64_t ShiftAmt;
4257 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4258 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4259 all_of(SI.cases(), [&](const auto &Case) {
4260 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4261 })) {
4262 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4263 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
4264 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4265 Shl->hasOneUse()) {
4266 Value *NewCond = Op0;
4267 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4268 // If the shift may wrap, we need to mask off the shifted bits.
4269 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4270 NewCond = Builder.CreateAnd(
4271 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4272 }
4273 for (auto Case : SI.cases()) {
4274 const APInt &CaseVal = Case.getCaseValue()->getValue();
4275 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4276 : CaseVal.lshr(ShiftAmt);
4277 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4278 }
4279 return replaceOperand(SI, 0, NewCond);
4280 }
4281 }
4282
4283 // Fold switch(zext/sext(X)) into switch(X) if possible.
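  // For example (illustrative):
  //   %w = zext i8 %x to i32
  //   switch i32 %w, label %d [ i32 3, label %bb ]
  // becomes
  //   switch i8 %x, label %d [ i8 3, label %bb ]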
4284 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4285 bool IsZExt = isa<ZExtInst>(Cond);
4286 Type *SrcTy = Op0->getType();
4287 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4288
4289 if (all_of(SI.cases(), [&](const auto &Case) {
4290 const APInt &CaseVal = Case.getCaseValue()->getValue();
4291 return IsZExt ? CaseVal.isIntN(NewWidth)
4292 : CaseVal.isSignedIntN(NewWidth);
4293 })) {
4294 for (auto &Case : SI.cases()) {
4295 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4296 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4297 }
4298 return replaceOperand(SI, 0, Op0);
4299 }
4300 }
4301
4302 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4303 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4304 if (Value *V =
4305 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4306 return replaceOperand(SI, 0, V);
4307 if (Value *V =
4308 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4309 return replaceOperand(SI, 0, V);
4310 }
4311
4312 KnownBits Known = computeKnownBits(Cond, &SI);
4313 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4314 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4315
4316 // Compute the number of leading bits we can ignore.
4317 // TODO: A better way to determine this would use ComputeNumSignBits().
4318 for (const auto &C : SI.cases()) {
4319 LeadingKnownZeros =
4320 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4321 LeadingKnownOnes =
4322 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4323 }
4324
4325 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4326
4327 // Shrink the condition operand if the new type is smaller than the old type.
4328 // But do not shrink to a non-standard type, because backend can't generate
4329 // good code for that yet.
4330 // TODO: We can make it aggressive again after fixing PR39569.
4331 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4332 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4333 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4335 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4336
4337 for (auto Case : SI.cases()) {
4338 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4339 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4340 }
4341 return replaceOperand(SI, 0, NewCond);
4342 }
4343
4344 if (isa<UndefValue>(Cond)) {
4345 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4346 return nullptr;
4347 }
4348 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4349 handlePotentiallyDeadSuccessors(SI.getParent(),
4350 SI.findCaseValue(CI)->getCaseSuccessor());
4351 return nullptr;
4352 }
4353
4354 return nullptr;
4355}
4356
4358InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4359 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4360 if (!WO)
4361 return nullptr;
4362
4363 Intrinsic::ID OvID = WO->getIntrinsicID();
4364 const APInt *C = nullptr;
4365 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4366 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4367 OvID == Intrinsic::umul_with_overflow)) {
4368 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4369 if (C->isAllOnes())
4370 return BinaryOperator::CreateNeg(WO->getLHS());
4371 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4372 if (C->isPowerOf2()) {
4373 return BinaryOperator::CreateShl(
4374 WO->getLHS(),
4375 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4376 }
4377 }
4378 }
4379
4380 // We're extracting from an overflow intrinsic. See if we're the only user.
4381 // That allows us to simplify multiple result intrinsics to simpler things
4382 // that just get one value.
4383 if (!WO->hasOneUse())
4384 return nullptr;
4385
4386 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4387 // and replace it with a traditional binary instruction.
4388 if (*EV.idx_begin() == 0) {
4389 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4390 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4391 // Replace the old instruction's uses with poison.
4392 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4394 return BinaryOperator::Create(BinOp, LHS, RHS);
4395 }
4396
4397 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4398
4399 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4400 if (OvID == Intrinsic::usub_with_overflow)
4401 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4402
4403 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4404 // +1 is not possible because we assume signed values.
4405 if (OvID == Intrinsic::smul_with_overflow &&
4406 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4407 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4408
4409 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4410 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4411 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4412 // Only handle even bitwidths for performance reasons.
4413 if (BitWidth % 2 == 0)
4414 return new ICmpInst(
4415 ICmpInst::ICMP_UGT, WO->getLHS(),
4416 ConstantInt::get(WO->getLHS()->getType(),
4418 }
4419
4420 // If only the overflow result is used, and the right hand side is a
4421 // constant (or constant splat), we can remove the intrinsic by directly
4422 // checking for overflow.
4423 if (C) {
4424 // Compute the no-wrap range for LHS given RHS=C, then construct an
4425 // equivalent icmp, potentially using an offset.
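    // For example (illustrative), for the overflow bit of
    //   @llvm.sadd.with.overflow.i8(i8 %x, i8 100)
    // the no-wrap range of %x is [-128, 28), so the overflow result becomes
    //   icmp sgt i8 %x, 27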
4427 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4428
4429 CmpInst::Predicate Pred;
4430 APInt NewRHSC, Offset;
4431 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4432 auto *OpTy = WO->getRHS()->getType();
4433 auto *NewLHS = WO->getLHS();
4434 if (Offset != 0)
4435 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4436 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4437 ConstantInt::get(OpTy, NewRHSC));
4438 }
4439
4440 return nullptr;
4441}
4442
4445 InstCombiner::BuilderTy &Builder) {
4446 // Helper to fold frexp of select to select of frexp.
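  // For example (illustrative):
  //   %s = select i1 %c, double 4.0, double %x
  //   %r = call { double, i32 } @llvm.frexp.f64.i32(double %s)
  //   %m = extractvalue { double, i32 } %r, 0
  // becomes (for the mantissa result)
  //   %r2 = call { double, i32 } @llvm.frexp.f64.i32(double %x)
  //   %m2 = extractvalue { double, i32 } %r2, 0
  //   %m  = select i1 %c, double 0.5, double %m2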
4447
4448 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4449 return nullptr;
4451 Value *TrueVal = SelectInst->getTrueValue();
4452 Value *FalseVal = SelectInst->getFalseValue();
4453
4454 const APFloat *ConstVal = nullptr;
4455 Value *VarOp = nullptr;
4456 bool ConstIsTrue = false;
4457
4458 if (match(TrueVal, m_APFloat(ConstVal))) {
4459 VarOp = FalseVal;
4460 ConstIsTrue = true;
4461 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4462 VarOp = TrueVal;
4463 ConstIsTrue = false;
4464 } else {
4465 return nullptr;
4466 }
4467
4468 Builder.SetInsertPoint(&EV);
4469
4470 CallInst *NewFrexp =
4471 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4472 NewFrexp->copyIRFlags(FrexpCall);
4473
4474 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4475
4476 int Exp;
4477 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4478
4479 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4480
4481 Value *NewSel = Builder.CreateSelectFMF(
4482 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4483 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4484 return NewSel;
4485}
4487 Value *Agg = EV.getAggregateOperand();
4488
4489 if (!EV.hasIndices())
4490 return replaceInstUsesWith(EV, Agg);
4491
4492 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4493 SQ.getWithInstruction(&EV)))
4494 return replaceInstUsesWith(EV, V);
4495
4496 Value *Cond, *TrueVal, *FalseVal;
4497 if (match(&EV, m_ExtractValue<0>(m_Intrinsic<Intrinsic::frexp>(m_Select(
4498 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4499 auto *SelInst =
4500 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4501 if (Value *Result =
4502 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4503 return replaceInstUsesWith(EV, Result);
4504 }
4505 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4506 // We're extracting from an insertvalue instruction, compare the indices
4507 const unsigned *exti, *exte, *insi, *inse;
4508 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4509 exte = EV.idx_end(), inse = IV->idx_end();
4510 exti != exte && insi != inse;
4511 ++exti, ++insi) {
4512 if (*insi != *exti)
4513 // The insert and extract both reference distinctly different elements.
4514 // This means the extract is not influenced by the insert, and we can
4515 // replace the aggregate operand of the extract with the aggregate
4516 // operand of the insert. i.e., replace
4517 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4518 // %E = extractvalue { i32, { i32 } } %I, 0
4519 // with
4520 // %E = extractvalue { i32, { i32 } } %A, 0
4521 return ExtractValueInst::Create(IV->getAggregateOperand(),
4522 EV.getIndices());
4523 }
4524 if (exti == exte && insi == inse)
4525 // Both iterators are at the end: Index lists are identical. Replace
4526 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4527 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4528 // with "i32 42"
4529 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4530 if (exti == exte) {
4531 // The extract list is a prefix of the insert list. i.e. replace
4532 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4533 // %E = extractvalue { i32, { i32 } } %I, 1
4534 // with
4535 // %X = extractvalue { i32, { i32 } } %A, 1
4536 // %E = insertvalue { i32 } %X, i32 42, 0
4537 // by switching the order of the insert and extract (though the
4538 // insertvalue should be left in, since it may have other uses).
4539 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4540 EV.getIndices());
4541 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4542 ArrayRef(insi, inse));
4543 }
4544 if (insi == inse)
4545 // The insert list is a prefix of the extract list
4546 // We can simply remove the common indices from the extract and make it
4547 // operate on the inserted value instead of the insertvalue result.
4548 // i.e., replace
4549 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4550 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4551 // with
4552 // %E = extractvalue { i32 } { i32 42 }, 0
4553 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4554 ArrayRef(exti, exte));
4555 }
4556
4557 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4558 return R;
4559
4560 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4561 // Bail out if the aggregate contains scalable vector type
4562 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4563 STy && STy->isScalableTy())
4564 return nullptr;
4565
4566 // If the (non-volatile) load only has one use, we can rewrite this to a
4567 // load from a GEP. This reduces the size of the load. If a load is used
4568 // only by extractvalue instructions then this either must have been
4569 // optimized before, or it is a struct with padding, in which case we
4570 // don't want to do the transformation as it loses padding knowledge.
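    // For example (illustrative):
    //   %agg = load { i32, i64 }, ptr %p
    //   %v   = extractvalue { i32, i64 } %agg, 1
    // becomes
    //   %gep = getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1
    //   %v   = load i64, ptr %gep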
4571 if (L->isSimple() && L->hasOneUse()) {
4572 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4573 SmallVector<Value*, 4> Indices;
4574 // Prefix an i32 0 since we need the first element.
4575 Indices.push_back(Builder.getInt32(0));
4576 for (unsigned Idx : EV.indices())
4577 Indices.push_back(Builder.getInt32(Idx));
4578
4579 // We need to insert these at the location of the old load, not at that of
4580 // the extractvalue.
4582 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4583 L->getPointerOperand(), Indices);
4585 // Whatever aliasing information we had for the original load must also
4586 // hold for the smaller load, so propagate the annotations.
4587 NL->setAAMetadata(L->getAAMetadata());
4588 // Returning the load directly will cause the main loop to insert it in
4589 // the wrong spot, so use replaceInstUsesWith().
4590 return replaceInstUsesWith(EV, NL);
4591 }
4592 }
4593
4594 if (auto *PN = dyn_cast<PHINode>(Agg))
4595 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4596 return Res;
4597
4598 // Canonicalize extract (select Cond, TV, FV)
4599 // -> select cond, (extract TV), (extract FV)
4600 if (auto *SI = dyn_cast<SelectInst>(Agg))
4601 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4602 return R;
4603
4604 // We could simplify extracts from other values. Note that nested extracts may
4605 // already be simplified implicitly by the above: extract (extract (insert) )
4606 // will be translated into extract ( insert ( extract ) ) first and then just
4607 // the value inserted, if appropriate. Similarly for extracts from single-use
4608 // loads: extract (extract (load)) will be translated to extract (load (gep))
4609 // and if again single-use then via load (gep (gep)) to load (gep).
4610 // However, double extracts from e.g. function arguments or return values
4611 // aren't handled yet.
4612 return nullptr;
4613}
4614
4615/// Return 'true' if the given typeinfo will match anything.
4616static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4617 switch (Personality) {
4621 // The GCC C EH and Rust personalities only exist to support cleanups, so
4622 // it's not clear what the semantics of catch clauses are.
4623 return false;
4625 return false;
4627 // While __gnat_all_others_value will match any Ada exception, it doesn't
4628 // match foreign exceptions (or didn't, before gcc-4.7).
4629 return false;
4640 return TypeInfo->isNullValue();
4641 }
4642 llvm_unreachable("invalid enum");
4643}
4644
4645static bool shorter_filter(const Value *LHS, const Value *RHS) {
4646 return
4647 cast<ArrayType>(LHS->getType())->getNumElements()
4648 <
4649 cast<ArrayType>(RHS->getType())->getNumElements();
4650}
4651
4653 // The logic here should be correct for any real-world personality function.
4654 // However if that turns out not to be true, the offending logic can always
4655 // be conditioned on the personality function, like the catch-all logic is.
4656 EHPersonality Personality =
4657 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4658
4659 // Simplify the list of clauses, eg by removing repeated catch clauses
4660 // (these are often created by inlining).
4661 bool MakeNewInstruction = false; // If true, recreate using the following:
4662 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4663 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4664
4665 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4666 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4667 bool isLastClause = i + 1 == e;
4668 if (LI.isCatch(i)) {
4669 // A catch clause.
4670 Constant *CatchClause = LI.getClause(i);
4671 Constant *TypeInfo = CatchClause->stripPointerCasts();
4672
4673 // If we already saw this clause, there is no point in having a second
4674 // copy of it.
4675 if (AlreadyCaught.insert(TypeInfo).second) {
4676 // This catch clause was not already seen.
4677 NewClauses.push_back(CatchClause);
4678 } else {
4679 // Repeated catch clause - drop the redundant copy.
4680 MakeNewInstruction = true;
4681 }
4682
4683 // If this is a catch-all then there is no point in keeping any following
4684 // clauses or marking the landingpad as having a cleanup.
4685 if (isCatchAll(Personality, TypeInfo)) {
4686 if (!isLastClause)
4687 MakeNewInstruction = true;
4688 CleanupFlag = false;
4689 break;
4690 }
4691 } else {
4692 // A filter clause. If any of the filter elements were already caught
4693 // then they can be dropped from the filter. It is tempting to try to
4694 // exploit the filter further by saying that any typeinfo that does not
4695 // occur in the filter can't be caught later (and thus can be dropped).
4696 // However this would be wrong, since typeinfos can match without being
4697 // equal (for example if one represents a C++ class, and the other some
4698 // class derived from it).
4699 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4700 Constant *FilterClause = LI.getClause(i);
4701 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4702 unsigned NumTypeInfos = FilterType->getNumElements();
4703
4704 // An empty filter catches everything, so there is no point in keeping any
4705 // following clauses or marking the landingpad as having a cleanup. By
4706 // dealing with this case here the following code is made a bit simpler.
4707 if (!NumTypeInfos) {
4708 NewClauses.push_back(FilterClause);
4709 if (!isLastClause)
4710 MakeNewInstruction = true;
4711 CleanupFlag = false;
4712 break;
4713 }
4714
4715 bool MakeNewFilter = false; // If true, make a new filter.
4716 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4717 if (isa<ConstantAggregateZero>(FilterClause)) {
4718 // Not an empty filter - it contains at least one null typeinfo.
4719 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4720 Constant *TypeInfo =
4722 // If this typeinfo is a catch-all then the filter can never match.
4723 if (isCatchAll(Personality, TypeInfo)) {
4724 // Throw the filter away.
4725 MakeNewInstruction = true;
4726 continue;
4727 }
4728
4729 // There is no point in having multiple copies of this typeinfo, so
4730 // discard all but the first copy if there is more than one.
4731 NewFilterElts.push_back(TypeInfo);
4732 if (NumTypeInfos > 1)
4733 MakeNewFilter = true;
4734 } else {
4735 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4736 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4737 NewFilterElts.reserve(NumTypeInfos);
4738
4739 // Remove any filter elements that were already caught or that already
4740 // occurred in the filter. While there, see if any of the elements are
4741 // catch-alls. If so, the filter can be discarded.
4742 bool SawCatchAll = false;
4743 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4744 Constant *Elt = Filter->getOperand(j);
4745 Constant *TypeInfo = Elt->stripPointerCasts();
4746 if (isCatchAll(Personality, TypeInfo)) {
4747 // This element is a catch-all. Bail out, noting this fact.
4748 SawCatchAll = true;
4749 break;
4750 }
4751
4752 // Even if we've seen a type in a catch clause, we don't want to
4753 // remove it from the filter. An unexpected type handler may be
4754 // set up for a call site which throws an exception of the same
4755 // type caught. In order for the exception thrown by the unexpected
4756 // handler to propagate correctly, the filter must be correctly
4757 // described for the call site.
4758 //
4759 // Example:
4760 //
4761 // void unexpected() { throw 1;}
4762 // void foo() throw (int) {
4763 // std::set_unexpected(unexpected);
4764 // try {
4765 // throw 2.0;
4766 // } catch (int i) {}
4767 // }
4768
4769 // There is no point in having multiple copies of the same typeinfo in
4770 // a filter, so only add it if we didn't already.
4771 if (SeenInFilter.insert(TypeInfo).second)
4772 NewFilterElts.push_back(cast<Constant>(Elt));
4773 }
4774 // A filter containing a catch-all cannot match anything by definition.
4775 if (SawCatchAll) {
4776 // Throw the filter away.
4777 MakeNewInstruction = true;
4778 continue;
4779 }
4780
4781 // If we dropped something from the filter, make a new one.
4782 if (NewFilterElts.size() < NumTypeInfos)
4783 MakeNewFilter = true;
4784 }
4785 if (MakeNewFilter) {
4786 FilterType = ArrayType::get(FilterType->getElementType(),
4787 NewFilterElts.size());
4788 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4789 MakeNewInstruction = true;
4790 }
4791
4792 NewClauses.push_back(FilterClause);
4793
4794 // If the new filter is empty then it will catch everything so there is
4795 // no point in keeping any following clauses or marking the landingpad
4796 // as having a cleanup. The case of the original filter being empty was
4797 // already handled above.
4798 if (MakeNewFilter && !NewFilterElts.size()) {
4799 assert(MakeNewInstruction && "New filter but not a new instruction!");
4800 CleanupFlag = false;
4801 break;
4802 }
4803 }
4804 }
4805
4806 // If several filters occur in a row then reorder them so that the shortest
4807 // filters come first (those with the smallest number of elements). This is
4808 // advantageous because shorter filters are more likely to match, speeding up
4809 // unwinding, but mostly because it increases the effectiveness of the other
4810 // filter optimizations below.
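// Illustrative sketch (added comment, not part of the original source; @A and
// @B are hypothetical typeinfos): a clause list such as
//   filter [2 x ptr] [ptr @A, ptr @B], filter [1 x ptr] [ptr @B]
// is reordered to
//   filter [1 x ptr] [ptr @B], filter [2 x ptr] [ptr @A, ptr @B]
// so that the shorter filter, which is more likely to match, is tried first.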
4811 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4812 unsigned j;
4813 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4814 for (j = i; j != e; ++j)
4815 if (!isa<ArrayType>(NewClauses[j]->getType()))
4816 break;
4817
4818 // Check whether the filters are already sorted by length. We need to know
4819 // if sorting them is actually going to do anything so that we only make a
4820 // new landingpad instruction if it does.
4821 for (unsigned k = i; k + 1 < j; ++k)
4822 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4823 // Not sorted, so sort the filters now. Doing an unstable sort would be
4824 // correct too but reordering filters pointlessly might confuse users.
4825 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4826 shorter_filter);
4827 MakeNewInstruction = true;
4828 break;
4829 }
4830
4831 // Look for the next batch of filters.
4832 i = j + 1;
4833 }
4834
4835 // If typeinfos matched if and only if equal, then the elements of a filter L
4836 // that occurs later than a filter F could be replaced by the intersection of
4837 // the elements of F and L. In reality two typeinfos can match without being
4838 // equal (for example if one represents a C++ class, and the other some class
4839 // derived from it) so it would be wrong to perform this transform in general.
4840 // However the transform is correct and useful if F is a subset of L. In that
4841 // case L can be replaced by F, and thus removed altogether since repeating a
4842 // filter is pointless. So here we look at all pairs of filters F and L where
4843 // L follows F in the list of clauses, and remove L if every element of F is
4844 // an element of L. This can occur when inlining C++ functions with exception
4845 // specifications.
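// Illustrative sketch (added comment, not part of the original source; @A and
// @B are hypothetical typeinfos): given the clause list
//   filter [1 x ptr] [ptr @A], ..., filter [2 x ptr] [ptr @A, ptr @B]
// the first filter is a subset of the second, so the second is redundant:
// any exception the later filter would match was already matched by the
// earlier, shorter one, and the later filter can be erased.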
4846 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4847 // Examine each filter in turn.
4848 Value *Filter = NewClauses[i];
4849 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4850 if (!FTy)
4851 // Not a filter - skip it.
4852 continue;
4853 unsigned FElts = FTy->getNumElements();
4854 // Examine each filter following this one. Doing this backwards means that
4855 // we don't have to worry about filters disappearing under us when removed.
4856 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4857 Value *LFilter = NewClauses[j];
4858 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4859 if (!LTy)
4860 // Not a filter - skip it.
4861 continue;
4862 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4863 // an element of LFilter, then discard LFilter.
4864 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4865 // If Filter is empty then it is a subset of LFilter.
4866 if (!FElts) {
4867 // Discard LFilter.
4868 NewClauses.erase(J);
4869 MakeNewInstruction = true;
4870 // Move on to the next filter.
4871 continue;
4872 }
4873 unsigned LElts = LTy->getNumElements();
4874 // If Filter is longer than LFilter then it cannot be a subset of it.
4875 if (FElts > LElts)
4876 // Move on to the next filter.
4877 continue;
4878 // At this point we know that LFilter has at least one element.
4879 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4880 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4881 // already know that Filter is not longer than LFilter).
4882 if (isa<ConstantAggregateZero>(Filter)) {
4883 assert(FElts <= LElts && "Should have handled this case earlier!");
4884 // Discard LFilter.
4885 NewClauses.erase(J);
4886 MakeNewInstruction = true;
4887 }
4888 // Move on to the next filter.
4889 continue;
4890 }
4891 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4892 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4893 // Since Filter is non-empty and contains only zeros, it is a subset of
4894 // LFilter iff LFilter contains a zero.
4895 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4896 for (unsigned l = 0; l != LElts; ++l)
4897 if (LArray->getOperand(l)->isNullValue()) {
4898 // LFilter contains a zero - discard it.
4899 NewClauses.erase(J);
4900 MakeNewInstruction = true;
4901 break;
4902 }
4903 // Move on to the next filter.
4904 continue;
4905 }
4906 // At this point we know that both filters are ConstantArrays. Loop over
4907 // operands to see whether every element of Filter is also an element of
4908 // LFilter. Since filters tend to be short this is probably faster than
4909 // using a method that scales nicely.
4910 ConstantArray *FArray = cast<ConstantArray>(Filter);
4911 bool AllFound = true;
4912 for (unsigned f = 0; f != FElts; ++f) {
4913 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4914 AllFound = false;
4915 for (unsigned l = 0; l != LElts; ++l) {
4916 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4917 if (LTypeInfo == FTypeInfo) {
4918 AllFound = true;
4919 break;
4920 }
4921 }
4922 if (!AllFound)
4923 break;
4924 }
4925 if (AllFound) {
4926 // Discard LFilter.
4927 NewClauses.erase(J);
4928 MakeNewInstruction = true;
4929 }
4930 // Move on to the next filter.
4931 }
4932 }
4933
4934 // If we changed any of the clauses, replace the old landingpad instruction
4935 // with a new one.
4936 if (MakeNewInstruction) {
4937 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4938 NewClauses.size());
4939 for (Constant *C : NewClauses)
4940 NLI->addClause(C);
4941 // A landing pad with no clauses must have the cleanup flag set. It is
4942 // theoretically possible, though highly unlikely, that we eliminated all
4943 // clauses. If so, force the cleanup flag to true.
4944 if (NewClauses.empty())
4945 CleanupFlag = true;
4946 NLI->setCleanup(CleanupFlag);
4947 return NLI;
4948 }
4949
4950 // Even if none of the clauses changed, we may nonetheless have understood
4951 // that the cleanup flag is pointless. Clear it if so.
4952 if (LI.isCleanup() != CleanupFlag) {
4953 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4954 LI.setCleanup(CleanupFlag);
4955 return &LI;
4956 }
4957
4958 return nullptr;
4959}
4960
4961 Value *
4962 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4963 // Try to push freeze through instructions that propagate but don't produce
4964 // poison as far as possible. If an operand of freeze does not produce poison
4965 // then push the freeze through to the operands that are not guaranteed
4966 // non-poison. The actual transform is as follows.
4967 // Op1 = ... ; Op1 can be poison
4968 // Op0 = Inst(Op1, NonPoisonOps...)
4969 // ... = Freeze(Op0)
4970 // =>
4971 // Op1 = ...
4972 // Op1.fr = Freeze(Op1)
4973 // ... = Inst(Op1.fr, NonPoisonOps...)
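  // A concrete sketch of the transform above (illustrative IR, not taken from
  // the source): assuming %x may be poison while %c is known non-poison,
  //   %a = add i32 %x, %c
  //   %f = freeze i32 %a
  // becomes
  //   %x.fr = freeze i32 %x
  //   %a = add i32 %x.fr, %c
  // with %f's users rewritten to use %a directly; any poison-generating flags
  // on the pushed-through instruction are dropped.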
4974
4975 auto CanPushFreeze = [](Value *V) {
4976 if (!isa<Instruction>(V) || isa<PHINode>(V))
4977 return false;
4978
4979 // We can't push the freeze through an instruction which can itself create
4980 // poison. If the only source of new poison is flags, we can simply
4981 // strip them (since we know the only use is the freeze and nothing can
4982 // benefit from them.)
4983 return !canCreateUndefOrPoison(cast<Operator>(V),
4984 /*ConsiderFlagsAndMetadata*/ false);
4985 };
4986
4987 // Pushing freezes up long instruction chains can be expensive. Instead,
4988 // we directly push the freeze all the way to the leaves. However, we leave
4989 // deduplication of freezes on the same value for freezeOtherUses().
4990 Use *OrigUse = &OrigFI.getOperandUse(0);
4993 Worklist.push_back(OrigUse);
4994 while (!Worklist.empty()) {
4995 auto *U = Worklist.pop_back_val();
4996 Value *V = U->get();
4997 if (!CanPushFreeze(V)) {
4998 // If we can't push through the original instruction, abort the transform.
4999 if (U == OrigUse)
5000 return nullptr;
5001
5002 auto *UserI = cast<Instruction>(U->getUser());
5003 Builder.SetInsertPoint(UserI);
5004 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5005 U->set(Frozen);
5006 continue;
5007 }
5008
5009 auto *I = cast<Instruction>(V);
5010 if (!Visited.insert(I).second)
5011 continue;
5012
5013 // reverse() to emit freezes in a more natural order.
5014 for (Use &Op : reverse(I->operands())) {
5015 Value *OpV = Op.get();
5016 if (isa<MetadataAsValue>(OpV) || isGuaranteedNotToBeUndefOrPoison(OpV))
5017 continue;
5018 Worklist.push_back(&Op);
5019 }
5020
5021 I->dropPoisonGeneratingAnnotations();
5022 this->Worklist.add(I);
5023 }
5024
5025 return OrigUse->get();
5026}
5027
5028 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5029 PHINode *PN) {
5030 // Detect whether this is a recurrence with a start value and some number of
5031 // backedge values. We'll check whether we can push the freeze through the
5032 // backedge values (possibly dropping poison flags along the way) until we
5033 // reach the phi again. In that case, we can move the freeze to the start
5034 // value.
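  // Illustrative sketch (added comment, not from the source): for a simple
  // counting recurrence such as
  //   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add nuw i32 %iv, 1
  //   %fr = freeze i32 %iv
  // the freeze can be moved onto %start (dropping the nuw flag from the add),
  // after which %fr is simply replaced by %iv.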
5035 Use *StartU = nullptr;
5037 for (Use &U : PN->incoming_values()) {
5038 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5039 // Add backedge value to worklist.
5040 Worklist.push_back(U.get());
5041 continue;
5042 }
5043
5044 // Don't bother handling multiple start values.
5045 if (StartU)
5046 return nullptr;
5047 StartU = &U;
5048 }
5049
5050 if (!StartU || Worklist.empty())
5051 return nullptr; // Not a recurrence.
5052
5053 Value *StartV = StartU->get();
5054 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5055 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5056 // We can't insert freeze if the start value is the result of the
5057 // terminator (e.g. an invoke).
5058 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5059 return nullptr;
5060
5063 while (!Worklist.empty()) {
5064 Value *V = Worklist.pop_back_val();
5065 if (!Visited.insert(V).second)
5066 continue;
5067
5068 if (Visited.size() > 32)
5069 return nullptr; // Limit the total number of values we inspect.
5070
5071 // Assume that PN is non-poison, because it will be after the transform.
5072 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5073 continue;
5074
5075 Instruction *I = dyn_cast<Instruction>(V);
5076 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5077 /*ConsiderFlagsAndMetadata*/ false))
5078 return nullptr;
5079
5080 DropFlags.push_back(I);
5081 append_range(Worklist, I->operands());
5082 }
5083
5084 for (Instruction *I : DropFlags)
5085 I->dropPoisonGeneratingAnnotations();
5086
5087 if (StartNeedsFreeze) {
5088 Builder.SetInsertPoint(StartBB->getTerminator());
5089 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5090 StartV->getName() + ".fr");
5091 replaceUse(*StartU, FrozenStartV);
5092 }
5093 return replaceInstUsesWith(FI, PN);
5094}
5095
5096 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5097 Value *Op = FI.getOperand(0);
5098
5099 if (isa<Constant>(Op) || Op->hasOneUse())
5100 return false;
5101
5102 // Move the freeze directly after the definition of its operand, so that
5103 // it dominates the maximum number of uses. Note that it may not dominate
5104 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5105 // the normal/default destination. This is why the domination check in the
5106 // replacement below is still necessary.
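  // Illustrative sketch (added comment, not from the source): if the operand is
  //   %op = invoke i32 @f() to label %normal unwind label %lpad
  // and one of its uses is an incoming value of a phi in %normal, a freeze
  // placed at the insertion point after the invoke's definition does not
  // dominate that use, so that particular use is left untouched below.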
5107 BasicBlock::iterator MoveBefore;
5108 if (isa<Argument>(Op)) {
5109 MoveBefore =
5110 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5111 } else {
5112 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5113 if (!MoveBeforeOpt)
5114 return false;
5115 MoveBefore = *MoveBeforeOpt;
5116 }
5117
5118 // Re-point iterator to come after any debug-info records.
5119 MoveBefore.setHeadBit(false);
5120
5121 bool Changed = false;
5122 if (&FI != &*MoveBefore) {
5123 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5124 Changed = true;
5125 }
5126
5127 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5128 bool Dominates = DT.dominates(&FI, U);
5129 Changed |= Dominates;
5130 return Dominates;
5131 });
5132
5133 return Changed;
5134}
5135
5136// Check if any direct or bitcast user of this value is a shuffle instruction.
5137 static bool isUsedWithinShuffleVector(Value *V) {
5138 for (auto *U : V->users()) {
5139 if (isa<ShuffleVectorInst>(U))
5140 return true;
5141 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5142 return true;
5143 }
5144 return false;
5145}
5146
5147 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5148 Value *Op0 = I.getOperand(0);
5149
5150 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5151 return replaceInstUsesWith(I, V);
5152
5153 // freeze (phi const, x) --> phi const, (freeze x)
5154 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5155 if (Instruction *NV = foldOpIntoPhi(I, PN))
5156 return NV;
5157 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5158 return NV;
5159 }
5160
5161 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5162 return replaceInstUsesWith(I, NI);
5163
5164 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5165 // - or: pick -1
5166 // - select's condition: if the true value is constant, choose it by making
5167 // the condition true.
5168 // - default: pick 0
5169 //
5170 // Note that this transform is intentionally done here rather than
5171 // via an analysis in InstSimplify or at individual user sites. That is
5172 // because we must produce the same value for all uses of the freeze -
5173 // it's the reason "freeze" exists!
5174 //
5175 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5176 // duplicating logic for binops at least.
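  // Illustrative sketch (added comment, not from the source): for
  //   %f = freeze i32 undef
  //   %o = or i32 %f, %x
  // choosing -1 lets the 'or' simplify to -1. If another user of %f would
  // prefer a different constant, the conflicting choices fall back to 0 so
  // that every use of the freeze still observes one and the same value.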
5177 auto getUndefReplacement = [&](Type *Ty) {
5178 Value *BestValue = nullptr;
5179 Value *NullValue = Constant::getNullValue(Ty);
5180 for (const auto *U : I.users()) {
5181 Value *V = NullValue;
5182 if (match(U, m_Or(m_Value(), m_Value())))
5184 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5185 V = ConstantInt::getTrue(Ty);
5186 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5188 V = NullValue;
5189 }
5190
5191 if (!BestValue)
5192 BestValue = V;
5193 else if (BestValue != V)
5194 BestValue = NullValue;
5195 }
5196 assert(BestValue && "Must have at least one use");
5197 return BestValue;
5198 };
5199
5200 if (match(Op0, m_Undef())) {
5201 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5202 // a shuffle. This may improve codegen for shuffles that allow
5203 // unspecified inputs.
5204 if (isUsedWithinShuffleVector(&I))
5205 return nullptr;
5206 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5207 }
5208
5209 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5210 Type *Ty = C->getType();
5211 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5212 if (!VTy)
5213 return nullptr;
5214 unsigned NumElts = VTy->getNumElements();
5215 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5216 for (unsigned i = 0; i != NumElts; ++i) {
5217 Constant *EltC = C->getAggregateElement(i);
5218 if (EltC && !match(EltC, m_Undef())) {
5219 BestValue = EltC;
5220 break;
5221 }
5222 }
5223 return Constant::replaceUndefsWith(C, BestValue);
5224 };
5225
5226 Constant *C;
5227 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5228 !C->containsConstantExpression()) {
5229 if (Constant *Repl = getFreezeVectorReplacement(C))
5230 return replaceInstUsesWith(I, Repl);
5231 }
5232
5233 // Replace uses of Op with freeze(Op).
5234 if (freezeOtherUses(I))
5235 return &I;
5236
5237 return nullptr;
5238}
5239
5240/// Check for case where the call writes to an otherwise dead alloca. This
5241/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5242/// helper *only* analyzes the write; doesn't check any other legality aspect.
5243 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5244 auto *CB = dyn_cast<CallBase>(I);
5245 if (!CB)
5246 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5247 // to allow reload along used path as described below. Otherwise, this
5248 // is simply a store to a dead allocation which will be removed.
5249 return false;
5250 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5251 if (!Dest)
5252 return false;
5253 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5254 if (!AI)
5255 // TODO: allow malloc?
5256 return false;
5257 // TODO: allow memory access dominated by move point? Note that since AI
5258 // could have a reference to itself captured by the call, we would need to
5259 // account for cycles in doing so.
5260 SmallVector<const User *> AllocaUsers;
5262 auto pushUsers = [&](const Instruction &I) {
5263 for (const User *U : I.users()) {
5264 if (Visited.insert(U).second)
5265 AllocaUsers.push_back(U);
5266 }
5267 };
5268 pushUsers(*AI);
5269 while (!AllocaUsers.empty()) {
5270 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5271 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5272 pushUsers(*UserI);
5273 continue;
5274 }
5275 if (UserI == CB)
5276 continue;
5277 // TODO: support lifetime.start/end here
5278 return false;
5279 }
5280 return true;
5281}
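// Illustrative sketch (added comment, not part of the original source) of the
// C/C++ idiom this helper recognizes, with hypothetical names:
//   void getSize(int *out);
//   void caller() {
//     int unused;        // becomes an otherwise-dead alloca
//     getSize(&unused);  // the call's only write lands in that dead local
//   }
// Here the value written by the call is never read, so the write is not
// observable.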
5282
5283/// Try to move the specified instruction from its current block into the
5284/// beginning of DestBlock, which can only happen if it's safe to move the
5285/// instruction past all of the instructions between it and the end of its
5286/// block.
5287 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5288 BasicBlock *DestBlock) {
5289 BasicBlock *SrcBlock = I->getParent();
5290
5291 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5292 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5293 I->isTerminator())
5294 return false;
5295
5296 // Do not sink static or dynamic alloca instructions. Static allocas must
5297 // remain in the entry block, and dynamic allocas must not be sunk in between
5298 // a stacksave / stackrestore pair, which would incorrectly shorten its
5299 // lifetime.
5300 if (isa<AllocaInst>(I))
5301 return false;
5302
5303 // Do not sink into catchswitch blocks.
5304 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5305 return false;
5306
5307 // Do not sink convergent call instructions.
5308 if (auto *CI = dyn_cast<CallInst>(I)) {
5309 if (CI->isConvergent())
5310 return false;
5311 }
5312
5313 // Unless we can prove that the memory write isn't visible except on the
5314 // path we're sinking to, we must bail.
5315 if (I->mayWriteToMemory()) {
5316 if (!SoleWriteToDeadLocal(I, TLI))
5317 return false;
5318 }
5319
5320 // We can only sink load instructions if there is nothing between the load and
5321 // the end of the block that could change the value.
5322 if (I->mayReadFromMemory() &&
5323 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5324 // We don't want to do any sophisticated alias analysis, so we only check
5325 // the instructions after I in I's parent block if we try to sink to its
5326 // successor block.
5327 if (DestBlock->getUniquePredecessor() != I->getParent())
5328 return false;
5329 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5330 E = I->getParent()->end();
5331 Scan != E; ++Scan)
5332 if (Scan->mayWriteToMemory())
5333 return false;
5334 }
5335
5336 I->dropDroppableUses([&](const Use *U) {
5337 auto *I = dyn_cast<Instruction>(U->getUser());
5338 if (I && I->getParent() != DestBlock) {
5339 Worklist.add(I);
5340 return true;
5341 }
5342 return false;
5343 });
5344 /// FIXME: We could remove droppable uses that are not dominated by
5345 /// the new position.
5346
5347 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5348 I->moveBefore(*DestBlock, InsertPos);
5349 ++NumSunkInst;
5350
5351 // Also sink all related debug uses from the source basic block. Otherwise we
5352 // get a debug use before the def. Attempt to salvage debug uses first, to
5353 // maximise the range over which variables have a location. If we cannot salvage, then
5354 // mark the location undef: we know it was supposed to receive a new location
5355 // here, but that computation has been sunk.
5356 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5357 findDbgUsers(I, DbgVariableRecords);
5358 if (!DbgVariableRecords.empty())
5359 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5360 DbgVariableRecords);
5361
5362 // PS: there are numerous flaws with this behaviour, not least that right now
5363 // assignments can be re-ordered past other assignments to the same variable
5364 // if they use different Values. Creating more undef assignments can never be
5365 // undone. And salvaging all users outside of this block can unnecessarily
5366 // alter the lifetime of the live value that the variable refers to.
5367 // Some of these things can be resolved by tolerating debug use-before-defs in
5368 // LLVM-IR; however, that depends on the instruction-referencing CodeGen backend
5369 // being used for more architectures.
5370
5371 return true;
5372}
5373
5374 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5375 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5376 BasicBlock *DestBlock,
5377 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5378 // For all debug values in the destination block, the sunk instruction
5379 // will still be available, so they do not need to be dropped.
5380
5381 // Fetch all DbgVariableRecords not already in the destination.
5382 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5383 for (auto &DVR : DbgVariableRecords)
5384 if (DVR->getParent() != DestBlock)
5385 DbgVariableRecordsToSalvage.push_back(DVR);
5386
5387 // Fetch a second collection, of DbgVariableRecords in the source block that
5388 // we're going to sink.
5389 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5390 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5391 if (DVR->getParent() == SrcBlock)
5392 DbgVariableRecordsToSink.push_back(DVR);
5393
5394 // Sort DbgVariableRecords according to their position in the block. This is a
5395 // partial order: DbgVariableRecords attached to different instructions will
5396 // be ordered by the instruction order, but DbgVariableRecords attached to the
5397 // same instruction won't have an order.
5398 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5399 return B->getInstruction()->comesBefore(A->getInstruction());
5400 };
5401 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5402
5403 // If there are two assignments to the same variable attached to the same
5404 // instruction, the ordering between the two assignments is important. Scan
5405 // for this (rare) case and establish which is the last assignment.
5406 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5408 if (DbgVariableRecordsToSink.size() > 1) {
5410 // Count how many assignments to each variable there is per instruction.
5411 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5412 DebugVariable DbgUserVariable =
5413 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5414 DVR->getDebugLoc()->getInlinedAt());
5415 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5416 }
5417
5418 // If there are any instructions with two assignments, add them to the
5419 // FilterOutMap to record that they need extra filtering.
5421 for (auto It : CountMap) {
5422 if (It.second > 1) {
5423 FilterOutMap[It.first] = nullptr;
5424 DupSet.insert(It.first.first);
5425 }
5426 }
5427
5428 // For all instruction/variable pairs needing extra filtering, find the
5429 // latest assignment.
5430 for (const Instruction *Inst : DupSet) {
5431 for (DbgVariableRecord &DVR :
5432 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5433 DebugVariable DbgUserVariable =
5434 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5435 DVR.getDebugLoc()->getInlinedAt());
5436 auto FilterIt =
5437 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5438 if (FilterIt == FilterOutMap.end())
5439 continue;
5440 if (FilterIt->second != nullptr)
5441 continue;
5442 FilterIt->second = &DVR;
5443 }
5444 }
5445 }
5446
5447 // Perform cloning of the DbgVariableRecords that we plan on sinking; filter
5448 // out any duplicate assignments identified above.
5450 SmallSet<DebugVariable, 4> SunkVariables;
5451 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5453 continue;
5454
5455 DebugVariable DbgUserVariable =
5456 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5457 DVR->getDebugLoc()->getInlinedAt());
5458
5459 // For any variable where there were multiple assignments in the same place,
5460 // ignore all but the last assignment.
5461 if (!FilterOutMap.empty()) {
5462 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5463 auto It = FilterOutMap.find(IVP);
5464
5465 // Filter out.
5466 if (It != FilterOutMap.end() && It->second != DVR)
5467 continue;
5468 }
5469
5470 if (!SunkVariables.insert(DbgUserVariable).second)
5471 continue;
5472
5473 if (DVR->isDbgAssign())
5474 continue;
5475
5476 DVRClones.emplace_back(DVR->clone());
5477 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5478 }
5479
5480 // Perform salvaging without the clones, then sink the clones.
5481 if (DVRClones.empty())
5482 return;
5483
5484 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5485
5486 // The clones are in reverse order of original appearance. Assert that the
5487 // head bit is set on the iterator as we _should_ have received it via
5488 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5489 // we'll repeatedly insert at the head, such as:
5490 // DVR-3 (third insertion goes here)
5491 // DVR-2 (second insertion goes here)
5492 // DVR-1 (first insertion goes here)
5493 // Any-Prior-DVRs
5494 // InsertPtInst
5495 assert(InsertPos.getHeadBit());
5496 for (DbgVariableRecord *DVRClone : DVRClones) {
5497 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5498 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5499 }
5500}
5501
5502 bool InstCombinerImpl::run() {
5503 while (!Worklist.isEmpty()) {
5504 // Walk deferred instructions in reverse order, and push them to the
5505 // worklist, which means they'll end up popped from the worklist in-order.
5506 while (Instruction *I = Worklist.popDeferred()) {
5507 // Check to see if we can DCE the instruction. We do this already here to
5508 // reduce the number of uses and thus allow other folds to trigger.
5509 // Note that eraseInstFromFunction() may push additional instructions on
5510 // the deferred worklist, so this will DCE whole instruction chains.
5511 if (isInstructionTriviallyDead(I, &TLI)) {
5512 eraseInstFromFunction(*I);
5513 ++NumDeadInst;
5514 continue;
5515 }
5516
5517 Worklist.push(I);
5518 }
5519
5520 Instruction *I = Worklist.removeOne();
5521 if (I == nullptr) continue; // skip null values.
5522
5523 // Check to see if we can DCE the instruction.
5524 if (isInstructionTriviallyDead(I, &TLI)) {
5525 eraseInstFromFunction(*I);
5526 ++NumDeadInst;
5527 continue;
5528 }
5529
5530 if (!DebugCounter::shouldExecute(VisitCounter))
5531 continue;
5532
5533 // See if we can trivially sink this instruction to its user if we can
5534 // prove that the successor is not executed more frequently than our block.
5535 // Return the UserBlock if successful.
5536 auto getOptionalSinkBlockForInst =
5537 [this](Instruction *I) -> std::optional<BasicBlock *> {
5538 if (!EnableCodeSinking)
5539 return std::nullopt;
5540
5541 BasicBlock *BB = I->getParent();
5542 BasicBlock *UserParent = nullptr;
5543 unsigned NumUsers = 0;
5544
5545 for (Use &U : I->uses()) {
5546 User *User = U.getUser();
5547 if (User->isDroppable())
5548 continue;
5549 if (NumUsers > MaxSinkNumUsers)
5550 return std::nullopt;
5551
5552 Instruction *UserInst = cast<Instruction>(User);
5553 // Special handling for Phi nodes - get the block the use occurs in.
5554 BasicBlock *UserBB = UserInst->getParent();
5555 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5556 UserBB = PN->getIncomingBlock(U);
5557 // Bail out if we have uses in different blocks. We don't do any
5558 // sophisticated analysis (i.e., finding the NearestCommonDominator of these
5559 // use blocks).
5560 if (UserParent && UserParent != UserBB)
5561 return std::nullopt;
5562 UserParent = UserBB;
5563
5564 // Make sure these checks are done only once; naturally, we do them the
5565 // first time we find the user parent, which saves compile time.
5566 if (NumUsers == 0) {
5567 // Try sinking to another block. If that block is unreachable, then do
5568 // not bother. SimplifyCFG should handle it.
5569 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5570 return std::nullopt;
5571
5572 auto *Term = UserParent->getTerminator();
5573 // See if the user is one of our successors that has only one
5574 // predecessor, so that we don't have to split the critical edge.
5575 // Another option where we can sink is a block that ends with a
5576 // terminator that does not pass control to another block (such as
5577 // return or unreachable or resume). In this case:
5578 // - I dominates the User (by SSA form);
5579 // - the User will be executed at most once.
5580 // So sinking I down to User is always profitable or neutral.
5581 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5582 return std::nullopt;
5583
5584 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5585 }
5586
5587 NumUsers++;
5588 }
5589
5590 // No user or only has droppable users.
5591 if (!UserParent)
5592 return std::nullopt;
5593
5594 return UserParent;
5595 };
5596
5597 auto OptBB = getOptionalSinkBlockForInst(I);
5598 if (OptBB) {
5599 auto *UserParent = *OptBB;
5600 // Okay, the CFG is simple enough, try to sink this instruction.
5601 if (tryToSinkInstruction(I, UserParent)) {
5602 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5603 MadeIRChange = true;
5604 // We'll add uses of the sunk instruction below, but since
5605 // sinking can expose opportunities for its *operands*, add
5606 // them to the worklist.
5607 for (Use &U : I->operands())
5608 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5609 Worklist.push(OpI);
5610 }
5611 }
5612
5613 // Now that we have an instruction, try combining it to simplify it.
5614 Builder.SetInsertPoint(I);
5615 Builder.CollectMetadataToCopy(
5616 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5617
5618#ifndef NDEBUG
5619 std::string OrigI;
5620#endif
5621 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5622 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5623
5624 if (Instruction *Result = visit(*I)) {
5625 ++NumCombined;
5626 // Should we replace the old instruction with a new one?
5627 if (Result != I) {
5628 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5629 << " New = " << *Result << '\n');
5630
5631 // We copy the old instruction's DebugLoc to the new instruction, unless
5632 // InstCombine already assigned a DebugLoc to it, in which case we
5633 // should trust the more specifically selected DebugLoc.
5634 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5635 // We also copy annotation metadata to the new instruction.
5636 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5637 // Everything uses the new instruction now.
5638 I->replaceAllUsesWith(Result);
5639
5640 // Move the name to the new instruction first.
5641 Result->takeName(I);
5642
5643 // Insert the new instruction into the basic block...
5644 BasicBlock *InstParent = I->getParent();
5645 BasicBlock::iterator InsertPos = I->getIterator();
5646
5647 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5648 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5649 // We need to fix up the insertion point.
5650 if (isa<PHINode>(I)) // PHI -> Non-PHI
5651 InsertPos = InstParent->getFirstInsertionPt();
5652 else // Non-PHI -> PHI
5653 InsertPos = InstParent->getFirstNonPHIIt();
5654 }
5655
5656 Result->insertInto(InstParent, InsertPos);
5657
5658 // Push the new instruction and any users onto the worklist.
5659 Worklist.pushUsersToWorkList(*Result);
5660 Worklist.push(Result);
5661
5663 } else {
5664 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5665 << " New = " << *I << '\n');
5666
5667 // If the instruction was modified, it's possible that it is now dead.
5668 // If so, remove it.
5669 if (isInstructionTriviallyDead(I, &TLI)) {
5670 eraseInstFromFunction(*I);
5671 } else {
5672 Worklist.pushUsersToWorkList(*I);
5673 Worklist.push(I);
5674 }
5675 }
5676 MadeIRChange = true;
5677 }
5678 }
5679
5680 Worklist.zap();
5681 return MadeIRChange;
5682}
5683
5684// Track the scopes used by !alias.scope and !noalias. In a function, a
5685// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5686// by both sets. If not, the declaration of the scope can be safely omitted.
5687// The MDNode of the scope can be omitted as well for the instructions that are
5688// part of this function. We do not do that at this point, as this might become
5689// too time consuming to do.
5690 class AliasScopeTracker {
5691 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5692 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5693
5694public:
5695 void analyse(Instruction *I) {
5696 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5697 if (!I->hasMetadataOtherThanDebugLoc())
5698 return;
5699
5700 auto Track = [](Metadata *ScopeList, auto &Container) {
5701 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5702 if (!MDScopeList || !Container.insert(MDScopeList).second)
5703 return;
5704 for (const auto &MDOperand : MDScopeList->operands())
5705 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5706 Container.insert(MDScope);
5707 };
5708
5709 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5710 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5711 }
5712
5713 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5714 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5715 if (!Decl)
5716 return false;
5717
5718 assert(Decl->use_empty() &&
5719 "llvm.experimental.noalias.scope.decl in use ?");
5720 const MDNode *MDSL = Decl->getScopeList();
5721 assert(MDSL->getNumOperands() == 1 &&
5722 "llvm.experimental.noalias.scope should refer to a single scope");
5723 auto &MDOperand = MDSL->getOperand(0);
5724 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5725 return !UsedAliasScopesAndLists.contains(MD) ||
5726 !UsedNoAliasScopesAndLists.contains(MD);
5727
5728 // Not an MDNode? Throw it away.
5729 return true;
5730 }
5731};
5732
5733/// Populate the IC worklist from a function, by walking it in reverse
5734/// post-order and adding all reachable code to the worklist.
5735///
5736/// This has a couple of tricks to make the code faster and more powerful. In
5737/// particular, we constant fold and DCE instructions as we go, to avoid adding
5738/// them to the worklist (this significantly speeds up instcombine on code where
5739/// many instructions are dead or constant). Additionally, if we find a branch
5740/// whose condition is a known constant, we only visit the reachable successors.
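///
/// For example (an illustrative sketch, not taken from the source), given a
/// terminator such as
///   br i1 true, label %live, label %dead
/// only %live needs to be visited; the edge to %dead is recorded as dead, phi
/// incoming values along that edge are replaced by poison, and any block that
/// ends up unreachable has its instructions removed further below.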
5741 bool InstCombinerImpl::prepareWorklist(Function &F) {
5742 bool MadeIRChange = false;
5744 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5745 DenseMap<Constant *, Constant *> FoldedConstants;
5746 AliasScopeTracker SeenAliasScopes;
5747
5748 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5749 for (BasicBlock *Succ : successors(BB))
5750 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5751 for (PHINode &PN : Succ->phis())
5752 for (Use &U : PN.incoming_values())
5753 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5754 U.set(PoisonValue::get(PN.getType()));
5755 MadeIRChange = true;
5756 }
5757 };
5758
5759 for (BasicBlock *BB : RPOT) {
5760 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5761 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5762 })) {
5763 HandleOnlyLiveSuccessor(BB, nullptr);
5764 continue;
5765 }
5766 LiveBlocks.insert(BB);
5767
5768 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5769 // ConstantProp instruction if trivially constant.
5770 if (!Inst.use_empty() &&
5771 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5772 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5773 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5774 << '\n');
5775 Inst.replaceAllUsesWith(C);
5776 ++NumConstProp;
5777 if (isInstructionTriviallyDead(&Inst, &TLI))
5778 Inst.eraseFromParent();
5779 MadeIRChange = true;
5780 continue;
5781 }
5782
5783 // See if we can constant fold its operands.
5784 for (Use &U : Inst.operands()) {
5785 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5786 continue;
5787
5788 auto *C = cast<Constant>(U);
5789 Constant *&FoldRes = FoldedConstants[C];
5790 if (!FoldRes)
5791 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5792
5793 if (FoldRes != C) {
5794 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5795 << "\n Old = " << *C
5796 << "\n New = " << *FoldRes << '\n');
5797 U = FoldRes;
5798 MadeIRChange = true;
5799 }
5800 }
5801
5802 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5803 // these call instructions consumes non-trivial amount of time and
5804 // provides no value for the optimization.
5805 if (!Inst.isDebugOrPseudoInst()) {
5806 InstrsForInstructionWorklist.push_back(&Inst);
5807 SeenAliasScopes.analyse(&Inst);
5808 }
5809 }
5810
5811 // If this is a branch or switch on a constant, mark only the single
5812 // live successor. Otherwise assume all successors are live.
5813 Instruction *TI = BB->getTerminator();
5814 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5815 if (isa<UndefValue>(BI->getCondition())) {
5816 // Branch on undef is UB.
5817 HandleOnlyLiveSuccessor(BB, nullptr);
5818 continue;
5819 }
5820 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5821 bool CondVal = Cond->getZExtValue();
5822 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5823 continue;
5824 }
5825 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5826 if (isa<UndefValue>(SI->getCondition())) {
5827 // Switch on undef is UB.
5828 HandleOnlyLiveSuccessor(BB, nullptr);
5829 continue;
5830 }
5831 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5832 HandleOnlyLiveSuccessor(BB,
5833 SI->findCaseValue(Cond)->getCaseSuccessor());
5834 continue;
5835 }
5836 }
5837 }
5838
5839 // Remove instructions inside unreachable blocks. This prevents the
5840 // instcombine code from having to deal with some bad special cases, and
5841 // reduces use counts of instructions.
5842 for (BasicBlock &BB : F) {
5843 if (LiveBlocks.count(&BB))
5844 continue;
5845
5846 unsigned NumDeadInstInBB;
5847 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
5848
5849 MadeIRChange |= NumDeadInstInBB != 0;
5850 NumDeadInst += NumDeadInstInBB;
5851 }
5852
5853 // Once we've found all of the instructions to add to instcombine's worklist,
5854 // add them in reverse order. This way instcombine will visit from the top
5855 // of the function down. This jives well with the way that it adds all uses
5856 // of instructions to the worklist after doing a transformation, thus avoiding
5857 // some N^2 behavior in pathological cases.
5858 Worklist.reserve(InstrsForInstructionWorklist.size());
5859 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5860 // DCE instruction if trivially dead. As we iterate in reverse program
5861 // order here, we will clean up whole chains of dead instructions.
5862 if (isInstructionTriviallyDead(Inst, &TLI) ||
5863 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5864 ++NumDeadInst;
5865 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5866 salvageDebugInfo(*Inst);
5867 Inst->eraseFromParent();
5868 MadeIRChange = true;
5869 continue;
5870 }
5871
5872 Worklist.push(Inst);
5873 }
5874
5875 return MadeIRChange;
5876}
5877
5878 void InstCombinerImpl::computeBackEdges() {
5879 // Collect backedges.
5881 for (BasicBlock *BB : RPOT) {
5882 Visited.insert(BB);
5883 for (BasicBlock *Succ : successors(BB))
5884 if (Visited.contains(Succ))
5885 BackEdges.insert({BB, Succ});
5886 }
5887 ComputedBackEdges = true;
5888}
5889
5895 const InstCombineOptions &Opts) {
5896 auto &DL = F.getDataLayout();
5897 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5898 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5899
5900 /// Builder - This is an IRBuilder that automatically inserts new
5901 /// instructions into the worklist when they are created.
5903 F.getContext(), TargetFolder(DL),
5904 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5905 Worklist.add(I);
5906 if (auto *Assume = dyn_cast<AssumeInst>(I))
5907 AC.registerAssumption(Assume);
5908 }));
5909
5911
5912 // Lower dbg.declare intrinsics, otherwise their value may be clobbered
5913 // by the instcombiner.
5914 bool MadeIRChange = false;
5915 if (ShouldLowerDbgDeclare)
5916 MadeIRChange = LowerDbgDeclare(F);
5917
5918 // Iterate while there is work to do.
5919 unsigned Iteration = 0;
5920 while (true) {
5921 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
5922 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5923 << " on " << F.getName()
5924 << " reached; stopping without verifying fixpoint\n");
5925 break;
5926 }
5927
5928 ++Iteration;
5929 ++NumWorklistIterations;
5930 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5931 << F.getName() << "\n");
5932
5933 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5934 ORE, BFI, BPI, PSI, DL, RPOT);
5936 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5937 MadeChangeInThisIteration |= IC.run();
5938 if (!MadeChangeInThisIteration)
5939 break;
5940
5941 MadeIRChange = true;
5942 if (Iteration > Opts.MaxIterations) {
5944 "Instruction Combining on " + Twine(F.getName()) +
5945 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5946 " iterations. " +
5947 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5948 "'instcombine-no-verify-fixpoint' to suppress this error.");
5949 }
5950 }
5951
5952 if (Iteration == 1)
5953 ++NumOneIteration;
5954 else if (Iteration == 2)
5955 ++NumTwoIterations;
5956 else if (Iteration == 3)
5957 ++NumThreeIterations;
5958 else
5959 ++NumFourOrMoreIterations;
5960
5961 return MadeIRChange;
5962}
5963
5965
5966 void InstCombinePass::printPipeline(
5967 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5968 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5969 OS, MapClassName2PassName);
5970 OS << '<';
5971 OS << "max-iterations=" << Options.MaxIterations << ";";
5972 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5973 OS << '>';
5974}
5975
5976char InstCombinePass::ID = 0;
5977
5978 PreservedAnalyses InstCombinePass::run(Function &F,
5979 FunctionAnalysisManager &AM) {
5980 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5981 // No changes since last InstCombine pass, exit early.
5982 if (LRT.shouldSkip(&ID))
5983 return PreservedAnalyses::all();
5984
5985 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5986 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5987 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5988 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5989 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5990
5991 auto *AA = &AM.getResult<AAManager>(F);
5992 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5993 ProfileSummaryInfo *PSI =
5994 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5995 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5996 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5998
5999 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6000 BFI, BPI, PSI, Options)) {
6001 // No changes, all analyses are preserved.
6002 LRT.update(&ID, /*Changed=*/false);
6003 return PreservedAnalyses::all();
6004 }
6005
6006 // Mark all the analyses that instcombine updates as preserved.
6007 PreservedAnalyses PA;
6008 LRT.update(&ID, /*Changed=*/true);
6009 PA.preserveSet<CFGAnalyses>();
6010 PA.preserve<LastRunTrackingAnalysis>();
6011 return PA;
6012}
6013
6015 AU.setPreservesCFG();
6028}
6029
6031 if (skipFunction(F))
6032 return false;
6033
6034 // Required analyses.
6035 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6036 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6037 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6038 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6039 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6040 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6041
6042 // Optional analyses.
6043 ProfileSummaryInfo *PSI =
6044 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6045 BlockFrequencyInfo *BFI =
6046 (PSI && PSI->hasProfileSummary()) ?
6047 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6048 nullptr;
6049 BranchProbabilityInfo *BPI = nullptr;
6050 if (auto *WrapperPass =
6051 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6052 BPI = &WrapperPass->getBPI();
6053
6054 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6055 BFI, BPI, PSI, InstCombineOptions());
6056}
6057
6059
6062}
6063
6065 "Combine redundant instructions", false, false)
6077
6078// Initialization Routines
6081}
6082
6083 FunctionPass *llvm::createInstructionCombiningPass() {
6084 return new InstructionCombiningPass();
6085}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Resource Access
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:194
This file defines the DenseMap class.
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
Hexagon Vector Combine
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
A private abstract base class describing the concept of an individual alias analysis implementation.
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition: APInt.cpp:1758
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:423
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition: APInt.cpp:1890
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1488
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1928
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:827
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1960
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1941
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:851
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:431
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
Class to represent array types.
Definition: DerivedTypes.h:398
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Definition: DerivedTypes.h:410
Type * getElementType() const
Definition: DerivedTypes.h:411
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
Definition: Attributes.cpp:447
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
Definition: BasicBlock.cpp:206
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
const Instruction & front() const
Definition: BasicBlock.h:482
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:549
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:437
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:445
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
Definition: BasicBlock.cpp:406
size_t size() const
Definition: BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
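The block-shape queries above come up whenever instructions are sunk or hoisted; a small sketch follows (the function name is invented and the checks are purely illustrative).

// Illustrative only: typical BasicBlock queries before moving instructions.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool blockSketch(BasicBlock *BB) {      // hypothetical helper
  if (!BB->getSinglePredecessor())
    return false;                              // only handle single-entry blocks
  for (PHINode &PN : BB->phis())
    (void)PN;                                  // e.g. inspect phis first
  return BB->getTerminator() != nullptr;       // block is well formed
}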
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition: InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
LLVM_ABI void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1348
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1427
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1955
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
@ ICMP_NE
not equal
Definition: InstrTypes.h:700
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:791
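A quick sketch of the difference between the swapped and the inverse predicate; the example predicate is arbitrary and the helper name is invented.

// Illustrative only: swapped vs. inverse predicate.
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static void predicateSketch() {                // hypothetical helper
  CmpInst::Predicate P = CmpInst::ICMP_ULT;    // x u< y
  CmpInst::Predicate Swapped =
      CmpInst::getSwappedPredicate(P);         // ICMP_UGT: y u> x
  CmpInst::Predicate Inverse =
      CmpInst::getInversePredicate(P);         // ICMP_UGE: !(x u< y)
  (void)Swapped; (void)Inverse;
}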
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:23
ConstantArray - Constant Array Declarations.
Definition: Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1314
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:776
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2654
static LLVM_ABI Constant * getNot(Constant *C)
Definition: Constants.cpp:2641
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2647
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2694
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
Definition: Constants.cpp:2635
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:868
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:875
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:882
This class represents a range of values.
Definition: ConstantRange.h:47
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
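A sketch of the ConstantRange helpers above, assuming a 32-bit width; the values are illustrative.

// Illustrative only: exact range for "x u< 8" and a membership query.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static void rangeSketch() {                    // hypothetical helper
  ConstantRange R = ConstantRange::makeExactICmpRegion(
      CmpInst::ICMP_ULT, APInt(32, 8));        // the half-open range [0, 8)
  bool In = R.contains(APInt(32, 3));          // true
  bool Out = R.contains(APInt(32, 9));         // false
  (void)In; (void)Out;
}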
Constant Vector Declarations.
Definition: Constants.h:517
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
Definition: Constants.cpp:1474
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1423
This is an important base class in LLVM.
Definition: Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
Definition: Constants.cpp:403
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:784
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
const Constant * stripPointerCasts() const
Definition: Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
LLVM_ABI SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
Definition: DataLayout.cpp:970
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
Definition: DataLayout.h:220
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
Definition: DataLayout.cpp:753
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:877
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the gi...
Definition: DataLayout.h:398
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:674
LLVM_ABI int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:892
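A sketch of the DataLayout queries that typically gate GEP and memory transforms; the types are assumed to be supplied by the caller and the function name is invented.

// Illustrative only: common DataLayout queries.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static void dataLayoutSketch(const DataLayout &DL, Type *Ty, Type *PtrTy) {
  TypeSize AllocSize = DL.getTypeAllocSize(Ty);        // size incl. alignment padding
  unsigned IdxBits = DL.getIndexTypeSizeInBits(PtrTy); // width of a GEP index
  bool NativeI64 = DL.isLegalInteger(64);              // is i64 a native integer?
  (void)AllocSize; (void)IdxBits; (void)NativeI64;
}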
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:88
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:203
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
bool empty() const
Definition: DenseMap.h:119
iterator end()
Definition: DenseMap.h:87
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:230
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:334
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:22
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition: Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition: Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:973
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:997
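A sketch showing how a GEP is created and how its no-wrap flags are read back; the helper name is invented, and note that the builder may return a constant expression rather than an instruction.

// Illustrative only: build an inbounds GEP and inspect its no-wrap flags.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

static Value *gepSketch(IRBuilder<> &B, Type *ElemTy, Value *Ptr, Value *Idx) {
  Value *GEP = B.CreateInBoundsGEP(ElemTy, Ptr, {Idx}, "elt");
  if (auto *GEPOp = dyn_cast<GEPOperator>(GEP)) {
    GEPNoWrapFlags NW = GEPOp->getNoWrapFlags();
    bool InBounds = NW.isInBounds();            // true for the GEP built above
    (void)InBounds;
  }
  return GEP;
}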
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:114
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1737
LLVM_ABI Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1010
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcast to NumElts elements.
Definition: IRBuilder.cpp:1115
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2618
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:502
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1005
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2094
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2637
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2036
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:345
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1931
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1923
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
Definition: IRBuilder.h:262
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:823
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:834
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:522
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2463
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1805
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1420
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1847
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:630
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2082
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2593
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1551
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1403
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2508
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2068
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1708
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1532
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1599
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2439
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1731
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:552
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:538
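A sketch of the kind of replacement IR built through IRBuilder; the operands are assumed to be i32/i1 values handed in by a visitor, and the helper name is invented.

// Illustrative only: building replacement instructions with IRBuilder.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *builderSketch(IRBuilder<> &B, Value *X, Value *Y, Value *Cond) {
  Value *Sum = B.CreateAdd(X, Y, "sum");             // %sum = add %X, %Y
  Value *NotC = B.CreateNot(Cond, "not.cond");       // xor %Cond, true
  Value *Sel = B.CreateSelect(NotC, Sum, X, "sel");  // select i1 %not.cond, ...
  return B.CreateFreeze(Sel, "fr");                  // freeze the select result
}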
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition: IRBuilder.h:75
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:48
SimplifyQuery SQ
Definition: InstCombiner.h:77
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:337
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:228
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:143
TargetLibraryInfo & TLI
Definition: InstCombiner.h:74
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Definition: InstCombiner.h:456
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:368
AAResults * AA
Definition: InstCombiner.h:70
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:388
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:56
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:187
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:420
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:160
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:65
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:377
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:80
ReversePostOrderTraversal< BasicBlock * > & RPOT
Definition: InstCombiner.h:84
const DataLayout & DL
Definition: InstCombiner.h:76
DomConditionCache DC
Definition: InstCombiner.h:82
const bool MinimizeSize
Definition: InstCombiner.h:68
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
Definition: InstCombiner.h:433
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
Definition: InstCombiner.h:73
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:332
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
Definition: InstCombiner.h:97
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:412
DominatorTree & DT
Definition: InstCombiner.h:75
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:280
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:89
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
Definition: InstCombiner.h:61
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:209
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
Definition: InstCombiner.h:358
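A minimal sketch of how a visitor typically ties these helpers together. The fold shown ((X + 0) -> X) is deliberately trivial and in practice is already handled by InstSimplify, so treat it as shape only; the function name is invented.

// Illustrative only: shape of a combine using match + replaceInstUsesWith.
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static Instruction *visitAddSketch(InstCombiner &IC, BinaryOperator &I) {
  Value *X;
  if (match(&I, m_Add(m_Value(X), m_Zero())))
    return IC.replaceInstUsesWith(I, X);   // RAUW + requeue the users
  return nullptr;                          // no change; caller keeps iterating
}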
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
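A sketch of the worklist protocol used after an instruction changes; the function name is invented.

// Illustrative only: enqueue an instruction and the users it may unlock.
#include "llvm/Transforms/Utils/InstructionWorklist.h"
using namespace llvm;

static void worklistSketch(InstructionWorklist &WL, Instruction *I) {
  WL.add(I);                   // deduplicated enqueue of I itself
  WL.pushUsersToWorkList(*I);  // users may now simplify, revisit them too
}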
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:366
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:78
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1804
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:82
bool isTerminator() const
Definition: Instruction.h:315
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:312
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:371
bool isShift() const
Definition: Instruction.h:320
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:510
bool isIntDivRem() const
Definition: Instruction.h:318
Class to represent integer types.
Definition: DerivedTypes.h:42
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:180
Value * getPointerOperand()
Definition: Instructions.h:259
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:209
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1451
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:899
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:63
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:716
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
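A sketch of building a two-input phi with the constructors above, roughly the shape produced when two stored values are merged in a successor block; all names and blocks are assumed inputs.

// Illustrative only: create a phi node and wire up its incoming edges.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PHINode *phiSketch(BasicBlock *MergeBB, Value *V1, BasicBlock *BB1,
                          Value *V2, BasicBlock *BB2) {
  PHINode *PN = PHINode::Create(V1->getType(), /*NumReservedValues=*/2,
                                "merged", MergeBB->begin());
  PN->addIncoming(V1, BB1);
  PN->addIncoming(V2, BB2);
  return PN;
}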
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:38
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition: Constants.h:1468
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1885
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition: Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:168
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:476
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:356
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:182
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void reserve(size_type N)
Definition: SmallVector.h:664
iterator erase(const_iterator CI)
Definition: SmallVector.h:738
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:684
typename SuperClass::iterator iterator
Definition: SmallVector.h:578
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:35
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:261
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:311
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:352
This class represents a cast unsigned integer to floating point.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1866
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
op_range operands()
Definition: User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
op_iterator op_begin()
Definition: User.h:284
const Use & getOperandUse(unsigned i) const
Definition: User.h:245
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
op_iterator op_end()
Definition: User.h:286
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:439
iterator_range< user_iterator > users()
Definition: Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition: Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:701
bool use_empty() const
Definition: Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1098
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:878
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
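A sketch of the use-count queries that gate many folds; the function name is invented and the thresholds are illustrative.

// Illustrative only: use-count checks before folding a value into its user.
#include "llvm/IR/Value.h"
using namespace llvm;

static bool useSketch(Value *V) {
  if (V->use_empty())
    return false;           // dead value, nothing to fold into
  if (V->hasOneUse())
    return true;            // cheap to absorb into the single user
  return V->hasNUses(2);    // or insist on exactly two uses
}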
Base class of all SIMD vector types.
Definition: DerivedTypes.h:430
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:695
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:205
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:203
constexpr bool isZero() const
Definition: TypeSize.h:157
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:137
self_iterator getIterator()
Definition: ilist_node.h:134
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:662
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:751
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:862
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:962
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:931
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:793
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition: PatternMatch.h:316
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
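A sketch of the PatternMatch combinators in use; the pattern (add (shl X, C), Y with a single-use shl) and the function name are illustrative only.

// Illustrative only: match "add (shl X, C), Y" where the shl has one use.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool matchSketch(Value *V, Value *&X, Value *&Y, const APInt *&C) {
  // m_OneUse keeps the fold profitable: the shl disappears after rewriting.
  return match(V, m_Add(m_OneUse(m_Shl(m_Value(X), m_APInt(C))), m_Value(Y)));
}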
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:860
void stable_sort(R &&Range)
Definition: STLExtras.h:2077
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
Definition: Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
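A small sketch of the range form (the helper name is hypothetical; it relies on the usual Use-to-Value conversion):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;
  // Sketch: true when every operand of I is a Constant, with no explicit
  // begin()/end() iterator plumbing.
  static bool allOperandsConstant(const Instruction &I) {
    return all_of(I.operands(), [](const Use &U) { return isa<Constant>(U); });
  }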
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition: CFG.h:256
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
Definition: DebugInfo.cpp:124
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2491
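A sketch of enumerate over a function's blocks, assuming an LLVM recent enough that the result supports structured bindings (current trees do):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Function.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;
  // Sketch: visit each block with its position, with no hand-written counter.
  static void printBlockIndices(Function &F) {
    for (auto [Idx, BB] : enumerate(F))
      errs() << "block #" << Idx << ": " << BB.getName() << "\n";
  }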
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1723
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g. malloc/realloc), return the identifier for its family of functions.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2155
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:663
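A sketch of the early-increment idiom this enables, here paired with isInstructionTriviallyDead (the wrapper function is hypothetical):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/Transforms/Utils/Local.h"
  using namespace llvm;
  // Sketch: the iterator advances before the body runs, so erasing the
  // current instruction does not invalidate the traversal.
  static void eraseTriviallyDead(BasicBlock &BB) {
    for (Instruction &I : make_early_inc_range(BB))
      if (isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }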
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of the C standard library frexp function.
Definition: APFloat.h:1555
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2468
int countr_zero(T Val)
Count the number of zero bits from the least significant bit upward, stopping at the first one bit.
Definition: bit.h:157
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:147
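A standalone sketch (plain integers, no IR) of how these two utilities turn a power-of-two multiplier into a shift amount, in the spirit of the canonicalizations this pass performs:

  #include "llvm/ADT/bit.h"
  #include <cstdint>
  // Sketch: a power of two has exactly one set bit; the trailing-zero count
  // of that bit is the equivalent left-shift amount.
  static bool getShiftAmountForMul(uint64_t MulAmt, unsigned &ShAmt) {
    if (!llvm::has_single_bit(MulAmt))
      return false;
    ShAmt = llvm::countr_zero(MulAmt);
    return true;
  }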
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:47
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into an appropriate set of dbg.value records.
Definition: Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1172
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition: Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
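A sketch of a typical call, assuming both operands are already Constants (the wrapper is hypothetical):

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;
  // Sketch: returns the folded constant, or nullptr when the folder cannot
  // produce one; the caller then falls back to building the instruction.
  static Constant *tryFoldAdd(Constant *L, Constant *R, const DataLayout &DL) {
    return ConstantFoldBinaryOpOperands(Instruction::Add, L, R, DL);
  }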
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:345
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: ModRef.h:28
@ Ref
The access may reference the value stored in memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
@ NoModRef
The access neither references nor modifies the value stored in memory.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
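A sketch of calling it through a SimplifyQuery built from just the DataLayout (a richer query with assumptions, dominator tree, and context instruction is also possible); names are illustrative:

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;
  // Sketch: a non-null result is an existing value or constant equivalent
  // to "add LHS, RHS"; nullptr means no simplification was found.
  static Value *trySimplifyAdd(Value *LHS, Value *RHS, const DataLayout &DL) {
    SimplifyQuery Q(DL);
    return simplifyBinOp(Instruction::Add, LHS, RHS, Q);
  }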
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but without using information from the instruction's non-constant operands; intended for when those operands are about to be replaced.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1980
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2107
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
Definition: DebugInfo.cpp:129
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition: ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition: Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition: APFloat.h:304
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:324
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:244
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:241
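A sketch connecting these accessors to computeKnownBits from ValueTracking, assuming the overload taking only the value and the DataLayout:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Value.h"
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;
  // Sketch: bits known zero at the top bound how many bits the value can
  // actually occupy; e.g. 24 known-zero leading bits of an i32 leave 8.
  static unsigned maxActiveBits(const Value *V, const DataLayout &DL) {
    KnownBits Known = computeKnownBits(V, DL);
    return Known.getBitWidth() - Known.countMinLeadingZeros();
  }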
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const