//===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides the interface for the instcombine pass implementation.
/// The interface is used for generic transformations in this folder and for
/// target-specific combines implemented by the targets.
/// The visitor implementation is in \c InstCombinerImpl in
/// \c InstCombineInternal.h.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/DomConditionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

namespace llvm {

class AAResults;
class AssumptionCache;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetTransformInfo;

/// The core instruction combiner logic.
///
/// This class provides the logic to recursively visit instructions and
/// combine them.
class LLVM_LIBRARY_VISIBILITY InstCombiner {
  /// Only used to call target specific intrinsic combining.
  /// It must **NOT** be used for any other purpose, as InstCombine is a
  /// target-independent canonicalization transform.
  TargetTransformInfo &TTIForTargetIntrinsicsOnly;

public:
  /// Maximum size of array considered when transforming.
  uint64_t MaxArraySizeForCombine = 0;

  /// An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

protected:
  /// A worklist of the instructions that need to be simplified.
  InstructionWorklist &Worklist;

  Function &F;

  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  AAResults *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  SimplifyQuery SQ;
  OptimizationRemarkEmitter &ORE;
  BlockFrequencyInfo *BFI;
  BranchProbabilityInfo *BPI;
  ProfileSummaryInfo *PSI;
  DomConditionCache DC;

  ReversePostOrderTraversal<BasicBlock *> &RPOT;

  bool MadeIRChange = false;

  /// Edges that are known to never be taken.
  SmallDenseSet<std::pair<BasicBlock *, BasicBlock *>, 8> DeadEdges;

  /// Order of predecessors to canonicalize phi nodes towards.
  SmallDenseMap<BasicBlock *, SmallVector<BasicBlock *>, 8> PredOrder;

  /// Backedges, used to avoid pushing instructions across backedges in cases
  /// where this may result in infinite combine loops. For irreducible loops
  /// this picks an arbitrary backedge.
  SmallDenseSet<std::pair<const BasicBlock *, const BasicBlock *>, 8> BackEdges;
  bool ComputedBackEdges = false;

public:
  InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder, Function &F,
               AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI,
               TargetTransformInfo &TTI, DominatorTree &DT,
               OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
               BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
               const DataLayout &DL,
               ReversePostOrderTraversal<BasicBlock *> &RPOT)
      : TTIForTargetIntrinsicsOnly(TTI), Builder(Builder), Worklist(Worklist),
        F(F), MinimizeSize(F.hasMinSize()), AA(AA), AC(AC), TLI(TLI), DT(DT),
        DL(DL), SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true,
                   /*CanUseUndef*/ true, &DC),
        ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), RPOT(RPOT) {}

  virtual ~InstCombiner() = default;

  /// Return the source operand of a potentially bitcasted value while
  /// optionally checking if it has one use. If there is no bitcast or the one
  /// use check is not met, return the input value itself.
  static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
    if (auto *BitCast = dyn_cast<BitCastInst>(V))
      if (!OneUseOnly || BitCast->hasOneUse())
        return BitCast->getOperand(0);

    // V is not a bitcast or V has more than one use and OneUseOnly is true.
    return V;
  }
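
  // Usage sketch (not part of the upstream header; the variable names are
  // hypothetical): a combine that only wants to look through a disposable
  // bitcast can ask for the one-use form.
  //
  //   Value *Src = InstCombiner::peekThroughBitcast(Op, /*OneUseOnly=*/true);
  //   if (Src != Op) {
  //     // Op was a single-use 'bitcast X to Ty'; Src is X, and the cast can
  //     // be dropped once Op itself is rewritten.
  //   }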

  /// Assign a complexity or rank value to LLVM Values. This is used to reduce
  /// the amount of pattern matching needed for compares and commutative
  /// instructions. For example, if we have:
  ///   icmp ugt X, Constant
  /// or
  ///   xor (add X, Constant), cast Z
  ///
  /// We do not have to consider the commuted variants of these patterns because
  /// canonicalization based on complexity guarantees the above ordering.
  ///
  /// This routine maps IR values to various complexity ranks:
  ///   0 -> undef
  ///   1 -> Constants
  ///   2 -> Cast and (f)neg/not instructions
  ///   3 -> Other instructions and arguments
  static unsigned getComplexity(Value *V) {
    if (isa<Constant>(V))
      return isa<UndefValue>(V) ? 0 : 1;

    if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
        match(V, m_Not(PatternMatch::m_Value())) ||
        match(V, m_FNeg(PatternMatch::m_Value())))
      return 2;

    return 3;
  }
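
  // Worked example (follows directly from the ranking above): in
  //   add (zext i8 %x to i32), %y
  // the zext ranks 2, while %y (a plain argument or ordinary instruction)
  // ranks 3, so canonicalization keeps the higher-ranked %y on the left,
  //   add %y, (zext i8 %x to i32)
  // and a transform only needs to match that one operand order.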

  /// Predicate canonicalization reduces the number of patterns that need to be
  /// matched by other transforms. For example, we may swap the operands of a
  /// conditional branch or select to create a compare with a canonical
  /// (inverted) predicate which is then more likely to be matched with other
  /// values.
  static bool isCanonicalPredicate(CmpPredicate Pred) {
    switch (Pred) {
    case CmpInst::ICMP_NE:
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
    // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_OLE:
    case CmpInst::FCMP_OGE:
      return false;
    default:
      return true;
    }
  }
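
  // Usage sketch (hypothetical caller, not from the upstream header): a
  // transform that is about to emit a compare can steer towards the canonical
  // predicate by inverting it and swapping the arms of the consuming select
  // or branch.
  //
  //   if (!InstCombiner::isCanonicalPredicate(Pred)) {
  //     Pred = CmpInst::getInversePredicate(Pred);
  //     std::swap(TrueVal, FalseVal); // keep overall behavior unchanged
  //   }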

  /// Add one to a Constant.
  static Constant *AddOne(Constant *C) {
    return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
  }

  /// Subtract one from a Constant.
  static Constant *SubOne(Constant *C) {
    return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
  }

  static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
    // a ? b : false and a ? true : b are the canonical form of logical and/or.
    // This includes !a ? b : false and !a ? true : b. Absorbing the not into
    // the select by swapping operands would break recognition of this pattern
    // in other analyses, so don't do that.
    return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
                                                 PatternMatch::m_Value())) ||
           match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
                                                PatternMatch::m_Value()));
  }
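
  // For reference (restating the comment above as IR): the canonical forms
  // being protected are
  //   %and = select i1 %a, i1 %b, i1 false    ; logical and
  //   %or  = select i1 %a, i1 true, i1 %b     ; logical or
  // Rewriting such a select to absorb a 'not' of %a would hide these shapes
  // from analyses that match m_LogicalAnd/m_LogicalOr.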

  /// Return nonnull value if V is free to invert under the condition of
  /// WillInvertAllUses.
  /// If Builder is nonnull, it will return a simplified ~V.
  /// If Builder is null, it will return an arbitrary nonnull value (not
  /// dereferenceable).
  /// If the inversion will consume instructions, `DoesConsume` will be set to
  /// true. Otherwise it will be false.
  Value *getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
                               BuilderTy *Builder, bool &DoesConsume,
                               unsigned Depth);

  Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
                           BuilderTy *Builder, bool &DoesConsume) {
    DoesConsume = false;
    return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume,
                                 /*Depth*/ 0);
  }

  Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
                           BuilderTy *Builder) {
    bool Unused;
    return getFreelyInverted(V, WillInvertAllUses, Builder, Unused);
  }
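
  // Usage sketch (hypothetical caller, not part of the upstream header):
  // materialize ~X only when doing so is free, e.g. before rewriting a
  // compare or a sub that would otherwise need an extra 'xor X, -1'.
  //
  //   bool DoesConsume = false;
  //   if (Value *NotX = getFreelyInverted(X, /*WillInvertAllUses=*/X->hasOneUse(),
  //                                       &Builder, DoesConsume)) {
  //     // NotX is a simplified ~X; DoesConsume reports whether building it
  //     // consumed an existing instruction (e.g. an existing 'not').
  //   }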

  /// Return true if the specified value is free to invert (apply ~ to).
  /// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
  /// is true, work under the assumption that the caller intends to remove all
  /// uses of V and only keep uses of ~V.
  ///
  /// See also: canFreelyInvertAllUsersOf()
  bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume) {
    return getFreelyInverted(V, WillInvertAllUses, /*Builder*/ nullptr,
                             DoesConsume) != nullptr;
  }

  bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
    bool Unused;
    return isFreeToInvert(V, WillInvertAllUses, Unused);
  }

  /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
  /// InstCombine's freelyInvertAllUsersOf() must be kept in sync with this fn.
  /// NOTE: for Instructions only!
  ///
  /// See also: isFreeToInvert()
  bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
    // Look at every user of V.
    for (Use &U : V->uses()) {
      if (U.getUser() == IgnoredUser)
        continue; // Don't consider this user.

      auto *I = cast<Instruction>(U.getUser());
      switch (I->getOpcode()) {
      case Instruction::Select:
        if (U.getOperandNo() != 0) // Only if the value is used as select cond.
          return false;
        if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
          return false;
        break;
      case Instruction::Br:
        assert(U.getOperandNo() == 0 && "Must be branching on that value.");
        break; // Free to invert by swapping true/false values/destinations.
      case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
                             // it.
        if (!match(I, m_Not(PatternMatch::m_Value())))
          return false; // Not a 'not'.
        break;
      default:
        return false; // Don't know, likely not freely invertible.
      }
      // So far all users were free to invert...
    }
    return true; // Can freely invert all users!
  }
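
  // Illustrative example (derived from the cases above): if %c is an i1 whose
  // only users are the condition of a select and a conditional branch,
  //   %s = select i1 %c, i32 %a, i32 %b
  //   br i1 %c, label %t, label %f
  // then canFreelyInvertAllUsersOf(%c, nullptr) returns true, because both
  // users can absorb the inversion by swapping their true/false arms.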

  /// Some binary operators require special handling to avoid poison and
  /// undefined behavior. If a constant vector has undef elements, replace those
  /// undefs with identity constants if possible because those are always safe
  /// to execute. If no identity constant exists, replace undef with some other
  /// safe constant.
  static Constant *
  getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
                                bool IsRHSConstant) {
    auto *InVTy = cast<FixedVectorType>(In->getType());

    Type *EltTy = InVTy->getElementType();
    auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
    if (!SafeC) {
      // TODO: Should this be available as a constant utility function? It is
      // similar to getBinOpAbsorber().
      if (IsRHSConstant) {
        switch (Opcode) {
        case Instruction::SRem: // X % 1 = 0
        case Instruction::URem: // X %u 1 = 0
          SafeC = ConstantInt::get(EltTy, 1);
          break;
        case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
          SafeC = ConstantFP::get(EltTy, 1.0);
          break;
        default:
          llvm_unreachable(
              "Only rem opcodes have no identity constant for RHS");
        }
      } else {
        switch (Opcode) {
        case Instruction::Shl:  // 0 << X = 0
        case Instruction::LShr: // 0 >>u X = 0
        case Instruction::AShr: // 0 >> X = 0
        case Instruction::SDiv: // 0 / X = 0
        case Instruction::UDiv: // 0 /u X = 0
        case Instruction::SRem: // 0 % X = 0
        case Instruction::URem: // 0 %u X = 0
        case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
        case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
        case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
        case Instruction::FRem: // 0.0 % X = 0
          SafeC = Constant::getNullValue(EltTy);
          break;
        default:
          llvm_unreachable("Expected to find identity constant for opcode");
        }
      }
    }
    assert(SafeC && "Must have safe constant for binop");
    unsigned NumElts = InVTy->getNumElements();
    SmallVector<Constant *, 16> Out(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *C = In->getAggregateElement(i);
      Out[i] = isa<UndefValue>(C) ? SafeC : C;
    }
    return ConstantVector::get(Out);
  }
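
  // Worked example (follows the logic above): for a divide by a
  // partially-undef vector constant,
  //   udiv <2 x i8> %x, <i8 undef, i8 4>
  // getBinOpIdentity returns the RHS divide identity 1, so the undef lane is
  // replaced to give
  //   <i8 1, i8 4>
  // which is always safe to execute (no division by an undef lane).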

  void addToWorklist(Instruction *I) { Worklist.push(I); }

  AssumptionCache &getAssumptionCache() const { return AC; }
  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
  DominatorTree &getDominatorTree() const { return DT; }
  const DataLayout &getDataLayout() const { return DL; }
  const SimplifyQuery &getSimplifyQuery() const { return SQ; }
  OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
    return ORE;
  }
  BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
  ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }

  // Call target specific combiners
  std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
  std::optional<Value *>
  targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
                                         KnownBits &Known,
                                         bool &KnownBitsComputed);
  std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
      IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp);

  void computeBackEdges();
  bool isBackEdge(const BasicBlock *From, const BasicBlock *To) {
    if (!ComputedBackEdges)
      computeBackEdges();
    return BackEdges.contains({From, To});
  }

  /// Inserts an instruction \p New before instruction \p Old
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    New->insertBefore(Old); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, BasicBlock::iterator Old) {
    New->setDebugLoc(Old->getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }
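
  // Usage sketch (hypothetical values): create a replacement instruction,
  // splice it in right before the instruction being visited, and let the
  // worklist pick it up automatically.
  //
  //   auto *NewShl = BinaryOperator::CreateShl(X, ConstantInt::get(Ty, 1));
  //   InsertNewInstWith(NewShl, OldMul->getIterator());
  //   // NewShl now sits before OldMul, carries OldMul's debug location, and
  //   // has been queued for further combining.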

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = PoisonValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    // If V is a new unnamed instruction, take the name from the old one.
    if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
      V->takeName(&I);

    I.replaceAllUsesWith(V);
    return &I;
  }
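
  // Typical pattern in a visit method (illustrative; the fold and helper are
  // hypothetical): hand the simplified value to replaceInstUsesWith and
  // return the now-dead instruction so the driver erases it.
  //
  //   if (Value *Folded = simplifyTheThing(X, Y))
  //     return replaceInstUsesWith(I, Folded);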

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Value *OldOp = I.getOperand(OpNum);
    I.setOperand(OpNum, V);
    Worklist.handleUseCountDecrement(OldOp);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Value *OldOp = U;
    U = NewValue;
    Worklist.handleUseCountDecrement(OldOp);
  }
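
  // Contrast with replaceInstUsesWith (illustrative): replaceOperand rewrites
  // a single operand in place and returns the instruction as "changed".
  //
  //   // Keep I but swap its second operand for the simplified value NewRHS.
  //   return replaceOperand(I, 1, NewRHS);  // operand index is hypothetical
  //
  // The old operand is revisited via the worklist in case it became dead.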

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;

  void computeKnownBits(const Value *V, KnownBits &Known,
                        const Instruction *CxtI, unsigned Depth = 0) const {
    llvm::computeKnownBits(V, Known, SQ.getWithInstruction(CxtI), Depth);
  }

  KnownBits computeKnownBits(const Value *V, const Instruction *CxtI,
                             unsigned Depth = 0) const {
    return llvm::computeKnownBits(V, SQ.getWithInstruction(CxtI), Depth);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              const Instruction *CxtI = nullptr,
                              unsigned Depth = 0) {
    return llvm::isKnownToBeAPowerOfTwo(V, OrZero, SQ.getWithInstruction(CxtI),
                                        Depth);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                         const Instruction *CxtI = nullptr,
                         unsigned Depth = 0) const {
    return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth);
  }

  unsigned ComputeNumSignBits(const Value *Op,
                              const Instruction *CxtI = nullptr,
                              unsigned Depth = 0) const {
    return llvm::ComputeNumSignBits(Op, DL, &AC, CxtI, &DT, Depth);
  }

  unsigned ComputeMaxSignificantBits(const Value *Op,
                                     const Instruction *CxtI = nullptr,
                                     unsigned Depth = 0) const {
    return llvm::ComputeMaxSignificantBits(Op, DL, &AC, CxtI, &DT, Depth);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI,
                                               bool IsNSW = false) const {
    return llvm::computeOverflowForUnsignedMul(
        LHS, RHS, SQ.getWithInstruction(CxtI), IsNSW);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  OverflowResult
  computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                                const WithCache<const Value *> &RHS,
                                const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS,
                                               SQ.getWithInstruction(CxtI));
  }

  OverflowResult
  computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS,
                                               SQ.getWithInstruction(CxtI));
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                    const APInt &DemandedMask, KnownBits &Known,
                                    const SimplifyQuery &Q,
                                    unsigned Depth = 0) = 0;

  bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                            const APInt &DemandedMask, KnownBits &Known) {
    return SimplifyDemandedBits(I, OpNo, DemandedMask, Known,
                                SQ.getWithInstruction(I));
  }
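
  // Usage sketch (illustrative constants and names): ask for only the low 8
  // bits of operand 1; if the callee rewrites the operand, the instruction
  // was modified in place and can be reported as changed.
  //
  //   KnownBits Known(BitWidth);
  //   if (SimplifyDemandedBits(I, /*OpNo=*/1,
  //                            APInt::getLowBitsSet(BitWidth, 8), Known))
  //     return I;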

  virtual Value *
  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
                             unsigned Depth = 0,
                             bool AllowMultipleUsers = false) = 0;

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
};

} // namespace llvm

#undef DEBUG_TYPE

#endif