Excerpts from LLVM's Loads.cpp; elided code between non-contiguous fragments is marked with "// ...".

// isAligned:
  return Base->getPointerAlignment(DL) >= Alignment;

// isDereferenceableAndAlignedPointerViaAssumption:
  if (!CtxI || Ptr->canBeFreed())
    return false;
  // ...
  bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
  // The result is derived by scanning the assumes seen by getKnowledgeForValue:
  getKnowledgeForValue(
      Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
      [&](RetainedKnowledge RK, Instruction *Assume, auto) {
        if (!isValidAssumeForContext(Assume, CtxI, DT))
          return false;
        if (RK.AttrKind == Attribute::Alignment)
          AlignRK = std::max(AlignRK, RK);
        if (RK.AttrKind == Attribute::Dereferenceable)
          DerefRK = std::max(DerefRK, RK);
        IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
        if (IsAligned && DerefRK && CheckSize(DerefRK))
          return true; // Found what we needed; stop scanning.
        return false;  // Other assumes may have better information; keep looking.
      });
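A minimal sketch of the same query in isolation, assuming an AssumptionCache and an optional DominatorTree are at hand; the helper name hasAssumedAlignment is mine, while getKnowledgeForValue and isValidAssumeForContext are the documented APIs used above:

#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Sketch: does some llvm.assume alignment bundle prove that Ptr is aligned
// to at least Alignment at the context instruction CtxI?
static bool hasAssumedAlignment(const Value *Ptr, Align Alignment,
                                AssumptionCache &AC, const Instruction *CtxI,
                                const DominatorTree *DT) {
  RetainedKnowledge RK = getKnowledgeForValue(
      Ptr, {Attribute::Alignment}, AC,
      [&](RetainedKnowledge, Instruction *Assume,
          const CallBase::BundleOpInfo *) {
        // Only accept assumptions that are known to hold at CtxI.
        return isValidAssumeForContext(Assume, CtxI, DT);
      });
  return RK && RK.ArgValue >= Alignment.value();
}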
// isDereferenceableAndAlignedPointer (recursive worker):
  assert(V->getType()->isPointerTy() && "Base must be pointer");
  // ...
  // Each value is visited at most once; revisiting adds no information.
  if (!Visited.insert(V).second)
    return false;
  // ... (the GEP case recurses on the base pointer with an offset-adjusted
  //      size, passing CtxI, AC, DT, TLI, Visited, MaxDepth through)

  // Look through pointer-compatible bitcasts.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);

  // For selects, both arms must be dereferenceable and aligned.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }
  // If the pointer is already known dereferenceable (and non-null / not
  // freed where required), no further walking is needed.
  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)))
      return false;
    if (CheckForNonNull &&
        /* ... V is not known non-null ... */)
      return false;
    // Dereferenceability implied by an instruction (e.g. !dereferenceable
    // metadata) may only hold at that program point, so non-alloca
    // instructions additionally need a valid context.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      // ...
    // ...
  };
  if (IsKnownDeref()) {
    // ...
  }

  // Calls that return one of their own arguments forward to that argument.
  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);
  }
  // Allocations: a successful getObjectSize query proves the object's bytes.
  APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
  // ...

  // gc.relocate keeps the dereferenceability of its derived pointer.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);
  // ... (address-space casts recurse the same way, passing
  //      Size, DL, CtxI, AC, DT, TLI, Visited, MaxDepth through)
// isDereferenceableAndAlignedPointer (public wrapper) starts the recursion
// with a fresh Visited set and a maximum depth of 16:
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
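In-tree callers reach the recursion through the public overloads declared in llvm/Analysis/Loads.h (signatures listed at the end of this section). A usage sketch, assuming the usual analyses are available; the helper canSpeculateLoad is hypothetical:

#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: a load can be speculated if its address is dereferenceable and
// sufficiently aligned at the load itself.
static bool canSpeculateLoad(LoadInst *LI, AssumptionCache *AC,
                             const DominatorTree *DT,
                             const TargetLibraryInfo *TLI) {
  const DataLayout &DL = LI->getDataLayout();
  return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                            LI->getType(), LI->getAlign(), DL,
                                            /*CtxI=*/LI, AC, DT, TLI);
}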
// The Type-based overload derives the access size from the type's store size:
  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
// AreEquivalentAddressValues: casts, phis and GEPs compare equal if they are
// identical when defined.
  if (isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;
// isDereferenceableAndAlignedInLoop:
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  // ...
  // A loop-invariant pointer only needs the plain check at the loop header.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);
  // ...
  // Otherwise the pointer must be an affine add-recurrence in this loop with
  // a constant step.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  // ...
  // The element size must be a multiple of the alignment, and consecutive
  // accesses must not leave gaps wider than one element.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount = /* ... constant (or, behind the
      use-symbolic-maxbtc-deref-loop option, symbolic) max backedge-taken
      count ... */;
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;

  // Compute the range of addresses the loop will access.
  const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
      L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr, &DT, AC);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // ... (PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  //      MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff))
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;

  // Split AccessStart into a base the plain dereferenceability check can
  // handle, plus a constant offset folded into the access size.
  Value *Base = nullptr;
  APInt AccessSize;
  const SCEV *AccessSizeSCEV = nullptr;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
    AccessSizeSCEV = PtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;
    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    // ...
    // A negative offset, or one that breaks the requested alignment, cannot
    // be folded into the size.
    if (Offset->getAPInt().isNegative())
      return false;
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    bool Overflow = false;
    AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
    if (Overflow)
      return false;
    Base = NewBase->getValue();
    // ...
  }
  // Finally, prove the whole [Base, Base + AccessSize) range dereferenceable
  // and aligned at the loop header.
  Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
  return isDereferenceableAndAlignedPointerViaAssumption(
             Base, Alignment, /* ... */,
             DL, HeaderFirstNonPHI, AC, &DT) ||
         isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
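A sketch of how a transform might consume this; the helper name is mine, the entry point is isDereferenceableAndAlignedInLoop from Loads.h, which can also hand back SCEV predicates that would have to be checked at runtime:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

// Sketch: prove a load is safe on every iteration of L, collecting any
// predicates the proof depends on.
static bool loadIsSafeInLoop(LoadInst *LI, Loop *L, ScalarEvolution &SE,
                             DominatorTree &DT, AssumptionCache *AC) {
  SmallVector<const SCEVPredicate *, 4> Predicates;
  return isDereferenceableAndAlignedInLoop(LI, L, SE, DT, AC, &Predicates);
}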
// suppressSpeculativeLoadForSanitizers: speculative loads may introduce races
// (TSan) or read from shadowed/poisoned regions (ASan/HWASan).
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
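The public wrapper around this check is mustSuppressSpeculation(const LoadInst &); a small sketch of the usual gating pattern (helper name mine):

#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: speculation stays disabled under TSan/ASan/HWASan even when the
// load is otherwise provably safe.
static bool maySpeculateLoad(const LoadInst &LI) {
  return !LI.isVolatile() && !mustSuppressSpeculation(LI);
}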
// isSafeToLoadUnconditionally scans backwards from ScanFrom for an access
// that already proves the new load cannot trap:
  if (Size.getBitWidth() > 64)
    return false;
  // ...
  V = V->stripPointerCasts();
  // ...
  // Conservatively bail out on calls that may write memory, except lifetime
  // intrinsics.
  if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
      !isa<LifetimeIntrinsic>(BBI))
    return false;

  Value *AccessedPtr;
  Type *AccessedTy;
  Align AccessedAlign;
  if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
    // Ignore volatile accesses; they don't prove an ordinary load is safe.
    if (LI->isVolatile())
      continue;
    AccessedPtr = LI->getPointerOperand();
    AccessedTy = LI->getType();
    AccessedAlign = LI->getAlign();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
    if (SI->isVolatile())
      continue;
    AccessedPtr = SI->getPointerOperand();
    AccessedTy = SI->getValueOperand()->getType();
    AccessedAlign = SI->getAlign();
  } else
    continue;

  if (AccessedAlign < Alignment)
    continue;

  // The same pointer (or a provably equivalent one) was accessed with at
  // least the required size: the new load cannot trap.
  if (AccessedPtr == V &&
      TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
    return true;

  if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
      TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
    return true;
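A usage sketch for the public entry point; the 4-byte access is an arbitrary example and safeFourByteLoad is hypothetical:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: would a 4-byte load from Ptr, aligned to 4, be guaranteed not to
// trap if executed just before ScanFrom?
static bool safeFourByteLoad(Value *Ptr, Instruction *ScanFrom) {
  const DataLayout &DL = ScanFrom->getDataLayout();
  APInt Size(DL.getPointerTypeSizeInBits(Ptr->getType()), 4);
  return isSafeToLoadUnconditionally(Ptr, Align(4), Size, DL, ScanFrom);
}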
// Command-line knob behind FindAvailableLoadedValue's default scan limit:
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    /* ... */
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));
// FindAvailableLoadedValue (scan-from-iterator overload):
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoadCSE,
                                      unsigned *NumScanedInst) {
  // Don't CSE loads that are volatile or stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;
  // ...
  return findAvailablePtrLoadStore(/* ... */,
                                   ScanBB, ScanFrom, MaxInstsToScan, AA,
                                   IsLoadCSE, NumScanedInst);
}
// areNonOverlapSameBaseLoadAndStore: prove a load and a store with the same
// base cannot overlap by comparing their constant-offset ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
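The disjointness test reduces to ConstantRange arithmetic; a tiny self-contained illustration with made-up offsets:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

// Sketch: a 4-byte load at offset 0 and a 4-byte store at offset 4 off the
// same base occupy [0,4) and [4,8), which do not intersect.
static bool demoDisjointAccesses() {
  ConstantRange LoadRange(APInt(64, 0), APInt(64, 4));
  ConstantRange StoreRange(APInt(64, 4), APInt(64, 8));
  return LoadRange.intersectWith(StoreRange).isEmptySet(); // true
}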
// getAvailableLoadStore: if Inst produces the value that a load of Ptr with
// type AccessTy would read, return that value.
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // A load from the same pointer can be reused directly...
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // ...but an atomic load may only be forwarded from an access that is at
    // least as strongly ordered.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;
    // ...
  }

  // A store to the same pointer makes its stored value available.
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;
    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    // ...
    Value *Val = SI->getValueOperand();
    // ... (StoreSize = DL.getTypeSizeInBits(Val->getType()))
    // A constant store of at least the load's width can be reinterpreted.
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }
  // A memset with a constant byte and length covers the load iff the read
  // bytes lie inside the set region; the result is that byte splatted over
  // the load width.
  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Only constant memsets can be forwarded.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // The load must have the same base and start at or after the memset.
    int64_t StoreOffset = 0, LoadOffset = 0;
    const Value *StoreBase =
        GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
    const Value *LoadBase =
        GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
    if (StoreBase != LoadBase || LoadOffset < StoreOffset)
      return nullptr;
    // ...
    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    // ...
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    // Make sure the read bytes are contained in the memset region.
    if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    // ...
  }
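The splat itself is plain APInt arithmetic. For example, forwarding a 32-bit load out of memset(p, 0xAB, n) (illustrative values):

#include "llvm/ADT/APInt.h"
using namespace llvm;

// Sketch: broadcast the memset byte over the load width.
static APInt demoMemsetSplat() {
  APInt Byte(8, 0xAB);
  return APInt::getSplat(32, Byte); // 0xABABABAB
}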
// findAvailablePtrLoadStore: walk backwards over the block from ScanFrom.
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;
  // ...
  while (ScanFrom != ScanBB->begin()) {
    // ...
    // Give up after MaxInstsToScan instructions; the caller may restart the
    // scan elsewhere.
    if (MaxInstsToScan-- == 0)
      return nullptr;
    // ...
    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // Distinct allocas/globals cannot alias: keep scanning.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;
      // Without alias analysis, fall back to the constant-offset
      // disjointness check above.
      if (!AA &&
          areNonOverlapSameBaseLoadAndStore(
              Loc.Ptr, AccessTy, SI->getPointerOperand(),
              SI->getValueOperand()->getType(), DL))
        continue;
      // ...
    }
// FindAvailableLoadedValue (BatchAAResults overload): find an available value
// first, then verify that no intervening instruction clobbers it.
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  // ...
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // ... (reverse scan over the block:)
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    // Remember potential clobbers for the later alias-analysis check.
    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
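A hedged sketch of how a transform typically drives this overload; tryForwardLoad is hypothetical, and BatchAAResults wraps an existing AAResults:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: try to replace Load with a value already available in its block.
static Value *tryForwardLoad(LoadInst *Load, AAResults &AAR) {
  BatchAAResults BatchAA(AAR);
  bool IsLoadCSE = false;
  if (Value *Avail = FindAvailableLoadedValue(Load, BatchAA, &IsLoadCSE,
                                              DefMaxInstsToScan))
    return Avail; // The caller would RAUW Load with Avail.
  return nullptr;
}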
// isPointerUseReplacable: walk the transitive users of the use.
  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    // ...
    // Comparisons and pointer-to-integer casts only observe the address, so
    // equal pointers are interchangeable there.
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    // Look through phis and selects.
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    // ...
  }
// isPointerAlwaysReplaceable: null, and constants that are dereferenceable
// (so a provenance difference cannot be observed), are always safe.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;

// canReplacePointersInUseIfEqual:
  assert(U->getType() == To->getType() && "values must have matching types");
  // Non-pointer values are always replaceable.
  if (!U->getType()->isPointerTy())
    return true;
  // ...
  if (isa<LifetimeIntrinsic>(U.getUser()))
    return true;
// canReplacePointersIfEqual:
  if (!From->getType()->isPointerTy())
    return true;
  // ...
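A usage sketch (mayRewritePtr is hypothetical); the typical client is a GVN-style transform that has just learned "p == q" from a branch:

#include "llvm/Analysis/Loads.h"
using namespace llvm;

// Sketch: given a dominating "if (P == Q)", may P be rewritten to Q without
// changing provenance-sensitive behavior?  Per the rules above, this always
// holds when Q is null or a dereferenceable constant.
static bool mayRewritePtr(const Value *P, const Value *Q,
                          const DataLayout &DL) {
  return canReplacePointersIfEqual(P, Q, DL);
}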
// isDereferenceableReadOnlyLoop: every load must be provably safe in the
// loop, and nothing else may touch memory or throw.
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
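And the matching whole-loop query, sketched (helper name mine):

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

// Sketch: a loop that provably cannot fault and only reads memory is a
// candidate for transforms such as early-exit vectorization.
static bool loopReadsAreSafe(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
                             AssumptionCache *AC) {
  return isDereferenceableReadOnlyLoop(L, &SE, &DT, AC);
}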
Static helpers defined in Loads.cpp (Doxygen summaries):

static bool isAligned(const Value *Base, Align Alignment, const DataLayout &DL)

static cl::opt<bool> UseSymbolicMaxBTCForDerefInLoop("use-symbolic-maxbtc-deref-loop", cl::init(false))

static bool AreEquivalentAddressValues(const Value *A, const Value *B)
Test if A and B will obviously have the same value.

static bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited, unsigned MaxDepth)
Test if V is always a pointer to allocated and suitably aligned memory for a simple load or store.

static bool isPointerAlwaysReplaceable(const Value *From, const Value *To, const DataLayout &DL)

static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr, Type *LoadTy, const Value *StorePtr, Type *StoreTy, const DataLayout &DL)

static bool isPointerUseReplacable(const Use &U)

static bool isDereferenceableAndAlignedPointerViaAssumption(const Value *Ptr, Align Alignment, function_ref<bool(const RetainedKnowledge &RK)> CheckSize, const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT)

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr, Type *AccessTy, bool AtLeastAtomic, const DataLayout &DL, bool *IsLoadCSE)

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI)
Free functions referenced here (Doxygen summaries; those from llvm/Analysis/Loads.h are the public entry points into this file):

LLVM_ABI RetainedKnowledge getKnowledgeForValue(const Value *V, ArrayRef<Attribute::AttrKind> AttrKinds, AssumptionCache &AC, function_ref<bool(RetainedKnowledge, Instruction *, const CallBase::BundleOpInfo *)> Filter = [](auto...) { return true; })
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and it matches the Filter.

LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT = nullptr, bool AllowEphemerals = false)
Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control flow identified by the context instruction, CxtI.

LLVM_ABI const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call pointer argument that is considered the same by aliasing rules.

LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI = nullptr, AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to the one requested.

iterator_range<T> make_range(T x, T y)
Convenience function for iterating over sub-ranges.

Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds = true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.

LLVM_ABI Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan, BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst)
Scan backwards to see if we have the value of the given pointer available locally within a small number of instructions.

LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with an active sanitizer.

LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan = DefMaxInstsToScan, BatchAAResults *AA = nullptr, bool *IsLoadCSE = nullptr, unsigned *NumScanedInst = nullptr)
Scan backwards to see if we have the value of the given load available locally within a small number of instructions.

LLVM_ABI bool isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr)
Return true if the loop L cannot fault on any iteration and only contains read-only memory accesses.

LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {})
Compute the size of the object pointed to by Ptr.

LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)

LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deemed equal.

bool isModSet(const ModRefInfo MRI)

LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, Instruction *ScanFrom, AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr)
Return true if we know that executing a load from this value cannot trap.

LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract the value of C at the given Offset, reinterpreted as Ty.

LLVM_ABI cl::opt<unsigned> DefMaxInstsToScan
The default maximum number of instructions to scan in the block, used by FindAvailableLoadedValue().

LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0)
Return true if the given value is known to be non-zero when defined.

LLVM_ABI const Value *getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.

LLVM_ABI std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE, DenseMap<std::pair<const SCEV *, Type *>, std::pair<const SCEV *, const SCEV *>> *PointerBounds, DominatorTree *DT, AssumptionCache *AC)
Calculate the Start and End points of the memory access, using the exact backedge-taken count BTC if computable, or the maximum backedge-taken count MaxBTC otherwise.

LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI = nullptr, AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr)
Return true if this is always a dereferenceable pointer.

LLVM_ABI bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT, AssumptionCache *AC = nullptr, SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr)
Return true if we can prove that the given load (which is assumed to be within the specified loop) would access only dereferenceable memory, and be properly aligned, on every iteration.