Loads.cpp
1//===- Loads.cpp - Local load analysis ------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines simple local analyses for load instructions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Analysis/Loads.h"
23#include "llvm/IR/DataLayout.h"
25#include "llvm/IR/Operator.h"
26
27using namespace llvm;
28
29static cl::opt<bool>
30 UseSymbolicMaxBTCForDerefInLoop("use-symbolic-maxbtc-deref-loop",
31 cl::init(false));
32
33static bool isAligned(const Value *Base, Align Alignment,
34 const DataLayout &DL) {
35 return Base->getPointerAlignment(DL) >= Alignment;
36}
37
38 static bool isDereferenceableAndAlignedPointerViaAssumption(
39 const Value *Ptr, Align Alignment,
40 function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
41 const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
42 const DominatorTree *DT) {
43 // Dereferenceable information from assumptions is only valid if the value
44 // cannot be freed between the assumption and use. For now just use the
45 // information for values that cannot be freed in the function.
46 // TODO: More precisely check if the pointer can be freed between assumption
47 // and use.
48 if (!CtxI || Ptr->canBeFreed())
49 return false;
50 /// Look through assumes to see if both dereferenceability and alignment can
51 /// be proven by an assume if needed.
52 RetainedKnowledge AlignRK;
53 RetainedKnowledge DerefRK;
54 bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
55 return getKnowledgeForValue(
56 Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
57 [&](RetainedKnowledge RK, Instruction *Assume, auto) {
58 if (!isValidAssumeForContext(Assume, CtxI, DT))
59 return false;
60 if (RK.AttrKind == Attribute::Alignment)
61 AlignRK = std::max(AlignRK, RK);
62 if (RK.AttrKind == Attribute::Dereferenceable)
63 DerefRK = std::max(DerefRK, RK);
64 IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
65 if (IsAligned && DerefRK && CheckSize(DerefRK))
66 return true; // We have found what we needed so we stop looking.
67 return false; // Other assumes may have better information, so
68 // keep looking.
69 });
70}
71
72/// Test if V is always a pointer to allocated and suitably aligned memory for
73/// a simple load or store.
74 static bool isDereferenceableAndAlignedPointer(
75 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
76 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
77 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
78 unsigned MaxDepth) {
79 assert(V->getType()->isPointerTy() && "Base must be pointer");
80
81 // Recursion limit.
82 if (MaxDepth-- == 0)
83 return false;
84
85 // Already visited? Bail out, we've likely hit unreachable code.
86 if (!Visited.insert(V).second)
87 return false;
88
89 // Note that it is not safe to speculate into a malloc'd region because
90 // malloc may return null.
91
92 // For GEPs, determine if the indexing lands within the allocated object.
93 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
94 const Value *Base = GEP->getPointerOperand();
95
96 APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
97 if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
98 !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
99 .isMinValue())
100 return false;
101
102 // If the base pointer is dereferenceable for Offset+Size bytes, then the
103 // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
104 // pointer is aligned to Align bytes, and the Offset is divisible by Align
105 // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
106 // aligned to Align bytes.
107
108 // Offset and Size may have different bit widths if we have visited an
109 // addrspacecast, so we can't do arithmetic directly on the APInt values.
110 return isDereferenceableAndAlignedPointer(
111 Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
112 CtxI, AC, DT, TLI, Visited, MaxDepth);
113 }
114
115 // bitcast instructions are no-ops as far as dereferenceability is concerned.
116 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
117 if (BC->getSrcTy()->isPointerTy())
118 return isDereferenceableAndAlignedPointer(
119 BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
120 Visited, MaxDepth);
121 }
122
123 // Recurse into both hands of select.
124 if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
125 return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
126 Size, DL, CtxI, AC, DT, TLI,
127 Visited, MaxDepth) &&
128 isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
129 Size, DL, CtxI, AC, DT, TLI,
130 Visited, MaxDepth);
131 }
132
133 auto IsKnownDeref = [&]() {
134 bool CheckForNonNull, CheckForFreed;
135 if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
136 CheckForFreed)) ||
137 CheckForFreed)
138 return false;
139 if (CheckForNonNull &&
140 !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
141 return false;
142 // When using something like !dereferenceable on a load, the
143 // dereferenceability may only be valid on a specific control-flow path.
144 // If the instruction doesn't dominate the context instruction, we're
145 // asking about dereferenceability under the assumption that the
146 // instruction has been speculated to the point of the context instruction,
147 // in which case we don't know if the dereferenceability info still holds.
148 // We don't bother handling allocas here, as they aren't speculatable
149 // anyway.
150 auto *I = dyn_cast<Instruction>(V);
151 if (I && !isa<AllocaInst>(I))
152 return CtxI && isValidAssumeForContext(I, CtxI, DT);
153 return true;
154 };
155 if (IsKnownDeref()) {
156 // As we recursed through GEPs to get here, we've incrementally checked
157 // that each step advanced by a multiple of the alignment. If our base is
158 // properly aligned, then the original offset accessed must also be.
159 return isAligned(V, Alignment, DL);
160 }
161
162 /// TODO: refactor this function to be able to search independently for
163 /// Dereferenceability and Alignment requirements.
164
165
166 if (const auto *Call = dyn_cast<CallBase>(V)) {
167 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
168 return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
169 AC, DT, TLI, Visited, MaxDepth);
170
171 // If we have a call we can't recurse through, check to see if this is an
172 // allocation function for which we can establish a minimum object size.
173 // Such a minimum object size is analogous to a deref_or_null attribute in
174 // that we still need to prove the result non-null at point of use.
175 // NOTE: We can only use the object size as a base fact as we a) need to
176 // prove alignment too, and b) don't want the compile time impact of a
177 // separate recursive walk.
178 ObjectSizeOpts Opts;
179 // TODO: It may be okay to round to align, but that would imply that
180 // accessing slightly out of bounds was legal, and we're currently
181 // inconsistent about that. For the moment, be conservative.
182 Opts.RoundToAlign = false;
183 Opts.NullIsUnknownSize = true;
184 uint64_t ObjSize;
185 if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
186 APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
187 if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
188 isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
189 !V->canBeFreed()) {
190 // As we recursed through GEPs to get here, we've incrementally
191 // checked that each step advanced by a multiple of the alignment. If
192 // our base is properly aligned, then the original offset accessed
193 // must also be.
194 return isAligned(V, Alignment, DL);
195 }
196 }
197 }
198
199 // For gc.relocate, look through relocations
200 if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
201 return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
202 Alignment, Size, DL, CtxI, AC, DT,
203 TLI, Visited, MaxDepth);
204
205 if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
206 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
207 Size, DL, CtxI, AC, DT, TLI,
208 Visited, MaxDepth);
209
210 return isDereferenceableAndAlignedPointerViaAssumption(
211 V, Alignment,
212 [Size](const RetainedKnowledge &RK) {
213 return RK.ArgValue >= Size.getZExtValue();
214 },
215 DL, CtxI, AC, DT);
216}
217
218 bool llvm::isDereferenceableAndAlignedPointer(
219 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
220 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
221 const TargetLibraryInfo *TLI) {
222 // Note: At the moment, Size can be zero. This ends up being interpreted as
223 // a query of whether [Base, V] is dereferenceable and V is aligned (since
224 // that's what the implementation happened to do). It's unclear if this is
225 // the desired semantic, but at least SelectionDAG does exercise this case.
226
227 SmallPtrSet<const Value *, 32> Visited;
228 return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
229 DT, TLI, Visited, 16);
230}
231
232 bool llvm::isDereferenceableAndAlignedPointer(
233 const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
234 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
235 const TargetLibraryInfo *TLI) {
236 // For unsized types or scalable vectors we don't know exactly how many bytes
237 // are dereferenced, so bail out.
238 if (!Ty->isSized() || Ty->isScalableTy())
239 return false;
240
241 // When dereferenceability information is provided by a dereferenceable
242 // attribute, we know exactly how many bytes are dereferenceable. If we can
243 // determine the exact offset to the attributed variable, we can use that
244 // information here.
245
246 APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
247 DL.getTypeStoreSize(Ty));
248 return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
249 AC, DT, TLI);
250}
251
252 bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
253 const DataLayout &DL,
254 const Instruction *CtxI,
255 AssumptionCache *AC,
256 const DominatorTree *DT,
257 const TargetLibraryInfo *TLI) {
258 return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
259 TLI);
260}
261
262/// Test if A and B will obviously have the same value.
263///
264/// This includes recognizing that %t0 and %t1 will have the same
265/// value in code like this:
266/// \code
267/// %t0 = getelementptr \@a, 0, 3
268/// store i32 0, i32* %t0
269/// %t1 = getelementptr \@a, 0, 3
270/// %t2 = load i32* %t1
271/// \endcode
272///
273static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
274 // Test if the values are trivially equivalent.
275 if (A == B)
276 return true;
277
278 // Test if the values come from identical arithmetic instructions.
279 // Use isIdenticalToWhenDefined instead of isIdenticalTo because
280 // this function is only used when one address use dominates the
281 // other, which means that they'll always either have the same
282 // value or one of them will have an undefined value.
283 if (isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A))
284 if (const Instruction *BI = dyn_cast<Instruction>(B))
285 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
286 return true;
287
288 // Otherwise they may not be equivalent.
289 return false;
290}
291
292 bool llvm::isDereferenceableAndAlignedInLoop(
293 LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
294 AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
295 const Align Alignment = LI->getAlign();
296 auto &DL = LI->getDataLayout();
297 Value *Ptr = LI->getPointerOperand();
298 APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
299 DL.getTypeStoreSize(LI->getType()).getFixedValue());
300
301 // If given a uniform (i.e. non-varying) address, see if we can prove the
302 // access is safe within the loop w/o needing predication.
303 if (L->isLoopInvariant(Ptr))
304 return isDereferenceableAndAlignedPointer(
305 Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
306 &DT);
307
308 const SCEV *PtrScev = SE.getSCEV(Ptr);
309 auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);
310
311 // Check to see if we have a repeating access pattern and it's possible
312 // to prove all accesses are well aligned.
313 if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
314 return false;
315
316 auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
317 if (!Step)
318 return false;
319
320 // For the moment, restrict ourselves to the case where the access size is a
321 // multiple of the requested alignment and the base is aligned.
322 // TODO: generalize if a case is found which warrants it
323 if (EltSize.urem(Alignment.value()) != 0)
324 return false;
325
326 // TODO: Handle overlapping accesses.
327 if (EltSize.ugt(Step->getAPInt().abs()))
328 return false;
329
330 const SCEV *MaxBECount =
331 Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
332 : SE.getSymbolicMaxBackedgeTakenCount(L);
333 const SCEV *BECount = Predicates
334 ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
335 : SE.getBackedgeTakenCount(L);
336 if (isa<SCEVCouldNotCompute>(MaxBECount))
337 return false;
338
339 if (isa<SCEVCouldNotCompute>(BECount) && !UseSymbolicMaxBTCForDerefInLoop) {
340 // TODO: Support symbolic max backedge taken counts for loops without
341 // computable backedge taken counts.
342 MaxBECount =
343 Predicates
344 ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
345 : SE.getConstantMaxBackedgeTakenCount(L);
346 }
347
348 const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
349 L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr, &DT, AC);
350 if (isa<SCEVCouldNotCompute>(AccessStart) ||
351 isa<SCEVCouldNotCompute>(AccessEnd))
352 return false;
353
354 // Try to get the access size.
355 const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
356 if (isa<SCEVCouldNotCompute>(PtrDiff))
357 return false;
358 APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);
359
360 Value *Base = nullptr;
361 APInt AccessSize;
362 const SCEV *AccessSizeSCEV = nullptr;
363 if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
364 Base = NewBase->getValue();
365 AccessSize = MaxPtrDiff;
366 AccessSizeSCEV = PtrDiff;
367 } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
368 if (MinAdd->getNumOperands() != 2)
369 return false;
370
371 const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
372 const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
373 if (!Offset || !NewBase)
374 return false;
375
376 // The code below assumes the offset is unsigned, but GEP
377 // offsets are treated as signed so we can end up with a signed value
378 // here too. For example, suppose the initial PHI value is (i8 255),
379 // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
380 if (Offset->getAPInt().isNegative())
381 return false;
382
383 // For the moment, restrict ourselves to the case where the offset is a
384 // multiple of the requested alignment and the base is aligned.
385 // TODO: generalize if a case is found which warrants it
386 if (Offset->getAPInt().urem(Alignment.value()) != 0)
387 return false;
388
389 AccessSize = MaxPtrDiff + Offset->getAPInt();
390 AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
391 Base = NewBase->getValue();
392 } else
393 return false;
394
395 Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
396 return isDereferenceableAndAlignedPointerViaAssumption(
397 Base, Alignment,
398 [&SE, AccessSizeSCEV](const RetainedKnowledge &RK) {
399 return SE.isKnownPredicate(CmpInst::ICMP_ULE, AccessSizeSCEV,
400 SE.getSCEV(RK.IRArgValue));
401 },
402 DL, HeaderFirstNonPHI, AC, &DT) ||
403 isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
404 HeaderFirstNonPHI, AC, &DT);
405}
406
407 static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
408 const Function &F = *CtxI.getFunction();
409 // Speculative load may create a race that did not exist in the source.
410 return F.hasFnAttribute(Attribute::SanitizeThread) ||
411 // Speculative load may load data from dirty regions.
412 F.hasFnAttribute(Attribute::SanitizeAddress) ||
413 F.hasFnAttribute(Attribute::SanitizeHWAddress);
414}
415
416 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
417 return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
418}
419
420/// Check if executing a load of this pointer value cannot trap.
421///
422/// If DT and ScanFrom are specified this method performs context-sensitive
423/// analysis and returns true if it is safe to load immediately before ScanFrom.
424///
425/// If it is not obviously safe to load from the specified pointer, we do
426/// a quick local scan of the basic block containing \c ScanFrom, to determine
427/// if the address is already accessed.
428///
429/// This uses the pointee type to determine how many bytes need to be safe to
430/// load from the pointer.
431 bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
432 const DataLayout &DL,
433 Instruction *ScanFrom,
434 AssumptionCache *AC,
435 const DominatorTree *DT,
436 const TargetLibraryInfo *TLI) {
437 // If DT is not specified we can't make a context-sensitive query.
438 const Instruction *CtxI = DT ? ScanFrom : nullptr;
439 if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
440 TLI)) {
441 // With sanitizers `Dereferenceable` is not always enough for unconditional
442 // load.
443 if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
444 return true;
445 }
446
447 if (!ScanFrom)
448 return false;
449
450 if (Size.getBitWidth() > 64)
451 return false;
452 const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
453
454 // Otherwise, be a little bit aggressive by scanning the local block where we
455 // want to check to see if the pointer is already being loaded or stored
456 // from/to. If so, the previous load or store would have already trapped,
457 // so there is no harm doing an extra load (also, CSE will later eliminate
458 // the load entirely).
459 BasicBlock::iterator BBI = ScanFrom->getIterator(),
460 E = ScanFrom->getParent()->begin();
461
462 // We can at least always strip pointer casts even though we can't use the
463 // base here.
464 V = V->stripPointerCasts();
465
466 while (BBI != E) {
467 --BBI;
468
469 // If we see a free or a call which may write to memory (i.e. which might do
470 // a free), the pointer could be marked invalid.
471 if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
472 !isa<LifetimeIntrinsic>(BBI))
473 return false;
474
475 Value *AccessedPtr;
476 Type *AccessedTy;
477 Align AccessedAlign;
478 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
479 // Ignore volatile loads. The execution of a volatile load cannot
480 // be used to prove an address is backed by regular memory; it can,
481 // for example, point to an MMIO register.
482 if (LI->isVolatile())
483 continue;
484 AccessedPtr = LI->getPointerOperand();
485 AccessedTy = LI->getType();
486 AccessedAlign = LI->getAlign();
487 } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
488 // Ignore volatile stores (see comment for loads).
489 if (SI->isVolatile())
490 continue;
491 AccessedPtr = SI->getPointerOperand();
492 AccessedTy = SI->getValueOperand()->getType();
493 AccessedAlign = SI->getAlign();
494 } else
495 continue;
496
497 if (AccessedAlign < Alignment)
498 continue;
499
500 // Handle trivial cases.
501 if (AccessedPtr == V &&
502 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
503 return true;
504
505 if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
506 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
507 return true;
508 }
509 return false;
510}
511
512 bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
513 const DataLayout &DL,
514 Instruction *ScanFrom,
515 AssumptionCache *AC,
516 const DominatorTree *DT,
517 const TargetLibraryInfo *TLI) {
518 TypeSize TySize = DL.getTypeStoreSize(Ty);
519 if (TySize.isScalable())
520 return false;
521 APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
522 return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
523 TLI);
524}
525
526/// DefMaxInstsToScan - the default number of maximum instructions
527/// to scan in the block, used by FindAvailableLoadedValue().
528/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
529/// threading in part by eliminating partially redundant loads.
530/// At that point, the value of MaxInstsToScan was already set to '6'
531/// without documented explanation.
532 cl::opt<unsigned>
533 llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
534 cl::desc("Use this to specify the default maximum number of instructions "
535 "to scan backward from a given instruction, when searching for "
536 "available loaded value"));
537
538 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
539 BasicBlock::iterator &ScanFrom,
540 unsigned MaxInstsToScan,
541 BatchAAResults *AA, bool *IsLoad,
542 unsigned *NumScanedInst) {
543 // Don't CSE a load that is volatile or anything stronger than unordered.
544 if (!Load->isUnordered())
545 return nullptr;
546
547 MemoryLocation Loc = MemoryLocation::get(Load);
548 return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
549 ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
550 NumScanedInst);
551}
552
553// Check if the load and the store have the same base, constant offsets and
554// non-overlapping access ranges.
555static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
556 Type *LoadTy,
557 const Value *StorePtr,
558 Type *StoreTy,
559 const DataLayout &DL) {
560 APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
561 APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
562 const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
563 DL, LoadOffset, /* AllowNonInbounds */ false);
564 const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
565 DL, StoreOffset, /* AllowNonInbounds */ false);
566 if (LoadBase != StoreBase)
567 return false;
568 auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
569 auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
570 ConstantRange LoadRange(LoadOffset,
571 LoadOffset + LoadAccessSize.toRaw());
572 ConstantRange StoreRange(StoreOffset,
573 StoreOffset + StoreAccessSize.toRaw());
574 return LoadRange.intersectWith(StoreRange).isEmptySet();
575}
576
577 static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
578 Type *AccessTy, bool AtLeastAtomic,
579 const DataLayout &DL, bool *IsLoadCSE) {
580 // If this is a load of Ptr, the loaded value is available.
581 // (This is true even if the load is volatile or atomic, although
582 // those cases are unlikely.)
583 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
584 // We can value forward from an atomic to a non-atomic, but not the
585 // other way around.
586 if (LI->isAtomic() < AtLeastAtomic)
587 return nullptr;
588
589 Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
590 if (!AreEquivalentAddressValues(LoadPtr, Ptr))
591 return nullptr;
592
593 if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
594 if (IsLoadCSE)
595 *IsLoadCSE = true;
596 return LI;
597 }
598 }
599
600 // If this is a store through Ptr, the value is available!
601 // (This is true even if the store is volatile or atomic, although
602 // those cases are unlikely.)
603 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
604 // We can value forward from an atomic to a non-atomic, but not the
605 // other way around.
606 if (SI->isAtomic() < AtLeastAtomic)
607 return nullptr;
608
609 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
610 if (!AreEquivalentAddressValues(StorePtr, Ptr))
611 return nullptr;
612
613 if (IsLoadCSE)
614 *IsLoadCSE = false;
615
616 Value *Val = SI->getValueOperand();
617 if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
618 return Val;
619
620 TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
621 TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
622 if (TypeSize::isKnownLE(LoadSize, StoreSize))
623 if (auto *C = dyn_cast<Constant>(Val))
624 return ConstantFoldLoadFromConst(C, AccessTy, DL);
625 }
626
627 if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
628 // Don't forward from (non-atomic) memset to atomic load.
629 if (AtLeastAtomic)
630 return nullptr;
631
632 // Only handle constant memsets.
633 auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
634 auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
635 if (!Val || !Len)
636 return nullptr;
637
638 // Handle offsets.
639 int64_t StoreOffset = 0, LoadOffset = 0;
640 const Value *StoreBase =
641 GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
642 const Value *LoadBase =
643 GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
644 if (StoreBase != LoadBase || LoadOffset < StoreOffset)
645 return nullptr;
646
647 if (IsLoadCSE)
648 *IsLoadCSE = false;
649
650 TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
651 if (LoadTypeSize.isScalable())
652 return nullptr;
653
654 // Make sure the read bytes are contained in the memset.
655 uint64_t LoadSize = LoadTypeSize.getFixedValue();
656 if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
657 return nullptr;
658
659 APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
660 : Val->getValue().trunc(LoadSize);
661 ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
662 if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
663 return SplatC;
664
665 return nullptr;
666 }
667
668 return nullptr;
669}
670
671 Value *llvm::findAvailablePtrLoadStore(
672 const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
673 BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
674 BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
675 if (MaxInstsToScan == 0)
676 MaxInstsToScan = ~0U;
677
678 const DataLayout &DL = ScanBB->getDataLayout();
679 const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
680
681 while (ScanFrom != ScanBB->begin()) {
682 // We must ignore debug info directives when counting (otherwise they
683 // would affect codegen).
684 Instruction *Inst = &*--ScanFrom;
685 if (Inst->isDebugOrPseudoInst())
686 continue;
687
688 // Restore ScanFrom to expected value in case next test succeeds
689 ScanFrom++;
690
691 if (NumScanedInst)
692 ++(*NumScanedInst);
693
694 // Don't scan huge blocks.
695 if (MaxInstsToScan-- == 0)
696 return nullptr;
697
698 --ScanFrom;
699
700 if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
701 AtLeastAtomic, DL, IsLoadCSE))
702 return Available;
703
704 // Try to get the store size for the type.
705 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
706 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
707
708 // If both StrippedPtr and StorePtr reach all the way to an alloca or
709 // global and they are different, ignore the store. This is a trivial form
710 // of alias analysis that is important for reg2mem'd code.
711 if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
712 (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
713 StrippedPtr != StorePtr)
714 continue;
715
716 if (!AA) {
717 // When AA isn't available, but if the load and the store have the same
718 // base, constant offsets and non-overlapping access ranges, ignore the
719 // store. This is a simple form of alias analysis that is used by the
720 // inliner. FIXME: use BasicAA if possible.
721 if (areNonOverlapSameBaseLoadAndStore(
722 Loc.Ptr, AccessTy, SI->getPointerOperand(),
723 SI->getValueOperand()->getType(), DL))
724 continue;
725 } else {
726 // If we have alias analysis and it says the store won't modify the
727 // loaded value, ignore the store.
728 if (!isModSet(AA->getModRefInfo(SI, Loc)))
729 continue;
730 }
731
732 // Otherwise the store may or may not alias the pointer; bail out.
733 ++ScanFrom;
734 return nullptr;
735 }
736
737 // If this is some other instruction that may clobber Ptr, bail out.
738 if (Inst->mayWriteToMemory()) {
739 // If alias analysis claims that it really won't modify the load,
740 // ignore it.
741 if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
742 continue;
743
744 // May modify the pointer, bail out.
745 ++ScanFrom;
746 return nullptr;
747 }
748 }
749
750 // Got to the start of the block, we didn't find it, but are done for this
751 // block.
752 return nullptr;
753}
754
755 Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
756 bool *IsLoadCSE,
757 unsigned MaxInstsToScan) {
758 const DataLayout &DL = Load->getDataLayout();
759 Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
760 BasicBlock *ScanBB = Load->getParent();
761 Type *AccessTy = Load->getType();
762 bool AtLeastAtomic = Load->isAtomic();
763
764 if (!Load->isUnordered())
765 return nullptr;
766
767 // Try to find an available value first, and delay expensive alias analysis
768 // queries until later.
769 Value *Available = nullptr;
770 SmallVector<Instruction *> MustNotAliasInsts;
771 for (Instruction &Inst : make_range(++Load->getReverseIterator(),
772 ScanBB->rend())) {
773 if (Inst.isDebugOrPseudoInst())
774 continue;
775
776 if (MaxInstsToScan-- == 0)
777 return nullptr;
778
779 Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
780 AtLeastAtomic, DL, IsLoadCSE);
781 if (Available)
782 break;
783
784 if (Inst.mayWriteToMemory())
785 MustNotAliasInsts.push_back(&Inst);
786 }
787
788 // If we found an available value, ensure that the instructions in between
789 // did not modify the memory location.
790 if (Available) {
791 MemoryLocation Loc = MemoryLocation::get(Load);
792 for (Instruction *Inst : MustNotAliasInsts)
793 if (isModSet(AA.getModRefInfo(Inst, Loc)))
794 return nullptr;
795 }
796
797 return Available;
798}
799
800// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that only
801// feeds into them.
802static bool isPointerUseReplacable(const Use &U) {
803 unsigned Limit = 40;
804 SmallVector<const User *> Worklist({U.getUser()});
805 SmallPtrSet<const User *, 8> Visited;
806
807 while (!Worklist.empty() && --Limit) {
808 auto *User = Worklist.pop_back_val();
809 if (!Visited.insert(User).second)
810 continue;
811 if (isa<ICmpInst, PtrToIntInst>(User))
812 continue;
813 if (isa<PHINode, SelectInst>(User))
814 Worklist.append(User->user_begin(), User->user_end());
815 else
816 return false;
817 }
818
819 return Limit != 0;
820}
821
822// Returns true if `To` is a null pointer, a constant dereferenceable pointer,
823// or both pointers have the same underlying object.
824static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
825 const DataLayout &DL) {
826 // This is not strictly correct, but we do it for now to retain important
827 // optimizations.
828 if (isa<ConstantPointerNull>(To))
829 return true;
830 if (isa<Constant>(To) &&
831 isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
832 return true;
833 return getUnderlyingObjectAggressive(From) ==
834 getUnderlyingObjectAggressive(To);
835}
836
837 bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
838 const DataLayout &DL) {
839 assert(U->getType() == To->getType() && "values must have matching types");
840 // Not a pointer, just return true.
841 if (!To->getType()->isPointerTy())
842 return true;
843
844 // Do not perform replacements in lifetime intrinsic arguments.
845 if (isa<LifetimeIntrinsic>(U.getUser()))
846 return false;
847
848 if (isPointerAlwaysReplaceable(&*U, To, DL))
849 return true;
850 return isPointerUseReplacable(U);
851}
852
853 bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
854 const DataLayout &DL) {
855 assert(From->getType() == To->getType() && "values must have matching types");
856 // Not a pointer, just return true.
857 if (!From->getType()->isPointerTy())
858 return true;
859
860 return isPointerAlwaysReplaceable(From, To, DL);
861}
862
863 bool llvm::isDereferenceableReadOnlyLoop(
864 Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
865 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
866 for (BasicBlock *BB : L->blocks()) {
867 for (Instruction &I : *BB) {
868 if (auto *LI = dyn_cast<LoadInst>(&I)) {
869 if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
870 return false;
871 } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
872 return false;
873 }
874 }
875 return true;
876}