Loads.cpp
1//===- Loads.cpp - Local load analysis ------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines simple local analyses for load instructions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Analysis/Loads.h"
14#include "llvm/Analysis/AliasAnalysis.h"
15#include "llvm/Analysis/AssumeBundleQueries.h"
16#include "llvm/Analysis/LoopAccessAnalysis.h"
17#include "llvm/Analysis/LoopInfo.h"
18#include "llvm/Analysis/MemoryBuiltins.h"
19#include "llvm/Analysis/MemoryLocation.h"
20#include "llvm/Analysis/ScalarEvolution.h"
21#include "llvm/Analysis/ScalarEvolutionExpressions.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/IntrinsicInst.h"
25#include "llvm/IR/Operator.h"
26
27using namespace llvm;
28
29static cl::opt<bool>
30 UseSymbolicMaxBTCForDerefInLoop("use-symbolic-maxbtc-deref-loop",
31 cl::init(false));
32
33static bool isAligned(const Value *Base, Align Alignment,
34 const DataLayout &DL) {
35 return Base->getPointerAlignment(DL) >= Alignment;
36}
37
38static bool isDereferenceableAndAlignedPointerViaAssumption(
39 const Value *Ptr, Align Alignment,
40 function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
41 const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
42 const DominatorTree *DT) {
43 // Dereferenceable information from assumptions is only valid if the value
44 // cannot be freed between the assumption and use. For now just use the
45 // information for values that cannot be freed in the function.
46 // TODO: More precisely check if the pointer can be freed between assumption
47 // and use.
48 if (!CtxI || Ptr->canBeFreed())
49 return false;
50 /// Look through assumes to see if both dereferenceability and alignment can
51 /// be proven by an assume if needed.
52 RetainedKnowledge AlignRK;
53 RetainedKnowledge DerefRK;
54 bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
55 return getKnowledgeForValue(
56 Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
57 [&](RetainedKnowledge RK, Instruction *Assume, auto) {
58 if (!isValidAssumeForContext(Assume, CtxI, DT))
59 return false;
60 if (RK.AttrKind == Attribute::Alignment)
61 AlignRK = std::max(AlignRK, RK);
62 if (RK.AttrKind == Attribute::Dereferenceable)
63 DerefRK = std::max(DerefRK, RK);
64 IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
65 if (IsAligned && DerefRK && CheckSize(DerefRK))
66 return true; // We have found what we needed, so we stop looking.
67 return false; // Other assumes may have better information, so
68 // keep looking.
69 });
70}
71
72/// Test if V is always a pointer to allocated and suitably aligned memory for
73/// a simple load or store.
74static bool isDereferenceableAndAlignedPointer(
75 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
76 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
77 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
78 unsigned MaxDepth) {
79 assert(V->getType()->isPointerTy() && "Base must be pointer");
80
81 // Recursion limit.
82 if (MaxDepth-- == 0)
83 return false;
84
85 // Already visited? Bail out, we've likely hit unreachable code.
86 if (!Visited.insert(V).second)
87 return false;
88
89 // Note that it is not safe to speculate into a malloc'd region because
90 // malloc may return null.
91
92 // For GEPs, determine if the indexing lands within the allocated object.
93 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
94 const Value *Base = GEP->getPointerOperand();
95
96 APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
97 if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
98 !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
99 .isMinValue())
100 return false;
101
102 // If the base pointer is dereferenceable for Offset+Size bytes, then the
103 // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
104 // pointer is aligned to Align bytes, and the Offset is divisible by Align
105 // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
106 // aligned to Align bytes.
107
108 // Offset and Size may have different bit widths if we have visited an
109 // addrspacecast, so we can't do arithmetic directly on the APInt values.
110 return isDereferenceableAndAlignedPointer(
111 Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
112 CtxI, AC, DT, TLI, Visited, MaxDepth);
113 }
114
115 // bitcast instructions are no-ops as far as dereferenceability is concerned.
116 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
117 if (BC->getSrcTy()->isPointerTy())
118 return isDereferenceableAndAlignedPointer(
119 BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
120 Visited, MaxDepth);
121 }
122
123 // Recurse into both hands of select.
124 if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
125 return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
126 Size, DL, CtxI, AC, DT, TLI,
127 Visited, MaxDepth) &&
128 isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
129 Size, DL, CtxI, AC, DT, TLI,
130 Visited, MaxDepth);
131 }
132
133 auto IsKnownDeref = [&]() {
134 bool CheckForNonNull, CheckForFreed;
135 if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
136 CheckForFreed)) ||
137 CheckForFreed)
138 return false;
139 if (CheckForNonNull &&
140 !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
141 return false;
142 // When using something like !dereferenceable on a load, the
143 // dereferenceability may only be valid on a specific control-flow path.
144 // If the instruction doesn't dominate the context instruction, we're
145 // asking about dereferenceability under the assumption that the
146 // instruction has been speculated to the point of the context instruction,
147 // in which case we don't know if the dereferenceability info still holds.
148 // We don't bother handling allocas here, as they aren't speculatable
149 // anyway.
150 auto *I = dyn_cast<Instruction>(V);
151 if (I && !isa<AllocaInst>(I))
152 return CtxI && isValidAssumeForContext(I, CtxI, DT);
153 return true;
154 };
155 if (IsKnownDeref()) {
156 // As we recursed through GEPs to get here, we've incrementally checked
157 // that each step advanced by a multiple of the alignment. If our base is
158 // properly aligned, then the original offset accessed must also be.
159 return isAligned(V, Alignment, DL);
160 }
161
162 /// TODO refactor this function to be able to search independently for
163 /// Dereferenceability and Alignment requirements.
164
165
166 if (const auto *Call = dyn_cast<CallBase>(V)) {
167 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
168 return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
169 AC, DT, TLI, Visited, MaxDepth);
170
171 // If we have a call we can't recurse through, check to see if this is an
172 // allocation function for which we can establish a minimum object size.
173 // Such a minimum object size is analogous to a deref_or_null attribute in
174 // that we still need to prove the result non-null at point of use.
175 // NOTE: We can only use the object size as a base fact as we a) need to
176 // prove alignment too, and b) don't want the compile time impact of a
177 // separate recursive walk.
178 ObjectSizeOpts Opts;
179 // TODO: It may be okay to round to align, but that would imply that
180 // accessing slightly out of bounds was legal, and we're currently
181 // inconsistent about that. For the moment, be conservative.
182 Opts.RoundToAlign = false;
183 Opts.NullIsUnknownSize = true;
184 uint64_t ObjSize;
185 if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
186 APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
187 if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
188 isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
189 !V->canBeFreed()) {
190 // As we recursed through GEPs to get here, we've incrementally
191 // checked that each step advanced by a multiple of the alignment. If
192 // our base is properly aligned, then the original offset accessed
193 // must also be.
194 return isAligned(V, Alignment, DL);
195 }
196 }
197 }
198
199 // For gc.relocate, look through relocations
200 if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
201 return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
202 Alignment, Size, DL, CtxI, AC, DT,
203 TLI, Visited, MaxDepth);
204
205 if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
206 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
207 Size, DL, CtxI, AC, DT, TLI,
208 Visited, MaxDepth);
209
210 return isDereferenceableAndAlignedPointerViaAssumption(
211 V, Alignment,
212 [Size](const RetainedKnowledge &RK) {
213 return RK.ArgValue >= Size.getZExtValue();
214 },
215 DL, CtxI, AC, DT);
216}
217
218bool llvm::isDereferenceableAndAlignedPointer(
219 const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
220 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
221 const TargetLibraryInfo *TLI) {
222 // Note: At the moment, Size can be zero. This ends up being interpreted as
223 // a query of whether [Base, V] is dereferenceable and V is aligned (since
224 // that's what the implementation happened to do). It's unclear if this is
225 // the desired semantic, but at least SelectionDAG does exercise this case.
226
227 SmallPtrSet<const Value *, 32> Visited;
228 return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
229 DT, TLI, Visited, 16);
230}
231
232bool llvm::isDereferenceableAndAlignedPointer(
233 const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
234 const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
235 const TargetLibraryInfo *TLI) {
236 // For unsized types or scalable vectors we don't know exactly how many bytes
237 // are dereferenced, so bail out.
238 if (!Ty->isSized() || Ty->isScalableTy())
239 return false;
240
241 // When dereferenceability information is provided by a dereferenceable
242 // attribute, we know exactly how many bytes are dereferenceable. If we can
243 // determine the exact offset to the attributed variable, we can use that
244 // information here.
245
246 APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
247 DL.getTypeStoreSize(Ty));
248 return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
249 AC, DT, TLI);
250}
251
252bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
253 const DataLayout &DL,
254 const Instruction *CtxI,
255 AssumptionCache *AC,
256 const DominatorTree *DT,
257 const TargetLibraryInfo *TLI) {
258 return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
259 TLI);
260}
261
262/// Test if A and B will obviously have the same value.
263///
264/// This includes recognizing that %t0 and %t1 will have the same
265/// value in code like this:
266/// \code
267/// %t0 = getelementptr \@a, 0, 3
268/// store i32 0, i32* %t0
269/// %t1 = getelementptr \@a, 0, 3
270/// %t2 = load i32* %t1
271/// \endcode
272///
273static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
274 // Test if the values are trivially equivalent.
275 if (A == B)
276 return true;
277
278 // Test if the values come from identical arithmetic instructions.
279 // Use isIdenticalToWhenDefined instead of isIdenticalTo because
280 // this function is only used when one address use dominates the
281 // other, which means that they'll always either have the same
282 // value or one of them will have an undefined value.
283 if (isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A))
284 if (const Instruction *BI = dyn_cast<Instruction>(B))
285 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
286 return true;
287
288 // Otherwise they may not be equivalent.
289 return false;
290}
291
292bool llvm::isDereferenceableAndAlignedInLoop(
293 LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
294 AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
295 const Align Alignment = LI->getAlign();
296 auto &DL = LI->getDataLayout();
297 Value *Ptr = LI->getPointerOperand();
298 APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
299 DL.getTypeStoreSize(LI->getType()).getFixedValue());
300
301 // If given a uniform (i.e. non-varying) address, see if we can prove the
302 // access is safe within the loop w/o needing predication.
303 if (L->isLoopInvariant(Ptr))
304 return isDereferenceableAndAlignedPointer(
305 Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
306 &DT);
307
308 const SCEV *PtrScev = SE.getSCEV(Ptr);
309 auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);
310
311 // Check to see if we have a repeating access pattern and it's possible
312 // to prove all accesses are well aligned.
313 if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
314 return false;
315
316 auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
317 if (!Step)
318 return false;
319
320 // For the moment, restrict ourselves to the case where the access size is a
321 // multiple of the requested alignment and the base is aligned.
322 // TODO: generalize if a case is found which warrants it.
323 if (EltSize.urem(Alignment.value()) != 0)
324 return false;
325
326 // TODO: Handle overlapping accesses.
327 if (EltSize.ugt(Step->getAPInt().abs()))
328 return false;
329
330 const SCEV *MaxBECount =
331 Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
332 : SE.getSymbolicMaxBackedgeTakenCount(L);
333 const SCEV *BECount = Predicates
334 ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
335 : SE.getBackedgeTakenCount(L);
336 if (isa<SCEVCouldNotCompute>(MaxBECount))
337 return false;
338
339 if (isa<SCEVCouldNotCompute>(BECount) && !UseSymbolicMaxBTCForDerefInLoop) {
340 // TODO: Support symbolic max backedge taken counts for loops without
341 // computable backedge taken counts.
342 MaxBECount =
343 Predicates
344 ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
345 : SE.getConstantMaxBackedgeTakenCount(L);
346 }
347
348 const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
349 L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr, &DT, AC);
350 if (isa<SCEVCouldNotCompute>(AccessStart) ||
351 isa<SCEVCouldNotCompute>(AccessEnd))
352 return false;
353
354 // Try to get the access size.
355 const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
356 if (isa<SCEVCouldNotCompute>(PtrDiff))
357 return false;
358 ScalarEvolution::LoopGuards LoopGuards =
359 ScalarEvolution::LoopGuards::collect(AddRec->getLoop(), SE);
360 APInt MaxPtrDiff =
361 SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, LoopGuards));
362
363 Value *Base = nullptr;
364 APInt AccessSize;
365 const SCEV *AccessSizeSCEV = nullptr;
366 if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
367 Base = NewBase->getValue();
368 AccessSize = MaxPtrDiff;
369 AccessSizeSCEV = PtrDiff;
370 } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
371 if (MinAdd->getNumOperands() != 2)
372 return false;
373
374 const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
375 const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
376 if (!Offset || !NewBase)
377 return false;
378
379 // The code below assumes the offset is unsigned, but GEP
380 // offsets are treated as signed so we can end up with a signed value
381 // here too. For example, suppose the initial PHI value is (i8 255),
382 // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
383 if (Offset->getAPInt().isNegative())
384 return false;
385
386 // For the moment, restrict ourselves to the case where the offset is a
387 // multiple of the requested alignment and the base is aligned.
388 // TODO: generalize if a case is found which warrants it.
389 if (Offset->getAPInt().urem(Alignment.value()) != 0)
390 return false;
391
392 bool Overflow = false;
393 AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
394 if (Overflow)
395 return false;
396 AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
397 Base = NewBase->getValue();
398 } else
399 return false;
400
401 Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
402 return isDereferenceableAndAlignedPointerViaAssumption(
403 Base, Alignment,
404 [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) {
405 return SE.isKnownPredicate(
406 CmpInst::ICMP_ULE, AccessSizeSCEV,
407 SE.applyLoopGuards(SE.getSCEV(RK.IRArgValue), LoopGuards));
408 },
409 DL, HeaderFirstNonPHI, AC, &DT) ||
410 isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
411 HeaderFirstNonPHI, AC, &DT);
412}
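
// --- Editor's illustrative sketch (not part of Loads.cpp) -------------------
// A minimal example of querying isDereferenceableAndAlignedInLoop from
// loop-oriented code. The helper name `exampleLoopLoadIsSafe` is hypothetical;
// Predicates collects any SCEV predicates a positive answer relies on.
static bool exampleLoopLoadIsSafe(LoadInst *LI, Loop *L, ScalarEvolution &SE,
                                  DominatorTree &DT, AssumptionCache *AC) {
  SmallVector<const SCEVPredicate *, 4> Predicates;
  // Here we only accept proofs that do not require versioning on predicates.
  return isDereferenceableAndAlignedInLoop(LI, L, SE, DT, AC, &Predicates) &&
         Predicates.empty();
}
// -----------------------------------------------------------------------------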
413
414static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
415 const Function &F = *CtxI.getFunction();
416 // Speculative load may create a race that did not exist in the source.
417 return F.hasFnAttribute(Attribute::SanitizeThread) ||
418 // Speculative load may load data from dirty regions.
419 F.hasFnAttribute(Attribute::SanitizeAddress) ||
420 F.hasFnAttribute(Attribute::SanitizeHWAddress);
421}
422
423bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
424 return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
425}
426
427/// Check if executing a load of this pointer value cannot trap.
428///
429/// If DT and ScanFrom are specified this method performs context-sensitive
430/// analysis and returns true if it is safe to load immediately before ScanFrom.
431///
432/// If it is not obviously safe to load from the specified pointer, we do
433/// a quick local scan of the basic block containing \c ScanFrom, to determine
434/// if the address is already accessed.
435///
436/// This uses the pointee type to determine how many bytes need to be safe to
437/// load from the pointer.
438bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
439 const APInt &Size, const DataLayout &DL,
440 Instruction *ScanFrom,
441 AssumptionCache *AC,
442 const DominatorTree *DT,
443 const TargetLibraryInfo *TLI) {
444 // If DT is not specified we can't make a context-sensitive query.
445 const Instruction* CtxI = DT ? ScanFrom : nullptr;
446 if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
447 TLI)) {
448 // With sanitizers `Dereferenceable` is not always enough for unconditional
449 // load.
450 if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
451 return true;
452 }
453
454 if (!ScanFrom)
455 return false;
456
457 if (Size.getBitWidth() > 64)
458 return false;
459 const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
460
461 // Otherwise, be a little bit aggressive by scanning the local block where we
462 // want to check to see if the pointer is already being loaded or stored
463 // from/to. If so, the previous load or store would have already trapped,
464 // so there is no harm doing an extra load (also, CSE will later eliminate
465 // the load entirely).
466 BasicBlock::iterator BBI = ScanFrom->getIterator(),
467 E = ScanFrom->getParent()->begin();
468
469 // We can at least always strip pointer casts even though we can't use the
470 // base here.
471 V = V->stripPointerCasts();
472
473 while (BBI != E) {
474 --BBI;
475
476 // If we see a free or a call which may write to memory (i.e. which might do
477 // a free), the pointer could be marked invalid.
478 if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
479 !isa<LifetimeIntrinsic>(BBI))
480 return false;
481
482 Value *AccessedPtr;
483 Type *AccessedTy;
484 Align AccessedAlign;
485 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
486 // Ignore volatile loads. The execution of a volatile load cannot
487 // be used to prove an address is backed by regular memory; it can,
488 // for example, point to an MMIO register.
489 if (LI->isVolatile())
490 continue;
491 AccessedPtr = LI->getPointerOperand();
492 AccessedTy = LI->getType();
493 AccessedAlign = LI->getAlign();
494 } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
495 // Ignore volatile stores (see comment for loads).
496 if (SI->isVolatile())
497 continue;
498 AccessedPtr = SI->getPointerOperand();
499 AccessedTy = SI->getValueOperand()->getType();
500 AccessedAlign = SI->getAlign();
501 } else
502 continue;
503
504 if (AccessedAlign < Alignment)
505 continue;
506
507 // Handle trivial cases.
508 if (AccessedPtr == V &&
509 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
510 return true;
511
512 if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
513 TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
514 return true;
515 }
516 return false;
517}
518
519bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
520 const DataLayout &DL,
521 Instruction *ScanFrom,
522 AssumptionCache *AC,
523 const DominatorTree *DT,
524 const TargetLibraryInfo *TLI) {
525 TypeSize TySize = DL.getTypeStoreSize(Ty);
526 if (TySize.isScalable())
527 return false;
528 APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
529 return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
530 TLI);
531}
532
533/// DefMaxInstsToScan - the default maximum number of instructions
534/// to scan in the block, used by FindAvailableLoadedValue().
535/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
536/// threading in part by eliminating partially redundant loads.
537/// At that point, the value of MaxInstsToScan was already set to '6'
538/// without documented explanation.
539cl::opt<unsigned>
540llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
541 cl::desc("Use this to specify the default maximum number of instructions "
542 "to scan backward from a given instruction, when searching for "
543 "available loaded value"));
544
545Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
546 BasicBlock::iterator &ScanFrom,
547 unsigned MaxInstsToScan,
548 BatchAAResults *AA, bool *IsLoad,
549 unsigned *NumScanedInst) {
550 // Don't CSE a load that is volatile or anything stronger than unordered.
551 if (!Load->isUnordered())
552 return nullptr;
553
554 MemoryLocation Loc = MemoryLocation::get(Load);
555 return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
556 ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
557 NumScanedInst);
558}
559
560// Check if the load and the store have the same base, constant offsets and
561// non-overlapping access ranges.
562static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
563 Type *LoadTy,
564 const Value *StorePtr,
565 Type *StoreTy,
566 const DataLayout &DL) {
567 APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
568 APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
569 const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
570 DL, LoadOffset, /* AllowNonInbounds */ false);
571 const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
572 DL, StoreOffset, /* AllowNonInbounds */ false);
573 if (LoadBase != StoreBase)
574 return false;
575 auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
576 auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
577 ConstantRange LoadRange(LoadOffset,
578 LoadOffset + LoadAccessSize.toRaw());
579 ConstantRange StoreRange(StoreOffset,
580 StoreOffset + StoreAccessSize.toRaw());
581 return LoadRange.intersectWith(StoreRange).isEmptySet();
582}
583
584static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
585 Type *AccessTy, bool AtLeastAtomic,
586 const DataLayout &DL, bool *IsLoadCSE) {
587 // If this is a load of Ptr, the loaded value is available.
588 // (This is true even if the load is volatile or atomic, although
589 // those cases are unlikely.)
590 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
591 // We can value forward from an atomic to a non-atomic, but not the
592 // other way around.
593 if (LI->isAtomic() < AtLeastAtomic)
594 return nullptr;
595
596 Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
597 if (!AreEquivalentAddressValues(LoadPtr, Ptr))
598 return nullptr;
599
600 if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
601 if (IsLoadCSE)
602 *IsLoadCSE = true;
603 return LI;
604 }
605 }
606
607 // If this is a store through Ptr, the value is available!
608 // (This is true even if the store is volatile or atomic, although
609 // those cases are unlikely.)
610 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
611 // We can value forward from an atomic to a non-atomic, but not the
612 // other way around.
613 if (SI->isAtomic() < AtLeastAtomic)
614 return nullptr;
615
616 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
617 if (!AreEquivalentAddressValues(StorePtr, Ptr))
618 return nullptr;
619
620 if (IsLoadCSE)
621 *IsLoadCSE = false;
622
623 Value *Val = SI->getValueOperand();
624 if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
625 return Val;
626
627 TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
628 TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
629 if (TypeSize::isKnownLE(LoadSize, StoreSize))
630 if (auto *C = dyn_cast<Constant>(Val))
631 return ConstantFoldLoadFromConst(C, AccessTy, DL);
632 }
633
634 if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
635 // Don't forward from (non-atomic) memset to atomic load.
636 if (AtLeastAtomic)
637 return nullptr;
638
639 // Only handle constant memsets.
640 auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
641 auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
642 if (!Val || !Len)
643 return nullptr;
644
645 // Handle offsets.
646 int64_t StoreOffset = 0, LoadOffset = 0;
647 const Value *StoreBase =
648 GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
649 const Value *LoadBase =
650 GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
651 if (StoreBase != LoadBase || LoadOffset < StoreOffset)
652 return nullptr;
653
654 if (IsLoadCSE)
655 *IsLoadCSE = false;
656
657 TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
658 if (LoadTypeSize.isScalable())
659 return nullptr;
660
661 // Make sure the read bytes are contained in the memset.
662 uint64_t LoadSize = LoadTypeSize.getFixedValue();
663 if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
664 return nullptr;
665
666 APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
667 : Val->getValue().trunc(LoadSize);
668 ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
669 if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
670 return SplatC;
671
672 return nullptr;
673 }
674
675 return nullptr;
676}
677
678Value *llvm::findAvailablePtrLoadStore(
679 const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
680 BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
681 BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
682 if (MaxInstsToScan == 0)
683 MaxInstsToScan = ~0U;
684
685 const DataLayout &DL = ScanBB->getDataLayout();
686 const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
687
688 while (ScanFrom != ScanBB->begin()) {
689 // We must ignore debug info directives when counting (otherwise they
690 // would affect codegen).
691 Instruction *Inst = &*--ScanFrom;
692 if (Inst->isDebugOrPseudoInst())
693 continue;
694
695 // Restore ScanFrom to expected value in case next test succeeds
696 ScanFrom++;
697
698 if (NumScanedInst)
699 ++(*NumScanedInst);
700
701 // Don't scan huge blocks.
702 if (MaxInstsToScan-- == 0)
703 return nullptr;
704
705 --ScanFrom;
706
707 if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
708 AtLeastAtomic, DL, IsLoadCSE))
709 return Available;
710
711 // Try to get the store size for the type.
712 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
713 Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
714
715 // If both StrippedPtr and StorePtr reach all the way to an alloca or
716 // global and they are different, ignore the store. This is a trivial form
717 // of alias analysis that is important for reg2mem'd code.
718 if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
719 (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
720 StrippedPtr != StorePtr)
721 continue;
722
723 if (!AA) {
724 // When AA isn't available, but if the load and the store have the same
725 // base, constant offsets and non-overlapping access ranges, ignore the
726 // store. This is a simple form of alias analysis that is used by the
727 // inliner. FIXME: use BasicAA if possible.
728 if (areNonOverlapSameBaseLoadAndStore(
729 Loc.Ptr, AccessTy, SI->getPointerOperand(),
730 SI->getValueOperand()->getType(), DL))
731 continue;
732 } else {
733 // If we have alias analysis and it says the store won't modify the
734 // loaded value, ignore the store.
735 if (!isModSet(AA->getModRefInfo(SI, Loc)))
736 continue;
737 }
738
739 // Otherwise the store may or may not alias the pointer; bail out.
740 ++ScanFrom;
741 return nullptr;
742 }
743
744 // If this is some other instruction that may clobber Ptr, bail out.
745 if (Inst->mayWriteToMemory()) {
746 // If alias analysis claims that it really won't modify the load,
747 // ignore it.
748 if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
749 continue;
750
751 // May modify the pointer, bail out.
752 ++ScanFrom;
753 return nullptr;
754 }
755 }
756
757 // Got to the start of the block, we didn't find it, but are done for this
758 // block.
759 return nullptr;
760}
761
762Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
763 bool *IsLoadCSE,
764 unsigned MaxInstsToScan) {
765 const DataLayout &DL = Load->getDataLayout();
766 Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
767 BasicBlock *ScanBB = Load->getParent();
768 Type *AccessTy = Load->getType();
769 bool AtLeastAtomic = Load->isAtomic();
770
771 if (!Load->isUnordered())
772 return nullptr;
773
774 // Try to find an available value first, and delay expensive alias analysis
775 // queries until later.
776 Value *Available = nullptr;
777 SmallVector<Instruction *> MustNotAliasInsts;
778 for (Instruction &Inst : make_range(++Load->getReverseIterator(),
779 ScanBB->rend())) {
780 if (Inst.isDebugOrPseudoInst())
781 continue;
782
783 if (MaxInstsToScan-- == 0)
784 return nullptr;
785
786 Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
787 AtLeastAtomic, DL, IsLoadCSE);
788 if (Available)
789 break;
790
791 if (Inst.mayWriteToMemory())
792 MustNotAliasInsts.push_back(&Inst);
793 }
794
795 // If we found an available value, ensure that the instructions in between
796 // did not modify the memory location.
797 if (Available) {
798 MemoryLocation Loc = MemoryLocation::get(Load);
799 for (Instruction *Inst : MustNotAliasInsts)
800 if (isModSet(AA.getModRefInfo(Inst, Loc)))
801 return nullptr;
802 }
803
804 return Available;
805}
806
807// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that only
808// feeds into them.
809static bool isPointerUseReplacable(const Use &U) {
810 unsigned Limit = 40;
811 SmallVector<const User *> Worklist({U.getUser()});
812 SmallPtrSet<const User *, 8> Visited;
813
814 while (!Worklist.empty() && --Limit) {
815 auto *User = Worklist.pop_back_val();
816 if (!Visited.insert(User).second)
817 continue;
818 if (isa<ICmpInst, PtrToIntInst>(User))
819 continue;
820 if (isa<PHINode, SelectInst>(User))
821 Worklist.append(User->user_begin(), User->user_end());
822 else
823 return false;
824 }
825
826 return Limit != 0;
827}
828
829// Returns true if `To` is a null pointer or a constant dereferenceable
830// pointer, or if both pointers have the same underlying object.
831static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
832 const DataLayout &DL) {
833 // This is not strictly correct, but we do it for now to retain important
834 // optimizations.
835 if (isa<ConstantPointerNull>(To))
836 return true;
837 if (isa<Constant>(To) &&
838 isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
839 return true;
840 return getUnderlyingObjectAggressive(From) ==
841 getUnderlyingObjectAggressive(To);
842}
843
844bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
845 const DataLayout &DL) {
846 assert(U->getType() == To->getType() && "values must have matching types");
847 // Not a pointer, just return true.
848 if (!To->getType()->isPointerTy())
849 return true;
850
851 // Do not perform replacements in lifetime intrinsic arguments.
852 if (isa<LifetimeIntrinsic>(U.getUser()))
853 return false;
854
855 if (isPointerAlwaysReplaceable(&*U, To, DL))
856 return true;
857 return isPointerUseReplacable(U);
858}
859
860bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
861 const DataLayout &DL) {
862 assert(From->getType() == To->getType() && "values must have matching types");
863 // Not a pointer, just return true.
864 if (!From->getType()->isPointerTy())
865 return true;
866
867 return isPointerAlwaysReplaceable(From, To, DL);
868}
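
// --- Editor's illustrative sketch (not part of Loads.cpp) -------------------
// A hypothetical equality-propagation step: only rewrite uses of From with the
// provably equal pointer To when the replacement is known not to break
// pointer-provenance assumptions. The helper name `exampleReplaceIfSafe` is
// hypothetical (and assumes From is not a Constant).
static void exampleReplaceIfSafe(Value *From, Value *To, const DataLayout &DL) {
  if (From != To && canReplacePointersIfEqual(From, To, DL))
    From->replaceAllUsesWith(To);
}
// -----------------------------------------------------------------------------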
869
870bool llvm::isDereferenceableReadOnlyLoop(
871 Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
872 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
873 for (BasicBlock *BB : L->blocks()) {
874 for (Instruction &I : *BB) {
875 if (auto *LI = dyn_cast<LoadInst>(&I)) {
876 if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
877 return false;
878 } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
879 return false;
880 }
881 }
882 return true;
883}