#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true> VectorizationInterleave(
    "force-vector-interleave", cl::Hidden,
    cl::desc("Sets the vectorization interleave count. "
             "Zero is autoselect."),
    cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
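/// If \p Ptr has a symbolic stride recorded in \p PtrToStride, return its
/// SCEV with the stride replaced by 1, adding a "Stride == 1" predicate to
/// \p PSE; otherwise return the unmodified SCEV of the pointer.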
const SCEV *llvm::replaceSymbolicStrideSCEV(
    PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &PtrToStride, Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Strides are only inserted by collectStridedAccess, which always uses
  // SCEVUnknown values.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const SCEV *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));

  const SCEV *Expr = PSE.getSCEV(Ptr);
  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, const RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}
/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR
/// at \p MaxBTC is guaranteed inbounds of the accessed object.
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
    const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
    ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
    AssumptionCache *AC) {
  auto *PointerBase = SE.getPointerBase(AR->getStart());
  auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
  if (!StartPtr)
    return false;

  const Loop *L = AR->getLoop();
  bool CheckForNonNull, CheckForFreed;
  Value *StartPtrV = StartPtr->getValue();
  uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
      DL, CheckForNonNull, CheckForFreed);

  if (DerefBytes && (CheckForNonNull || CheckForFreed))
    return false;

  const SCEV *Step = AR->getStepRecurrence(SE);
  Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
  const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);

  // Check if we have a suitable dereferenceable assumption we can use.
  auto *CtxI = L->getLoopPredecessor()->getTerminator();
  RetainedKnowledge DerefRK = getKnowledgeValidInContext(
      StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT);
  if (DerefRK)
    DerefBytesSCEV = SE.getUMaxExpr(
        DerefBytesSCEV, SE.getConstant(WiderTy, DerefRK.ArgValue));

  if (DerefBytesSCEV->isZero())
    return false;

  bool IsKnownNonNegative = SE.isKnownNonNegative(Step);

  // Compute the offset of the element accessed at the last iteration,
  // checking the multiplication does not unsigned wrap.
  const SCEV *OffsetAtLastIter =
      mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
  if (!OffsetAtLastIter)
    return false;

  const SCEV *OffsetEndBytes = addSCEVNoOverflow(
      OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
  if (!OffsetEndBytes)
    return false;

  if (IsKnownNonNegative) {
    // For positive steps, check if
    //  (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize
    // is within the dereferenceable bytes of the underlying object.
    const SCEV *StartOffset = SE.getNoopOrZeroExtend(
        SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);
    const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
    if (!EndBytes)
      return false;
    return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
  }

  return false;
}
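/// Calculate the Start and End SCEV expressions for an access, using the
/// exact backedge-taken count \p BTC when computable and the maximum
/// backedge-taken count \p MaxBTC otherwise, caching the result in
/// \p PointerBounds.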
std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
    const SCEV *MaxBTC, ScalarEvolution *SE,
    DenseMap<std::pair<const SCEV *, Type *>,
             std::pair<const SCEV *, const SCEV *>> *PointerBounds,
    DominatorTree *DT, AssumptionCache *AC) {
  std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
  if (PointerBounds) {
    auto [Iter, Ins] = PointerBounds->insert(
        {{PtrExpr, AccessTy},
         {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
    if (!Ins)
      return Iter->second;
    PtrBoundsPair = &Iter->second;
  }

  const SCEV *ScStart;
  const SCEV *ScEnd;

  auto &DL = Lp->getHeader()->getDataLayout();
  Type *IdxTy = DL.getIndexType(PtrExpr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    ScStart = AR->getStart();
    if (!isa<SCEVCouldNotCompute>(BTC))
      // Evaluating AR at an exact BTC is safe: LAA separately checks that
      // accesses cannot wrap in the loop.
      ScEnd = AR->evaluateAtIteration(BTC, *SE);
    else {
      // Evaluating AR at MaxBTC may wrap and create an expression that is
      // less than the start of the AddRec due to wrapping (for example
      // consider MaxBTC = -2).
      if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE,
                                               DL, DT, AC))
        ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
      else
        ScEnd = SE->getAddExpr(
            SE->getNegativeSCEV(EltSizeSCEV),
            SE->getSCEV(ConstantExpr::getIntToPtr(
                ConstantInt::get(EltSizeSCEV->getType(), -1), AR->getType())));
    }

    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    }
  } else
    return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};

  // Add the size of the pointed element to ScEnd.
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
  if (PointerBounds)
    *PtrBoundsPair = Res;
  return Res;
}
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
      &DC.getPointerBounds(), DC.getDT(), DC.getAC());
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}
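/// Try to emit a cheaper pointer-difference check (SinkStart - SrcStart
/// compared against the access size) for this pair of groups, instead of the
/// generic two-range overlap check.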
bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // If either group contains multiple different pointers, bail out.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
    return false;

  const PointerInfo *Src = &Pointers[CGI.Members[0]],
                    *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is both read and written, multiple checks may be
  // needed. Bail out.
  auto &AccSrc = DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr);
  auto &AccSink = DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr);
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  const SCEV *SrcStart;
  const SCEV *SinkStart;
  const Loop *InnerLoop = DC.getInnermostLoop();
  const APInt *Step;
  if (!match(Src->Expr,
             m_scev_AffineAddRec(m_SCEV(SrcStart), m_scev_APInt(Step),
                                 m_SpecificLoop(InnerLoop))) ||
      !match(Sink->Expr,
             m_scev_AffineAddRec(m_SCEV(SinkStart), m_scev_APInt(Step),
                                 m_SpecificLoop(InnerLoop))))
    return false;

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;

  const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  IntegerType *IntTy =
      IntegerType::get(Src->PointerValue->getContext(),
                       DL.getPointerSizeInBits(CGI.AddressSpace));
  const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkStart, IntTy);
  const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcStart, IntTy);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt))
    return false;

  const Loop *InnermostLoop = DC.getInnermostLoop();
  if (HoistRuntimeChecks && InnermostLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnermostLoop->getParentLoop() &&
        // If the diff check would already be loop invariant (due to the
        // recurrences being the same), then we prefer to keep the diff checks
        // because they are cheaper.
        SrcStartAR->getStepRecurrence(*SE) !=
            SinkStartAR->getStepRecurrence(*SE)) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.emplace_back(&CGI, &CGJ);
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}

bool RuntimeCheckingPtrGroup::addPointer(
    unsigned Index, const RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memory checks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // If we don't have the dependency partitions, construct a new checking
  // pointer group for each pointer.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.emplace_back(I, *this);
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);

  // Go through the equivalence classes of the dependence candidates and try
  // to merge each member into an existing group.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);
    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;

    for (auto M : DepCands.members(Access)) {
      auto PointerI = PositionMap.find(M.getPointer());
      // If we can't find the pointer in PositionMap that means we didn't
      // create any checks for it.
      if (PointerI == PositionMap.end())
        continue;
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;
          TotalComparisons++;
          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new
          // group to hold the current pointer.
          Groups.emplace_back(Pointer, *this);
      }
    }

    copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC
/// output.
static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
  DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
  for (const auto &[Idx, CG] : enumerate(CheckingGroups))
    PtrIndices[&CG] = Idx;
  return PtrIndices;
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  auto PtrIndices = getPtrToIdxMap(CheckingGroups);
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;
    OS.indent(Depth) << "Check " << N++ << ":\n";
    OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
                         << ":\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
    OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
                         << ":\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  auto PtrIndices = getPtrToIdxMap(CheckingGroups);
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
    }
  }
}
/// Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for
/// data dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
        LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(const MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool AllowPartial);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() { processMemAccesses(); }

  bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }

  const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }

private:
  /// Drop alias scopes that are only valid within a single loop iteration.
  MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
    if (!ScopeList)
      return nullptr;
    if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
          return LoopAliasScopes.contains(cast<MDNode>(Scope));
        }))
      return nullptr;
    return ScopeList;
  }

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary
  /// checks.
  bool IsRTCheckAnalysisNeeded = false;
};
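/// Try to compute a constant stride for \p AR in units of the access type
/// size. Used by getPtrStride and isNoWrap.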
static std::optional<int64_t>
getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                    Value *Ptr, PredicatedScalarEvolution &PSE) {
  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return std::nullopt;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
      if (Ptr)
        dbgs() << *Ptr << " ";
      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  // Check that the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  const APInt *APStepVal;
  if (!match(Step, m_scev_APInt(APStepVal))) {
    LLVM_DEBUG({
      dbgs() << "LAA: Bad stride - Not a constant strided ";
      if (Ptr)
        dbgs() << *Ptr << " ";
      dbgs() << "SCEV: " << *AR << "\n";
    });
    return std::nullopt;
  }

  const auto &DL = Lp->getHeader()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedValue();

  // Huge step value - give up.
  std::optional<int64_t> StepVal = APStepVal->trySExtValue();
  if (!StepVal)
    return std::nullopt;

  // Strided access.
  return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
}
/// Check whether \p AR is a non-wrapping AddRec, using information from the
/// IR pointer value \p Ptr when it is non-null.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                     Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                     std::optional<int64_t> Stride = std::nullopt) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
  // the distance between the previously accessed location and the wrapped
  // location would be larger than half the pointer index type space, in
  // which case the GEP would be poison and any memory access dependent on it
  // would be immediate UB when executed.
  if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
      GEP && GEP->hasNoUnsignedSignedWrap())
    return true;

  if (!Stride)
    Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
  if (Stride) {
    // If the null pointer is undefined, then an access sequence which would
    // otherwise access it can be assumed not to unsigned wrap. Note that this
    // assumes the object in memory is aligned to the natural alignment.
    unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
    if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
        (Stride == 1 || Stride == -1))
      return true;
  }

  if (Ptr && Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                      << "LAA: Pointer: " << *Ptr << "\n"
                      << "LAA: SCEV: " << *AR << "\n"
                      << "LAA: Added an overflow assumption\n");
    return true;
  }

  return false;
}
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}

static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded our limit on recursion, just return whatever we have,
  // along with an indication of whether it might be a poison or undef value.
  const SCEV *Scev = SE->getSCEV(Ptr);
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }

  Depth--;

  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
    return get<1>(S);
  };

  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    // Not dealing with preexisting gathers yet, so no vectors.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the
    // offset. Copy the SCEV across for the one without a fork in order to
    // generate the full SCEV for both sides of the GEP.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());
    const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
    for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
      const SCEV *Base = get<0>(B);
      const SCEV *Offset = SE->getTruncateOrSignExtend(get<0>(O), IntPtrTy);
      const SCEV *Scaled = SE->getMulExpr(Size, Offset);
      ScevList.emplace_back(SE->getAddExpr(Base, Scaled),
                            NeedsFreeze || get<1>(B) || get<1>(O));
    }
    break;
  }
  case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer, so bail out for nested forks.
    findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
    if (ChildScevs.size() == 2)
      ScevList.append(ChildScevs.begin(), ChildScevs.end());
    else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
    // support two-operand phis, so bail out otherwise.
    if (I->getNumOperands() == 2) {
      findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
      findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    }
    if (ChildScevs.size() == 2)
      ScevList.append(ChildScevs.begin(), ChildScevs.end());
    else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right
    // side. Copy the SCEV across for the one without a fork.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    for (auto [L, R] : zip(LScevs, RScevs))
      ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
                            NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
}

static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap,
                  Value *Ptr, const Loop *L) {
  ScalarEvolution *SE = PSE.getSE();
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

  // For now, we will only accept a forked pointer with two possible SCEVs
  // that are either SCEVAddRecExprs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
    return Scevs;
  }

  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}
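/// Register the runtime-check bounds for \p Access with RtCheck, assigning it
/// to a dependence set; returns false if no computable, non-wrapping bound
/// exists for the pointer.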
bool AccessAnalysis::createCheckForAccess(
    RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
    unsigned &RunningDepId, unsigned ASId, bool Assume) {
  Value *Ptr = Access.getPointer();

  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
  assert(!TranslatedPtrs.empty() && "must have some translated pointers");

  // Check whether all pointers can participate in a runtime bounds check.
  // They must either be invariant or non-wrapping affine AddRecs.
  for (auto &P : TranslatedPtrs) {
    // The bounds for a loop-invariant pointer are trivial.
    if (PSE.getSE()->isLoopInvariant(P.getPointer(), TheLoop))
      continue;

    const SCEV *PtrExpr = get<0>(P);
    const auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
    if (!AR && Assume)
      AR = PSE.getAsAddRec(Ptr);
    if (!AR || !AR->isAffine())
      return false;

    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1) {
      AR =
          cast<SCEVAddRecExpr>(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
      P.setPointer(AR);
    }

    if (!isNoWrap(PSE, AR, TranslatedPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
                  TheLoop, Assume))
      return false;
  }

  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(
    RuntimePointerChecking &RtCheck, Loop *TheLoop,
    const DenseMap<Value *, const SCEV *> &StridesMap,
    Value *&UncomputablePtr, bool AllowPartial) {
  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (const auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;
    auto ASPointers = AS.getPointers();

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set.
    // Also collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
      Value *Ptr = const_cast<Value *>(ConstPtr);
      bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are only
    // reads or only one write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((ASPointers.size() <= 1 ||
              all_of(ASPointers,
                     [this](const Value *Ptr) {
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
                                                 true);
                       return !DepCands.contains(AccessWrite);
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.emplace_back(Access, AccessTy);
          CanDoAliasSetRT = false;
        }
      }
    }

    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to
    // re-try any bound checks (because in that case the number of dependence
    // sets is incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (const auto &[Access, AccessTy] : Retries) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          if (!AllowPartial)
            break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have
  // different address spaces, assume the values aren't directly comparable,
  // so we can't use them for the runtime check.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;
      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && (CanDoRT || AllowPartial))
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same
  // underlying object, for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
         "CanDoRTIfNeeded depends on RtCheck.Need");
  if (!CanDoRTIfNeeded && !AllowPartial)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.
  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG({
    for (const auto &[A, _] : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.contains(A.getPointer())
                                   ? "read-only"
                                   : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap, so we only
  // need to check for potential pointer dependencies within each alias set.
  for (const auto &AS : AST) {
    // Both the alias-set tracker and the alias sets use ordered collections,
    // so the iteration order here is deterministic.
    auto ASPointers = AS.getPointers();

    bool SetHasWrite = false;

    // Map of (underlying object, address space) to last access encountered.
    typedef DenseMap<std::pair<const Value *, unsigned>, MemAccessInfo>
        UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const Value *ConstPtr : ASPointers) {
        Value *Ptr = const_cast<Value *>(ConstPtr);

        // For a single access, Accesses may contain both a read and a write,
        // and both need to be handled for CheckDeps.
        for (const auto &[AC, _] : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, it contains only reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.contains(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round; they need to be checked after we have seen all
          // write pointers.
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys; the type values don't matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write, check other reads and writes for conflicts.
          // If this is a read-only pointer, check other writes (this catches
          // "a[i] = a[i] + ..." without a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          SmallVector<const Value *, 16> UOs;
          ::getUnderlyingObjects(Ptr, UOs, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : UOs) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            auto [It, Inserted] = ObjToLastAccess.try_emplace(
                {UnderlyingObj,
                 cast<PointerType>(Ptr->getType())->getAddressSpace()},
                Access);
            if (!Inserted) {
              DepCands.unionSets(Access, It->second);
              It->second = Access;
            }

            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
    return 0;

  assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return std::nullopt;
  }

  std::optional<int64_t> Stride =
      getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
  if (!ShouldCheckWrap || !Stride)
    return Stride;

  if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, Stride))
    return Stride;

  LLVM_DEBUG(
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
             << *Ptr << " SCEV: " << *AR << "\n");
  return std::nullopt;
}
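/// Compute the distance between \p PtrA and \p PtrB in units of \p ElemTyA's
/// store size, either via constant-offset stripping when both share the same
/// base, or via SCEV otherwise.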
std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
                                             Type *ElemTyB, Value *PtrB,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             bool StrictCheck, bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return std::nullopt;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return std::nullopt;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  const Value *PtrA1 =
      PtrA->stripAndAccumulateConstantOffsets(DL, OffsetA, true);
  const Value *PtrB1 =
      PtrB->stripAndAccumulateConstantOffsets(DL, OffsetB, true);

  std::optional<int64_t> Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks
    // through `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return std::nullopt;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    std::optional<APInt> Diff =
        SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
    if (!Diff)
      return std::nullopt;
    Val = Diff->trySExtValue();
  }

  if (!Val)
    return std::nullopt;

  int64_t Size = DL.getTypeStoreSize(ElemTyA);
  int64_t Dist = *Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == *Val)
    return Dist;
  return std::nullopt;
}
1702 "Expected list of pointer operands.");
1705 Value *Ptr0 = VL[0];
1707 using DistOrdPair = std::pair<int64_t, unsigned>;
1709 std::set<DistOrdPair,
decltype(Compare)> Offsets(Compare);
1710 Offsets.emplace(0, 0);
1711 bool IsConsecutive =
true;
1713 std::optional<int64_t> Diff =
1721 auto [It, IsInserted] = Offsets.emplace(
Offset,
Idx);
1725 IsConsecutive &= std::next(It) == Offsets.end();
1727 SortedIndices.
clear();
1728 if (!IsConsecutive) {
1732 SortedIndices[
Idx] = Off.second;
1746 std::optional<int64_t> Diff =
1755 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1756 InstMap.push_back(SI);
1764 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1765 InstMap.push_back(LI);
MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case IndirectUnsafe:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
  case IndirectUnsafe:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}
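/// Given a backward dependence of \p Distance bytes, determine whether
/// vectorizing would defeat store-to-load forwarding, and shrink the safe
/// forwarding distance accordingly.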
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize,
                                                    unsigned CommonStride) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place. Vectorizing such
  // positive dependences can make the generated code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  // hence on a typical architecture store-load forwarding does not take
  // place.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
      std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
               MaxStoreLoadForwardSafeDistanceInBits);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize;
       VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (CommonStride &&
      MaxVFWithoutSLForwardIssuesPowerOf2 <
          MaxStoreLoadForwardSafeDistanceInBits &&
      MaxVFWithoutSLForwardIssuesPowerOf2 !=
          VectorizerParams::MaxVectorWidth * TypeByteSize) {
    uint64_t MaxVF =
        bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
    uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
    MaxStoreLoadForwardSafeDistanceInBits =
        std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
  }
  return false;
}
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &MaxBTC, const SCEV &Dist,
                                     uint64_t MaxStride) {
  // If we can prove that |Dist| > MaxBTC * MaxStride, there is no dependence.
  const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
  const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // the multiplication of the absolute stride in bytes and the
  // backedge-taken count is non-negative, so we zero extend Product.
  if (DistTypeSizeBits > ProductTypeSizeBits)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (MaxBTC * MaxStride) > 0?
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: is -Dist - (MaxBTC * MaxStride) > 0?
  Minus = SE.getMinusSCEV(SE.getNegativeSCEV(CastedDist), CastedProduct);
  return SE.isKnownPositive(Minus);
}

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance in bytes, and \p TypeByteSize is the
/// size in bytes of the accessed type.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  // If the distance is not a multiple of the stride, the accessed addresses
  // interleave without overlapping and the accesses are independent.
  return Distance % Stride;
}
bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
                                                          Type *SrcTy,
                                                          const SCEV *Sink,
                                                          Type *SinkTy) {
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
  ScalarEvolution &SE = *PSE.getSE();
  const auto &[SrcStart_, SrcEnd_] =
      getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
                              &SE, &PointerBounds, DT, AC);
  if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
    return false;

  const auto &[SinkStart_, SinkEnd_] =
      getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
                              &SE, &PointerBounds, DT, AC);
  if (isa<SCEVCouldNotCompute>(SinkStart_) ||
      isa<SCEVCouldNotCompute>(SinkEnd_))
    return false;

  // The accesses are independent if Src's range ends before Sink's starts or
  // vice versa.
  return SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd_, SinkStart_) ||
         SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd_, SrcStart_);
}
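/// Compute the dependence distance, the scaled strides and the access size
/// for the pair (A, B), or classify the dependence directly when one of the
/// early exits (two reads, different address spaces, non-constant or
/// opposite-direction strides) applies.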
std::variant<MemoryDepChecker::Dependence::DepType,
             MemoryDepChecker::DepDistanceStrideAndSizeInfo>
MemoryDepChecker::getDependenceDistanceStrideAndSize(
    const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
    const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
  const auto &DL = InnermostLoop->getHeader()->getDataLayout();
  auto &SE = *PSE.getSE();
  const auto &[APtr, AIsWrite] = A;
  const auto &[BPtr, BIsWrite] = B;

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  Type *ATy = getLoadStoreType(AInst);
  Type *BTy = getLoadStoreType(BInst);

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  std::optional<int64_t> StrideAPtr =
      getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
  std::optional<int64_t> StrideBPtr =
      getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of
  // the dependence when measuring the distance between them. We should not
  // swap AIsWrite with BIsWrite, as their uses expect them in program order.
  if (StrideAPtr && *StrideAPtr < 0) {
    std::swap(Src, Sink);
    std::swap(AInst, BInst);
    std::swap(ATy, BTy);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
                    << ": " << *Dist << "\n");

  // Need accesses with constant strides and the same direction for further
  // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
  // similar code or pointer arithmetic that could wrap in the address space.
  if (!StrideAPtr || !StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return MemoryDepChecker::Dependence::IndirectUnsafe;
  }

  int64_t StrideAPtrInt = *StrideAPtr;
  int64_t StrideBPtrInt = *StrideBPtr;
  LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
                    << " Sink induction step: " << StrideBPtrInt << "\n");

  // At least Src or Sink is loop invariant and the other is strided or
  // invariant. We can generate a runtime check to disambiguate the accesses.
  if (!StrideAPtrInt || !StrideBPtrInt)
    return MemoryDepChecker::Dependence::Unknown;

  // Both Src and Sink have a constant stride; check if they are in the same
  // direction.
  if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
    LLVM_DEBUG(
        dbgs() << "Pointer access with strides in different directions\n");
    return MemoryDepChecker::Dependence::Unknown;
  }

  TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
  TypeSize BStoreSz = DL.getTypeStoreSize(BTy);

  // If store sizes are not the same, set TypeByteSize to zero, so we can
  // check it in the caller isDependent.
  uint64_t ASz = DL.getTypeAllocSize(ATy);
  uint64_t BSz = DL.getTypeAllocSize(BTy);
  uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;

  uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
  uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;

  uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);

  std::optional<uint64_t> CommonStride;
  if (StrideAScaled == StrideBScaled)
    CommonStride = StrideAScaled;

  // If the distance is not a constant, we may still be able to disambiguate
  // the accesses with runtime checks when the strides match.
  if (!isa<SCEVConstant>(Dist))
    ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;

  if (isa<SCEVCouldNotCompute>(Dist)) {
    LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
    return Dependence::Unknown;
  }

  return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
                                      TypeByteSize, AIsWrite, BIsWrite);
}
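/// Classify the dependence between the accesses at index \p AIdx and \p BIdx
/// (in program order) into one of the DepType kinds, updating the maximum
/// safe vector width as a side effect.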
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  // Check if we can prove that Sink only accesses memory after Src's end or
  // vice versa. The helper is evaluated lazily, on demand.
  auto CheckCompletelyBeforeOrAfter = [&]() {
    auto *APtr = A.getPointer();
    auto *BPtr = B.getPointer();
    Type *ATy = getLoadStoreType(InstMap[AIdx]);
    Type *BTy = getLoadStoreType(InstMap[BIdx]);
    const SCEV *Src = PSE.getSCEV(APtr);
    const SCEV *Sink = PSE.getSCEV(BPtr);
    return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
  };

  // Get the dependence distance, stride, type size and what access writes for
  // the dependence between A and B.
  auto Res =
      getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
  if (std::holds_alternative<Dependence::DepType>(Res)) {
    if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
        CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    return std::get<Dependence::DepType>(Res);
  }

  auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
      std::get<DepDistanceStrideAndSizeInfo>(Res);
  bool HasSameSize = TypeByteSize > 0;

  ScalarEvolution &SE = *PSE.getSE();
  const APInt *APDist = nullptr;
  uint64_t ConstDist =
      match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;

  // If the distance between the accesses and their strides are known
  // constants, check whether the accesses interlace each other.
  if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
      areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (SE.isKnownNonPositive(Dist)) {
    if (SE.isKnownNonNegative(Dist)) {
      if (HasSameSize)
        // Write to the same location with the same size.
        return Dependence::Forward;
      LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
                           "different type sizes\n");
      return Dependence::Unknown;
    }

    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    // Check if the first access writes to a location that is read in a later
    // iteration, where the distance between them is not a multiple of a
    // vector factor and relatively small.
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        ConstDist && HasSameSize &&
        couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
      LLVM_DEBUG(
          dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
  if (MinDistance <= 0) {
    if (CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    return Dependence::Unknown;
  }

  if (!HasSameSize) {
    if (CheckCompletelyBeforeOrAfter())
      return Dependence::NoDep;
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
                         "different type sizes\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
                      << MinDistance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the smallest
  // dependence distance seen so far.
  if (MinDistanceNeeded > MinDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  MinDepDistBytes =
      std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
      couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  uint64_t MaxVF = MinDepDistBytes / MaxStride;
  LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
                    << " with max VF = " << MaxVF << '\n');

  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
    // For non-constant distances, we checked the lower bound of the
    // dependence distance and the distance may be larger at runtime (and safe
    // for vectorization). Classify it as Unknown, so we re-try with runtime
    // checks, unless we can prove the accesses cannot overlap.
    return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
                                          : Dependence::Unknown;
  }

  if (CheckCompletelyBeforeOrAfter())
    return Dependence::NoDep;

  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
bool MemoryDepChecker::areDepsSafe(const DepCandidates &DepCands,
                                   const MemAccessInfoList &CheckDeps) {
  MinDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.contains(CurAccess))
      continue;

    // Check accesses within this equivalence class.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        DepCands.findLeader(CurAccess);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        DepCands.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      auto &Acc = Accesses[*AI];
      // Check loads only against the next equivalent class, but stores also
      // against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
             I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalent class.
          for (std::vector<unsigned>::iterator
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences.  In that case return as soon as we find the first
            // unsafe dependence.  This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.emplace_back(A.second, B.second, Type);

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
  MemAccessInfo Access(Ptr, IsWrite);
  auto I = Accesses.find(Access);
  SmallVector<Instruction *, 4> Insts;
  transform(I->second, std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep",
    "Unknown",
    "IndirectUnsafe",
    "Forward",
    "ForwardButPreventsForwarding",
    "Backward",
    "BackwardVectorizable",
    "BackwardVectorizableButPreventsForwarding"};
bool LoopAccessInfo::canAnalyzeLoop() {
  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the symbolic max backedge taken
  // count, which is an upper bound on the number of loop iterations.
  const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;
  SmallPtrSet<MDNode *, 8> LoopAliasScopes;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // Traverse blocks in fixed RPOT order, regardless of their storage in the
  // loop info, as it may be arbitrary.
  LoopBlocksRPO RPOT(TheLoop);
  RPOT.perform(LI);
  for (BasicBlock *BB : RPOT) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // With both a non-vectorizable memory instruction and a convergent
      // operation, found in this loop, no reason to continue the search.
      if (HasComplexMemInst && HasConvergentOp)
        return false;

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // Record alias scopes defined inside the loop.
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        for (Metadata *Op : Decl->getScopeList()->operands())
          LoopAliasScopes.insert(cast<MDNode>(Op));

      auto *Call = dyn_cast<CallInst>(&I);

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, we only allow it if it's a call to a function with
      // a vector mapping and no pointer arguments.
      if (I.mayReadFromMemory()) {
        auto hasPointerArgs = [](CallBase *CB) {
          return any_of(CB->args(), [](Value const *Arg) {
            return Arg->getType()->isPointerTy();
          });
        };

        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst)
    return false;

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    return true;
  }

  MemoryDepChecker::DepCandidates DepCands;
  AccessAnalysis Accesses(TheLoop, AA, LI, DepCands, *PSE, LoopAliasScopes);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list).
  SmallSet<std::pair<Value *, Type *>, 16> Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  SmallPtrSet<Value *, 16> UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isInvariant(Ptr)) {
      // Record store instructions to loop invariant addresses.
      StoresToInvariantAddresses.push_back(ST);
      HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    Type *AccessTy = getLoadStoreType(ST);
    if (Seen.insert({Ptr, AccessTy}).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    return true;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the read list. If
    // we *did* see it before, then it is already in the read-write list. This
    // allows us to vectorize expressions such as A[i] += x; because the
    // address of A[i] is a read-write pointer. This only works if the index
    // of A[i] is consecutive. If the address of i is unknown (for example
    // A[B[i]]), some of the loaded words may overlap written ones.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and store to the same uniform address.
    if (UniformStores.contains(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    return true;
  }

  // Build dependence sets and check whether we need a runtime pointer check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
      *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
  if (!HasCompletePtrRtChecking) {
    const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    return false;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  bool DepsAreSafe = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    DepsAreSafe =
        DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());

    if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      UncomputablePtr = nullptr;
      HasCompletePtrRtChecking =
          Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
                                   UncomputablePtr, AllowPartial);

      // Check that we found the bounds for the pointer.
      if (!HasCompletePtrRtChecking) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        return false;
      }
      DepsAreSafe = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    return false;
  }

  if (DepsAreSafe) {
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
    return true;
  }

  emitUnsafeDependenceRemark();
  return false;
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
  const auto *Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  const auto *Found =
      llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
        return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
               MemoryDepChecker::VectorizationSafetyStatus::Safe;
      });
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit remark for first unsafe dependence
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma clang loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  OptimizationRemarkAnalysis &R =
      recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;

  switch (Dep.Type) {
  case MemoryDepChecker::Dependence::NoDep:
  case MemoryDepChecker::Dependence::Forward:
  case MemoryDepChecker::Dependence::BackwardVectorizable:
    llvm_unreachable("Unexpected dependence");
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::IndirectUnsafe:
    R << "\nUnsafe indirect dependence.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  }

  if (Instruction *I = Dep.getSource(getDepChecker())) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}
OptimizationRemarkAnalysis &
LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
  assert(!Report && "Multiple reports generated");

  const Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  if (TheLoop->isLoopInvariant(V))
    return true;
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);
  return SE->isLoopInvariant(S, TheLoop);
}
/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
/// Otherwise, return \p Ptr.
static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
                                       Loop *Lp) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  Value *V = Ptr;
  for (const Use &U : GEP->operands()) {
    if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
      if (V == Ptr)
        V = U;
      else
        // There must be exactly one loop-variant operand.
        return Ptr;
    }
  }
  return V;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic strides
/// "a[i*stride]". Returns the symbolic stride, or null otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
                                        Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy)
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually index at
  // this point) easier analyzable.
  Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  // Strip off casts.
  while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    V = C->getOperand();

  // Look for the loop invariant symbolic value.
  if (isa<SCEVUnknown>(V))
    return V;

  if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    if (isa<SCEVUnknown>(C->getOperand()))
      return V;

  return nullptr;
}
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note that the stride detection is a profitability heuristic: versioning
  // is only worthwhile if the unit-stride assumption enables vectorization.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();

  // Match the types so we can compare the stride and the MaxBTC.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedge-taken count is non-negative, so we zero extend MaxBTC.
  ScalarEvolution *SE = PSE->getSE();
  const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking
  // "Stride >= TripCount" is equivalent to checking
  // "Stride - MaxBTC > 0".
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               AssumptionCache *AC, bool AllowPartial)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI && !TTI->enableScalableVectorization())
    // Scale the vector width by 2 as a rough estimate to also consider
    // interleaving.
    MaxTargetVectorWidthInBits =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
            .getFixedValue() *
        2;

  DepChecker = std::make_unique<MemoryDepChecker>(
      *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits);
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    const MemoryDepChecker &DC = getDepChecker();
    if (!DC.isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DC.getMaxSafeVectorWidthInBits() << " bits";
    if (uint64_t SLDist = DC.getStoreLoadForwardSafeDistanceInBits();
        !DC.isSafeForAnyStoreLoadForwardDistances())
      OS << ", with a maximum safe store-load forward width of " << SLDist
         << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
    OS.indent(Depth) << "Generated run-time checks are incomplete\n";
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
                                                     bool AllowPartial) {
  const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);

  // We need to create the LoopAccessInfo if we don't already have one, or if
  // it was created with a different value of AllowPartial.
  if (Inserted || It->second->hasAllowPartial() != AllowPartial)
    It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
                                                  &LI, AC, AllowPartial);

  return *It->second;
}

void LoopAccessInfoManager::clear() {
  // Collect LoopAccessInfo entries that may keep references to IR outside the
  // analyzed loop or SCEVs that may have been modified or invalidated. At the
  // moment, that is loops requiring memory or SCEV runtime checks, as those
  // cache SCEVs, e.g. for pointer expressions.
  SmallVector<Loop *> ToRemove;
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }

  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
This header defines various interfaces for pass management in LLVM.
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static const SCEV * addSCEVNoOverflow(const SCEV *A, const SCEV *B, ScalarEvolution &SE, const Instruction *CtxI)
Returns A + B, if it is guaranteed not to unsigned wrap.
static std::optional< int64_t > getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy, Value *Ptr, PredicatedScalarEvolution &PSE)
Try to compute a constant stride for AR.
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))
static DenseMap< const RuntimeCheckingPtrGroup *, unsigned > getPtrToIdxMap(ArrayRef< RuntimeCheckingPtrGroup > CheckingGroups)
Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static const SCEV * mulSCEVOverflow(const SCEV *A, const SCEV *B, ScalarEvolution &SE, const Instruction *CtxI)
Returns A * B, if it is guaranteed not to unsigned wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR, Value *Ptr, Type *AccessTy, const Loop *L, bool Assume, std::optional< int64_t > Stride=std::nullopt)
Check whether AR is a non-wrapping AddRec.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &MaxBTC, const SCEV &Dist, uint64_t MaxStride)
Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static Value * getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If Ptr is a GEP, which has a loop-variant operand, return that operand.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize, ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Return true, if evaluating AR at MaxBTC cannot wrap, because AR at MaxBTC is guaranteed inbounds of t...
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
This file provides utility analysis objects describing memory locations.
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
A private abstract base class describing the concept of an individual alias analysis implementation.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
int64_t getSExtValue() const
Get sign extended value.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
@ ICMP_UGE
unsigned greater or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
Analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator_range< member_iterator > members(const ECValue &ECV) const
bool contains(const ElemTy &V) const
Returns true if V is contained an equivalence class.
const ECValue & insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
member_iterator findLeader(const ElemTy &V) const
findLeader - Given a value in the set, return a member iterator for the equivalence class it is in.
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
PointerType * getType() const
Global values are always pointers.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Value * getPointerOperand()
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const LoopAccessInfo & getInfo(Loop &L, bool AllowPartial=false)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
LLVM_ABI bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
LLVM_ABI LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI, AssumptionCache *AC, bool AllowPartial=false)
static LLVM_ABI bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V, bool HasCoroSuspendInst=false) const
Return true if the specified value is loop invariant.
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
ArrayRef< MDOperand > operands() const
Tracking metadata reference owned by Metadata.
This class implements a map that also provides access to all stored values in a deterministic order.
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
DominatorTree * getDT() const
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyStoreLoadForwardDistances() const
Return true if there are no store-load forwarding dependencies.
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe, and records the dependence information ...
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
bool shouldRetryWithRuntimeChecks() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
AssumptionCache * getAC() const
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
LLVM_ABI SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
@ PossiblySafeWithRtChecks
LLVM_ABI void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
uint64_t getStoreLoadForwardSafeDistanceInBits() const
Return safe power-of-2 number of elements, which do not prevent store-load forwarding,...
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
LLVM_ABI void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
LLVM_ABI bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
LLVM_ABI void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static LLVM_ABI bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
The main scalar evolution driver.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?...
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
bool contains(const T &V) const
Check if the SmallSet contains the given element.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
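A small usage sketch for these set types; the names and the visitOnce helper are illustrative, not from this file:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static void visitOnce(ArrayRef<Instruction *> Insts) {
  SmallPtrSet<Instruction *, 8> Visited; // inline storage for 8 pointers
  SmallSet<unsigned, 4> DepSetIds;       // small, non-pointer elements
  for (Instruction *I : Insts) {
    // insert() returns {iterator, bool}; the bool is true on first insertion.
    if (!Visited.insert(I).second)
      continue; // already seen
  }
  DepSetIds.insert(0u);
  (void)DepSetIds.contains(0u); // membership test without modification
}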
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
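And a sketch for SmallVector, which the analysis uses pervasively for member lists:

#include "llvm/ADT/SmallVector.h"
using namespace llvm;

static SmallVector<unsigned, 2> collectMembers() {
  SmallVector<unsigned, 2> Members; // first 2 elements live inline
  Members.push_back(0);             // copy an existing value
  Members.emplace_back(1u);         // construct in place from args
  return Members;
}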
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can be freed in the scope for which the SSA value defining the allocation is statically defined.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
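A sketch combining these Value queries to learn what is statically known about a pointer; inspectPointer is a hypothetical name:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

static void inspectPointer(const Value *Ptr, const DataLayout &DL) {
  // Walk through constant GEPs etc., accumulating the byte offset.
  APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds=*/true);
  // Ask how many leading bytes of Base are known dereferenceable.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  (void)DerefBytes;
  (void)Base->canBeFreed(); // may the object be freed while still referenced?
}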
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
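A tiny sketch of the indent helper; the message text is illustrative:

#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printChecksHeader(raw_ostream &OS, unsigned Depth) {
  // indent() emits Depth spaces and returns the stream for chaining.
  OS.indent(Depth) << "Run-time memory checks:\n";
}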
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool match(Val *V, const Pattern &P)
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
class_match< const SCEVConstant > m_SCEVConstant()
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
class_match< const SCEV > m_SCEV()
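A sketch of these SCEV pattern matchers, destructuring an affine add recurrence {Start,+,Step}; the wildcard m_SCEV() matches any subexpression and m_scev_APInt binds a constant step. hasConstantStep is a hypothetical helper:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
using namespace llvm;
using namespace llvm::SCEVPatternMatch;

// Returns true iff Expr is {anything,+,constant} and binds the step.
static bool hasConstantStep(const SCEV *Expr, const APInt *&StepC) {
  return match(Expr, m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(StepC)));
}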
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the zero-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
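A sketch of these range helpers on plain containers (illustrative values only):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

static bool rangesDemo(ArrayRef<int> A, ArrayRef<int> B) {
  // all_of: predicate over a whole range, no begin()/end() boilerplate.
  bool AllNonNeg = all_of(A, [](int V) { return V >= 0; });
  // drop_begin: view of A without its first element; enumerate counts
  // from 0 over that shortened range.
  for (auto [Idx, V] : enumerate(drop_begin(A))) {
    (void)Idx;
    (void)V;
  }
  // zip: parallel iteration, stopping at the end of the shorter range.
  for (auto [X, Y] : zip(A, B))
    (void)(X + Y);
  return AllNonNeg;
}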
unsigned getPointerAddressSpace(const Type *T)
LLVM_ABI std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
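A sketch combining any_of with the load/store helpers listed here; anyStoreTo is a hypothetical name:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool anyStoreTo(ArrayRef<Instruction *> MemInsts, const Value *P) {
  return any_of(MemInsts, [P](Instruction *I) {
    // getLoadStorePointerOperand returns nullptr for non-memory ops.
    return isa<StoreInst>(I) && getLoadStorePointerOperand(I) == P;
  });
}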
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
LLVM_ABI std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
LLVM_ABI std::optional< int64_t > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to compute the distance between them.
LLVM_ABI bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices, if reordering is required.
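A sketch of the stride query; a std::nullopt result means the access is not known to have a constant stride. isUnitStrided is a hypothetical helper:

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include <optional>
using namespace llvm;

static bool isUnitStrided(PredicatedScalarEvolution &PSE, Type *AccessTy,
                          Value *Ptr, const Loop *L) {
  // The stride is returned in units of the access type's size.
  std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, L);
  return Stride == 1;
}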
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the knowledge is suitable to be used in the context of CtxI.
LLVM_ABI const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one, assuming the SCEV predicate associated with the loop is true.
LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
LLVM_ABI std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > *PointerBounds, DominatorTree *DT, AssumptionCache *AC)
Calculate Start and End points of memory access using the exact backedge-taken count BTC if computable, or the maximum backedge-taken count MaxBTC otherwise.
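A sketch chaining the two helpers above: first version the pointer's SCEV under the symbolic-stride assumption, then compute the accessed [start, end) bounds. boundsForPointer and the local cache are illustrative:

#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;

static std::pair<const SCEV *, const SCEV *>
boundsForPointer(const Loop *Lp, PredicatedScalarEvolution &PSE,
                 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr,
                 Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC,
                 DominatorTree *DT, AssumptionCache *AC) {
  // Rewrite symbolic strides to 1 (recorded as a SCEV predicate on PSE).
  const SCEV *PtrExpr = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  // Local cache keyed by (pointer SCEV, access type), as in the signature.
  DenseMap<std::pair<const SCEV *, Type *>,
           std::pair<const SCEV *, const SCEV *>>
      Bounds;
  return getStartAndEndForAccess(Lp, PtrExpr, AccessTy, BTC, MaxBTC,
                                 PSE.getSE(), &Bounds, DT, AC);
}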
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
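A sketch of that collection step; hasSingleUnderlyingObject is a hypothetical helper:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Does Ptr provably trace back to exactly one underlying object?
static bool hasSingleUnderlyingObject(const Value *Ptr) {
  SmallVector<const Value *, 4> Objects;
  getUnderlyingObjects(Ptr, Objects); // walks phis/selects up to a depth cap
  return Objects.size() == 1;
}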
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
IR Values for the lower and upper bounds of a pointer evolution.
MDNode * Scope
The tag for alias scope specification (used with noalias).
MDNode * TBAA
The tag for type-based alias analysis.
MDNode * NoAlias
The tag specifying the noalias scope.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Dependence between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
LLVM_ABI bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
LLVM_ABI bool isForward() const
Lexically forward dependence.
LLVM_ABI bool isBackward() const
Lexically backward dependence.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
@ BackwardVectorizableButPreventsForwarding
@ ForwardButPreventsForwarding
static LLVM_ABI const char * DepName[]
String version of the types.
static LLVM_ABI VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
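A sketch of consuming the recorded dependences; Deps and DepChecker are assumed to come from the memory dependence analysis, and anyBackward is a hypothetical helper:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;

static bool anyBackward(ArrayRef<MemoryDepChecker::Dependence> Deps,
                        const MemoryDepChecker &DepChecker) {
  for (const MemoryDepChecker::Dependence &Dep : Deps) {
    // Source/destination instructions are resolved via the checker.
    Instruction *Src = Dep.getSource(DepChecker);
    Instruction *Dst = Dep.getDestination(DepChecker);
    (void)Src;
    (void)Dst;
    if (Dep.isBackward()) // lexically backward: may block vectorization
      return true;
  }
  return false;
}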
Represents a piece of information held inside an operand bundle of an llvm.assume.
unsigned AddressSpace
Address space of the involved pointers.
LLVM_ABI bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g. because it may be poison outside the loop.
LLVM_ABI RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
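Conceptually (a hedged sketch, not code from this file), two checking groups only require an emitted runtime check if their [Low, High) byte ranges cannot be proven disjoint:

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool mayOverlap(const RuntimeCheckingPtrGroup &A,
                       const RuntimeCheckingPtrGroup &B,
                       ScalarEvolution &SE) {
  // Disjoint if one group's upper bound is at or below the other's lower.
  bool Disjoint = SE.isKnownPredicate(ICmpInst::ICMP_ULE, A.High, B.Low) ||
                  SE.isKnownPredicate(ICmpInst::ICMP_ULE, B.High, A.Low);
  return !Disjoint;
}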
static LLVM_ABI const unsigned MaxVectorWidth
Maximum SIMD width.
static LLVM_ABI unsigned VectorizationFactor
VF as overridden by the user.
static LLVM_ABI unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of comparisons.
static LLVM_ABI bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static LLVM_ABI unsigned VectorizationInterleave
Interleave factor as overridden by the user.
static LLVM_ABI bool HoistRuntimeChecks
Function object to check whether the first component of a container supported by std::get (like std::pair and std::tuple) compares less than the first component of another container.