19 const DependencyGraph &DAG) {
20 auto Skip = [&DAG](
auto OpIt) {
21 auto *
I = dyn_cast<Instruction>((*OpIt).get());
22 return I ==
nullptr || DAG.getNode(
I) ==
nullptr;
24 while (OpIt != OpItE &&
Skip(OpIt))
31 if (!isa<MemDGNode>(N)) {
32 assert(OpIt != OpItE &&
"Can't dereference end iterator!");
33 return DAG->getNode(cast<Instruction>((
Value *)*OpIt));
38 return DAG->getNode(cast<Instruction>((
Value *)*OpIt));
40 assert(MemIt != cast<MemDGNode>(N)->MemPreds.end() &&
41 "Cant' dereference end iterator!");
47 if (!isa<MemDGNode>(N)) {
48 assert(OpIt != OpItE &&
"Already at end!");
51 OpIt = PredIterator::skipBadIt(OpIt, OpItE, *DAG);
59 OpIt = PredIterator::skipBadIt(OpIt, OpItE, *DAG);
63 assert(MemIt != cast<MemDGNode>(N)->MemPreds.end() &&
"Already at end!");
69 assert(DAG ==
Other.DAG &&
"Iterators of different DAGs!");
70 assert(N ==
Other.N &&
"Iterators of different nodes!");
71 return OpIt ==
Other.OpIt && MemIt ==
Other.MemIt;
75 if (this->SB !=
nullptr)
76 this->SB->eraseFromBundle(
this);
83 SB->eraseFromBundle(
this);
95 static constexpr const unsigned Indent = 4;
96 for (
auto *Pred : MemPreds)
97 OS.indent(Indent) <<
"<-" << *Pred->getInstruction() <<
"\n";
109 I =
I->getNextNode();
112 return cast<MemDGNode>(DAG.
getNode(
I));
122 I =
I->getPrevNode();
125 return cast<MemDGNode>(DAG.
getNode(
I));
135 if (TopMemN ==
nullptr)
138 assert(BotMemN !=
nullptr &&
"TopMemN should be null too!");
143DependencyGraph::DependencyType
148 return DependencyType::ReadAfterWrite;
150 return DependencyType::WriteAfterWrite;
153 return DependencyType::WriteAfterRead;
155 if (isa<sandboxir::PHINode>(FromI) || isa<sandboxir::PHINode>(ToI))
156 return DependencyType::Control;
158 return DependencyType::Control;
161 return DependencyType::Other;
162 return DependencyType::None;
167 if (
auto *LI = dyn_cast<LoadInst>(
I))
168 return !LI->isUnordered();
169 if (
auto *SI = dyn_cast<StoreInst>(
I))
170 return !SI->isUnordered();
175 bool Is = IsOrdered(
I);
177 "An ordered instruction must be a MemDepCandidate!");
182 DependencyType DepType) {
183 std::optional<MemoryLocation> DstLocOpt =
189 "Expected a mem instr");
196 case DependencyType::ReadAfterWrite:
197 case DependencyType::WriteAfterWrite:
199 case DependencyType::WriteAfterRead:
206bool DependencyGraph::hasDep(Instruction *SrcI, Instruction *DstI) {
207 DependencyType RoughDepType = getRoughDepType(SrcI, DstI);
208 switch (RoughDepType) {
209 case DependencyType::ReadAfterWrite:
210 case DependencyType::WriteAfterWrite:
211 case DependencyType::WriteAfterRead:
212 return alias(SrcI, DstI, RoughDepType);
213 case DependencyType::Control:
219 case DependencyType::Other:
221 case DependencyType::None:
227void DependencyGraph::scanAndAddDeps(MemDGNode &DstN,
229 assert(isa<MemDGNode>(DstN) &&
230 "DstN is the mem dep destination, so it must be mem");
231 Instruction *DstI = DstN.getInstruction();
234 for (MemDGNode &SrcN :
reverse(SrcScanRange)) {
235 Instruction *SrcI = SrcN.getInstruction();
236 if (hasDep(SrcI, DstI))
237 DstN.addMemPred(&SrcN);
241void DependencyGraph::setDefUseUnscheduledSuccs(
250 for (Instruction &
I : NewInterval) {
251 for (Value *
Op :
I.operands()) {
252 auto *OpI = dyn_cast<Instruction>(
Op);
256 if (OpI->getParent() !=
I.getParent())
258 if (!NewInterval.contains(OpI))
263 ++OpN->UnscheduledSuccs;
268 bool NewIsAbove = DAGInterval.empty() || NewInterval.comesBefore(DAGInterval);
269 const auto &TopInterval = NewIsAbove ? NewInterval : DAGInterval;
270 const auto &BotInterval = NewIsAbove ? DAGInterval : NewInterval;
281 for (Instruction &BotI : BotInterval) {
284 if (BotN->scheduled())
286 for (Value *
Op : BotI.operands()) {
287 auto *OpI = dyn_cast<Instruction>(
Op);
293 if (!TopInterval.contains(OpI))
295 ++OpN->UnscheduledSuccs;
303 MemDGNode *LastMemN = dyn_cast<MemDGNode>(LastN);
307 if (
auto *MemN = dyn_cast<MemDGNode>(
N)) {
308 MemN->setPrevNode(LastMemN);
313 if (!DAGInterval.empty()) {
314 bool NewIsAbove = NewInterval.comesBefore(DAGInterval);
315 const auto &TopInterval = NewIsAbove ? NewInterval : DAGInterval;
316 const auto &BotInterval = NewIsAbove ? DAGInterval : NewInterval;
321 assert((LinkTopN ==
nullptr || LinkBotN ==
nullptr ||
322 LinkTopN->comesBefore(LinkBotN)) &&
324 if (LinkTopN !=
nullptr && LinkBotN !=
nullptr) {
325 LinkTopN->setNextNode(LinkBotN);
330 auto UnionIntvl = DAGInterval.getUnionInterval(NewInterval);
335 if (ChainTopN !=
nullptr && ChainBotN !=
nullptr) {
336 for (
auto *
N = ChainTopN->getNextNode(), *LastN = ChainTopN;
N !=
nullptr;
337 LastN =
N,
N =
N->getNextNode()) {
338 assert(
N == LastN->getNextNode() &&
"Bad chain!");
339 assert(
N->getPrevNode() == LastN &&
"Bad chain!");
345 setDefUseUnscheduledSuccs(NewInterval);
348MemDGNode *DependencyGraph::getMemDGNodeBefore(DGNode *
N,
bool IncludingN,
349 MemDGNode *SkipN)
const {
350 auto *
I =
N->getInstruction();
351 for (
auto *PrevI = IncludingN ?
I :
I->getPrevNode(); PrevI !=
nullptr;
352 PrevI = PrevI->getPrevNode()) {
354 if (PrevN ==
nullptr)
356 auto *PrevMemN = dyn_cast<MemDGNode>(PrevN);
357 if (PrevMemN !=
nullptr && PrevMemN != SkipN)
363MemDGNode *DependencyGraph::getMemDGNodeAfter(DGNode *
N,
bool IncludingN,
364 MemDGNode *SkipN)
const {
365 auto *
I =
N->getInstruction();
366 for (
auto *NextI = IncludingN ?
I :
I->getNextNode(); NextI !=
nullptr;
367 NextI = NextI->getNextNode()) {
369 if (NextN ==
nullptr)
371 auto *NextMemN = dyn_cast<MemDGNode>(NextN);
372 if (NextMemN !=
nullptr && NextMemN != SkipN)
378void DependencyGraph::notifyCreateInstr(Instruction *
I) {
383 if (!(DAGInterval.contains(
I) || DAGInterval.touches(
I)))
386 DAGInterval = DAGInterval.getUnionInterval({
I,
I});
388 auto *MemN = dyn_cast<MemDGNode>(
N);
391 if (MemN !=
nullptr) {
392 if (
auto *PrevMemN = getMemDGNodeBefore(MemN,
false)) {
393 PrevMemN->NextMemN = MemN;
394 MemN->PrevMemN = PrevMemN;
396 if (
auto *NextMemN = getMemDGNodeAfter(MemN,
false)) {
397 NextMemN->PrevMemN = MemN;
398 MemN->NextMemN = NextMemN;
403 if (DAGInterval.top()->comesBefore(
I)) {
406 scanAndAddDeps(*MemN, SrcInterval);
409 if (
I->comesBefore(DAGInterval.bottom())) {
411 for (MemDGNode &BelowN :
418void DependencyGraph::notifyMoveInstr(Instruction *
I,
const BBIterator &To) {
424 assert(!(To != BB->end() && &*To ==
I->getNextNode()) &&
425 !(To == BB->end() && std::next(
I->getIterator()) == BB->end()) &&
426 "Should not have been called if destination is same as origin.");
430 assert(To.getNodeParent() ==
I->getParent() &&
431 "TODO: We don't support movement across BBs!");
433 (To == std::next(DAGInterval.bottom()->getIterator()) ||
434 (To != BB->end() && std::next(To) == DAGInterval.top()->getIterator()) ||
435 (To != BB->end() && DAGInterval.contains(&*To))) &&
436 "TODO: To should be either within the DAGInterval or right "
440 auto OrigDAGInterval = DAGInterval;
443 DAGInterval.notifyMoveInstr(
I, To);
456 MemN->detachFromChain();
471 if (To == BB->end() ||
472 To == std::next(OrigDAGInterval.bottom()->getIterator())) {
477 getMemDGNodeBefore(InsertAfterN,
true, MemN));
482 getMemDGNodeBefore(BeforeToN,
false, MemN));
484 getMemDGNodeAfter(BeforeToN,
true, MemN));
488void DependencyGraph::notifyEraseInstr(Instruction *
I) {
496 if (
auto *MemN = dyn_cast<MemDGNode>(
getNode(
I))) {
498 auto *PrevMemN = getMemDGNodeBefore(MemN,
false);
499 auto *NextMemN = getMemDGNodeAfter(MemN,
false);
500 if (PrevMemN !=
nullptr)
501 PrevMemN->NextMemN = NextMemN;
502 if (NextMemN !=
nullptr)
503 NextMemN->PrevMemN = PrevMemN;
506 while (!MemN->memPreds().empty()) {
507 auto *PredN = *MemN->memPreds().begin();
508 MemN->removeMemPred(PredN);
510 while (!MemN->memSuccs().empty()) {
511 auto *SuccN = *MemN->memSuccs().begin();
512 SuccN->removeMemPred(MemN);
518 for (
auto *PredN :
N->preds(*
this))
519 PredN->decrUnscheduledSuccs();
522 InstrToNodeMap.erase(
I);
525void DependencyGraph::notifySetUse(
const Use &U, Value *NewSrc) {
528 if (
auto *CurrSrcI = dyn_cast<Instruction>(
U.get())) {
529 if (
auto *CurrSrcN =
getNode(CurrSrcI)) {
530 CurrSrcN->decrUnscheduledSuccs();
533 if (
auto *NewSrcI = dyn_cast<Instruction>(NewSrc)) {
534 if (
auto *NewSrcN =
getNode(NewSrcI)) {
535 ++NewSrcN->UnscheduledSuccs;
546 auto NewInterval = Union.getSingleDiff(DAGInterval);
547 if (NewInterval.empty())
550 createNewNodes(NewInterval);
566 if (!DstRange.empty()) {
569 scanAndAddDeps(DstN, SrcRange);
574 if (MemDAGInterval.empty()) {
575 FullScan(NewInterval);
592 else if (DAGInterval.bottom()->comesBefore(NewInterval.top())) {
594 auto SrcRangeFull = MemDAGInterval.getUnionInterval(DstRange);
598 scanAndAddDeps(DstN, SrcRange);
602 else if (NewInterval.bottom()->comesBefore(DAGInterval.top())) {
616 FullScan(NewInterval);
632 auto DstRangeOld = MemDAGInterval;
635 scanAndAddDeps(DstN, SrcRange);
648 Nodes.
reserve(InstrToNodeMap.size());
649 for (
const auto &Pair : InstrToNodeMap)
655 for (
auto *
N : Nodes)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
std::pair< uint64_t, uint64_t > Interval
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool empty() const
empty - Check if the array is empty.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class implements an extremely fast bulk output stream that can only output to a stream.
A DependencyGraph Node that points to an Instruction and contains memory dependency edges.
virtual void print(raw_ostream &OS, bool PrintDeps=true) const
static bool isMemDepCandidate(Instruction *I)
We consider I as a Memory Dependency Candidate instruction if it reads/writes memory or if it has side...
void setSchedBundle(SchedBundle &SB)
unsigned UnscheduledSuccs
The number of unscheduled successors.
SchedBundle * SB
The scheduler bundle that this node belongs to.
bool Scheduled
This is true if this node has been scheduled.
static bool isMemDepNodeCandidate(Instruction *I)
\Returns true if I is a memory dependency candidate instruction.
static bool isFenceLike(Instruction *I)
\Returns true if I is fence like. It excludes non-mem intrinsics.
LLVM_DUMP_METHOD void dump() const
Instruction * getInstruction() const
static bool isStackSaveOrRestoreIntrinsic(Instruction *I)
LLVM_DUMP_METHOD void dump() const
DGNode * getNode(Instruction *I) const
DGNode * getNodeOrNull(Instruction *I) const
Like getNode() but returns nullptr if I is nullptr.
void print(raw_ostream &OS) const
DGNode * getOrCreateNode(Instruction *I)
LLVM_ABI Interval< Instruction > extend(ArrayRef< Instruction * > Instrs)
Build/extend the dependency graph such that it includes Instrs.
A sandboxir::User with operands, opcode and linked with previous/next instructions in an instruction ...
bool mayWriteToMemory() const
bool mayReadFromMemory() const
bool isTerminator() const
static LLVM_ABI MemDGNode * getBotMemDGNode(const Interval< Instruction > &Intvl, const DependencyGraph &DAG)
Scans the instruction chain in Intvl bottom-up, returning the bottom-most MemDGNode,...
static LLVM_ABI MemDGNode * getTopMemDGNode(const Interval< Instruction > &Intvl, const DependencyGraph &DAG)
Scans the instruction chain in Intvl top-down, returning the top-most MemDGNode, or nullptr.
static LLVM_ABI Interval< MemDGNode > make(const Interval< Instruction > &Instrs, DependencyGraph &DAG)
Given Instrs it finds their closest mem nodes in the interval and returns the corresponding mem range...
A DependencyGraph Node for instructions that may read/write memory, or have some ordering constraints...
virtual void print(raw_ostream &OS, bool PrintDeps=true) const override
Iterate over both def-use and mem dependencies.
LLVM_ABI value_type operator*()
LLVM_ABI PredIterator & operator++()
LLVM_ABI bool operator==(const PredIterator &Other) const
The nodes that need to be scheduled back-to-back in a single scheduling cycle form a SchedBundle.
@ Reverting
Tracking changes
TrackerState getState() const
\Returns the current state of the tracker.
OperandUseIterator op_iterator
static ModRefInfo aliasAnalysisGetModRefInfo(BatchAAResults &BatchAA, const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Equivalent to BatchAA::getModRefInfo().
static std::optional< llvm::MemoryLocation > memoryLocationGetOrNone(const Instruction *I)
Equivalent to MemoryLocation::getOrNone(I).
A SandboxIR Value has users. This is the base class.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ BasicBlock
Various leaf nodes.
static bool isOrdered(Instruction *I)
template class LLVM_TEMPLATE_ABI Interval< MemDGNode >
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
auto reverse(ContainerTy &&C)
bool isModSet(const ModRefInfo MRI)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ ModRef
The access may reference and may modify the value stored in memory.
DWARFExpression::Operation Op
bool isRefSet(const ModRefInfo MRI)