LLVM 22.0.0git
MachineScheduler.h
Go to the documentation of this file.
1//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides an interface for customizing the standard MachineScheduler
10// pass. Note that the entire pass may be replaced as follows:
11//
12// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
13// PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
14// ...}
15//
16// The MachineScheduler pass is only responsible for choosing the regions to be
17// scheduled. Targets can override the DAG builder and scheduler without
18// replacing the pass as follows:
19//
20// ScheduleDAGInstrs *<Target>TargetMachine::
21// createMachineScheduler(MachineSchedContext *C) {
22// return new CustomMachineScheduler(C);
23// }
24//
25// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
26// scheduling while updating the instruction stream, register pressure, and live
27// intervals. Most targets don't need to override the DAG builder and list
28// scheduler, but subtargets that require custom scheduling heuristics may
29// plug in an alternate MachineSchedStrategy. The strategy is responsible for
30// selecting the highest priority node from the list:
31//
32// ScheduleDAGInstrs *<Target>TargetMachine::
33// createMachineScheduler(MachineSchedContext *C) {
34// return new ScheduleDAGMILive(C, CustomStrategy(C));
35// }
36//
37// The DAG builder can also be customized in a sense by adding DAG mutations
38// that will run after DAG building and before list scheduling. DAG mutations
39// can adjust dependencies based on target-specific knowledge or add weak edges
40// to aid heuristics:
41//
42// ScheduleDAGInstrs *<Target>TargetMachine::
43// createMachineScheduler(MachineSchedContext *C) {
44// ScheduleDAGMI *DAG = createSchedLive(C);
45// DAG->addMutation(new CustomDAGMutation(...));
46// return DAG;
47// }
48//
49// A target that supports alternative schedulers can use the
50// MachineSchedRegistry to allow command line selection. This can be done by
51// implementing the following boilerplate:
52//
53// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
54// return new CustomMachineScheduler(C);
55// }
56// static MachineSchedRegistry
57// SchedCustomRegistry("custom", "Run my target's custom scheduler",
58// createCustomMachineSched);
59//
60//
61// Finally, subtargets that don't need to implement custom heuristics but would
62// like to configure the GenericScheduler's policy for a given scheduler region,
63// including scheduling direction and register pressure tracking policy, can do
64// this:
65//
66// void <SubTarget>Subtarget::
67// overrideSchedPolicy(MachineSchedPolicy &Policy,
68// const SchedRegion &Region) const {
69// Policy.<Flag> = true;
70// }
71//
72//===----------------------------------------------------------------------===//
73
74#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
75#define LLVM_CODEGEN_MACHINESCHEDULER_H
76
77#include "llvm/ADT/APInt.h"
78#include "llvm/ADT/ArrayRef.h"
79#include "llvm/ADT/BitVector.h"
80#include "llvm/ADT/STLExtras.h"
82#include "llvm/ADT/StringRef.h"
83#include "llvm/ADT/Twine.h"
94#include <algorithm>
95#include <cassert>
97#include <memory>
98#include <string>
99#include <vector>
100
101namespace llvm {
102namespace impl_detail {
103// FIXME: Remove these declarations once RegisterClassInfo is queryable as an
104// analysis.
107} // namespace impl_detail
108
109namespace MISched {
115};
116} // namespace MISched
117
118LLVM_ABI extern cl::opt<MISched::Direction> PreRADirection;
120#ifndef NDEBUG
123#else
124LLVM_ABI extern const bool ViewMISchedDAGs;
125LLVM_ABI extern const bool PrintDAGs;
126#endif
127
128class AAResults;
129class LiveIntervals;
130class MachineDominatorTree;
131class MachineFunction;
132class MachineInstr;
133class MachineLoopInfo;
134class RegisterClassInfo;
135class SchedDFSResult;
136class ScheduleHazardRecognizer;
137class TargetInstrInfo;
138class TargetPassConfig;
139class TargetRegisterInfo;
140
141/// MachineSchedContext provides enough context from the MachineScheduler pass
142/// for the target to instantiate a scheduler.
/// NOTE(review): the `struct MachineSchedContext {` header line and some member
/// declarations are elided in this rendered excerpt — confirm against the
/// original header before editing.
144 MachineFunction *MF = nullptr; // Function currently being scheduled.
145 const MachineLoopInfo *MLI = nullptr; // Loop analysis (read-only).
146 const MachineDominatorTree *MDT = nullptr; // Dominator tree (read-only).
147 const TargetMachine *TM = nullptr;
148 AAResults *AA = nullptr; // Alias analysis results, forwarded to the scheduler.
149 LiveIntervals *LIS = nullptr;
150
152
 // Virtual destructor: contexts may be subclassed per target.
156 virtual ~MachineSchedContext();
157};
158
159/// MachineSchedRegistry provides a selection of available machine instruction
160/// schedulers.
/// NOTE(review): the class header line and several typedef/accessor signature
/// lines are elided in this rendered excerpt.
163 ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
164public:
166
167 // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
169
171
 // Constructing a registry entry adds it to the global registry (RAII);
 // the destructor removes it again.
172 MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
174 Registry.Add(this);
175 }
176
177 ~MachineSchedRegistry() { Registry.Remove(this); }
178
179 // Accessors.
180 //
183 }
184
 // Head of the singly-linked list of all registered schedulers.
186 return (MachineSchedRegistry *)Registry.getList();
187 }
188
 // Install a listener notified on registration changes.
190 Registry.setListener(L);
191 }
192};
193
194class ScheduleDAGMI;
195
196/// Define a generic scheduling policy for targets that don't provide their own
197/// MachineSchedStrategy. This can be overridden for each scheduling region
198/// before building the DAG.
/// NOTE(review): the `struct MachineSchedPolicy {` line and some boolean member
/// declarations are elided in this rendered excerpt.
200 // Allow the scheduler to disable register pressure tracking.
202 /// Track LaneMasks to allow reordering of independent subregister writes
203 /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
205
206 // Allow the scheduler to force top-down or bottom-up scheduling. If neither
207 // is true, the scheduler runs in both directions and converges.
208 bool OnlyTopDown = false;
209 bool OnlyBottomUp = false;
210
211 // Disable heuristic that tries to fetch nodes from long dependency chains
212 // first.
214
215 // Compute DFSResult for use in scheduling heuristics.
216 bool ComputeDFSResult = false;
219};
220
221/// A region of an MBB for scheduling.
/// NOTE(review): the `struct SchedRegion {` line, the iterator members, and the
/// constructor's first lines are elided in this rendered excerpt.
223 /// RegionBegin is the first instruction in the scheduling region, and
224 /// RegionEnd is either MBB->end() or the scheduling boundary after the
225 /// last instruction in the scheduling region. These iterators cannot refer
226 /// to instructions outside of the identified scheduling region because
227 /// those may be reordered before scheduling this region.
231
 // N: presumably the number of instructions in the region — TODO confirm
 // against the full constructor declaration (its first line is elided here).
233 unsigned N)
235};
236
237/// MachineSchedStrategy - Interface to the scheduling algorithm used by
238/// ScheduleDAGMI.
239///
240/// Initialization sequence:
241/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
/// NOTE(review): the `class MachineSchedStrategy {` line and the initPolicy
/// signature's first lines are elided in this rendered excerpt.
 // Out-of-line virtual method to provide a vtable anchor (LLVM convention).
243 virtual void anchor();
244
245public:
246 virtual ~MachineSchedStrategy() = default;
247
248 /// Optionally override the per-region scheduling policy.
251 unsigned NumRegionInstrs) {}
252
253 virtual MachineSchedPolicy getPolicy() const { return {}; }
254 virtual void dumpPolicy() const {}
255
256 /// Check if pressure tracking is needed before building the DAG and
257 /// initializing this strategy. Called after initPolicy.
258 virtual bool shouldTrackPressure() const { return true; }
259
260 /// Returns true if lanemasks should be tracked. LaneMask tracking is
261 /// necessary to reorder independent subregister defs for the same vreg.
262 /// This has to be enabled in combination with shouldTrackPressure().
263 virtual bool shouldTrackLaneMasks() const { return false; }
264
265 // If this method returns true, handling of the scheduling regions
266 // themselves (in case of a scheduling boundary in MBB) will be done
267 // beginning with the topmost region of MBB.
268 virtual bool doMBBSchedRegionsTopDown() const { return false; }
269
270 /// Initialize the strategy after building the DAG for a new region.
271 virtual void initialize(ScheduleDAGMI *DAG) = 0;
272
273 /// Tell the strategy that MBB is about to be processed.
274 virtual void enterMBB(MachineBasicBlock *MBB) {};
275
276 /// Tell the strategy that current MBB is done.
277 virtual void leaveMBB() {};
278
279 /// Notify this strategy that all roots have been released (including those
280 /// that depend on EntrySU or ExitSU).
281 virtual void registerRoots() {}
282
283 /// Pick the next node to schedule, or return nullptr. Set IsTopNode to true
284 /// to schedule the node at the top of the unscheduled region. Otherwise it
285 /// will be scheduled at the bottom.
286 virtual SUnit *pickNode(bool &IsTopNode) = 0;
287
288 /// Scheduler callback to notify that a new subtree is scheduled.
289 virtual void scheduleTree(unsigned SubtreeID) {}
290
291 /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
292 /// instruction and updated scheduled/remaining flags in the DAG nodes.
293 virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
294
295 /// When all predecessor dependencies have been resolved, free this node for
296 /// top-down scheduling.
297 virtual void releaseTopNode(SUnit *SU) = 0;
298
299 /// When all successor dependencies have been resolved, free this node for
300 /// bottom-up scheduling.
301 virtual void releaseBottomNode(SUnit *SU) = 0;
302};
303
304/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
305/// schedules machine instructions according to the given MachineSchedStrategy
306/// without much extra book-keeping. This is the common functionality between
307/// PreRA and PostRA MachineScheduler.
/// NOTE(review): the class header line and the AA/LIS/CurrentTop/CurrentBottom
/// member declaration lines are elided in this rendered excerpt (the ctor's
/// init list below shows AA and LIS exist as members).
309protected:
312 std::unique_ptr<MachineSchedStrategy> SchedImpl;
313
314 /// Ordered list of DAG postprocessing steps.
315 std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
316
317 /// The top of the unscheduled zone.
319
320 /// The bottom of the unscheduled zone.
322
323#if LLVM_ENABLE_ABI_BREAKING_CHECKS
324 /// The number of instructions scheduled so far. Used to cut off the
325 /// scheduler at the point determined by misched-cutoff.
326 unsigned NumInstrsScheduled = 0;
327#endif
328
329public:
330 ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
331 bool RemoveKillFlags)
332 : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
333 LIS(C->LIS), SchedImpl(std::move(S)) {}
334
335 // Provide a vtable anchor
336 ~ScheduleDAGMI() override;
337
338 /// If this method returns true, handling of the scheduling regions
339 /// themselves (in case of a scheduling boundary in MBB) will be done
340 /// beginning with the topmost region of MBB.
 // Delegates the decision to the installed strategy.
341 bool doMBBSchedRegionsTopDown() const override {
342 return SchedImpl->doMBBSchedRegionsTopDown();
343 }
344
345 // Returns LiveIntervals instance for use in DAG mutators and such.
346 LiveIntervals *getLIS() const { return LIS; }
347
348 /// Return true if this DAG supports VReg liveness and RegPressure.
349 virtual bool hasVRegLiveness() const { return false; }
350
351 /// Add a postprocessing step to the DAG builder.
352 /// Mutations are applied in the order that they are added after normal DAG
353 /// building and before MachineSchedStrategy initialization.
354 ///
355 /// ScheduleDAGMI takes ownership of the Mutation object.
 // Null mutations are silently ignored.
356 void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
357 if (Mutation)
358 Mutations.push_back(std::move(Mutation));
359 }
360
361 MachineBasicBlock::iterator top() const { return CurrentTop; }
362 MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
363
364 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
365 /// region. This covers all instructions in a block, while schedule() may only
366 /// cover a subset.
367 void enterRegion(MachineBasicBlock *bb,
370 unsigned regioninstrs) override;
371
372 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
373 /// reorderable instructions.
374 void schedule() override;
375
376 void startBlock(MachineBasicBlock *bb) override;
377 void finishBlock() override;
378
379 /// Change the position of an instruction within the basic block and update
380 /// live ranges and region boundary iterators.
381 void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
382
383 void viewGraph(const Twine &Name, const Twine &Title) override;
384 void viewGraph() override;
385
386protected:
387 // Top-Level entry points for the schedule() driver...
388
389 /// Apply each ScheduleDAGMutation step in order. This allows different
390 /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
391 void postProcessDAG();
392
393 /// Release ExitSU predecessors and setup scheduler queues.
394 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
395
396 /// Update scheduler DAG and queues after scheduling an instruction.
397 void updateQueues(SUnit *SU, bool IsTopNode);
398
399 /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
400 void placeDebugValues();
401
402 /// dump the scheduled Sequence.
403 void dumpSchedule() const;
404 /// Print execution trace of the schedule top-down or bottom-up.
405 void dumpScheduleTraceTopDown() const;
406 void dumpScheduleTraceBottomUp() const;
407
408 // Lesser helpers...
409 bool checkSchedLimit();
410
411 void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
412 SmallVectorImpl<SUnit*> &BotRoots);
413
414 void releaseSucc(SUnit *SU, SDep *SuccEdge);
415 void releaseSuccessors(SUnit *SU);
416 void releasePred(SUnit *SU, SDep *PredEdge);
417 void releasePredecessors(SUnit *SU);
418};
419
420/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
421/// machine instructions while updating LiveIntervals and tracking regpressure.
/// NOTE(review): the class header line and several member declarations
/// (RegClassInfo, ScheduledTrees, pressure trackers, VReg use maps) are elided
/// in this rendered excerpt; the ctor init list below names some of them.
423protected:
425
426 /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
427 /// will be empty.
428 SchedDFSResult *DFSResult = nullptr;
430
432
433 /// Maps vregs to the SUnits of their uses in the current scheduling region.
435
436 // Map each SU to its summary of pressure changes. This array is updated for
437 // liveness during bottom-up scheduling. Top-down scheduling may proceed but
438 // has no effect on the pressure diffs.
440
441 /// Register pressure in this region computed by initRegPressure.
442 bool ShouldTrackPressure = false;
443 bool ShouldTrackLaneMasks = false;
446
447 /// List of pressure sets that exceed the target's pressure limit before
448 /// scheduling, listed in increasing set ID order. Each pressure set is paired
449 /// with its max pressure in the currently scheduled regions.
450 std::vector<PressureChange> RegionCriticalPSets;
451
452 /// The top of the unscheduled zone.
455
456 /// The bottom of the unscheduled zone.
459
460public:
462 std::unique_ptr<MachineSchedStrategy> S)
463 : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
464 RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
465 TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
466
467 ~ScheduleDAGMILive() override;
468
469 /// Return true if this DAG supports VReg liveness and RegPressure.
470 bool hasVRegLiveness() const override { return true; }
471
472 /// Return true if register pressure tracking is enabled.
473 bool isTrackingPressure() const { return ShouldTrackPressure; }
474
475 /// Get current register pressure for the top scheduled instructions.
476 const IntervalPressure &getTopPressure() const { return TopPressure; }
477 const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
478
479 /// Get current register pressure for the bottom scheduled instructions.
480 const IntervalPressure &getBotPressure() const { return BotPressure; }
481 const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
482
483 /// Get register pressure for the entire scheduling region before scheduling.
484 const IntervalPressure &getRegPressure() const { return RegPressure; }
485
486 const std::vector<PressureChange> &getRegionCriticalPSets() const {
487 return RegionCriticalPSets;
488 }
489
 // Mutable/const accessors for a node's summarized pressure changes,
 // indexed by SUnit::NodeNum.
491 return SUPressureDiffs[SU->NodeNum];
492 }
493 const PressureDiff &getPressureDiff(const SUnit *SU) const {
494 return SUPressureDiffs[SU->NodeNum];
495 }
496
497 /// Compute a DFSResult after DAG building is complete, and before any
498 /// queue comparisons.
499 void computeDFSResult();
500
501 /// Return a non-null DFS result if the scheduling strategy initialized it.
502 const SchedDFSResult *getDFSResult() const { return DFSResult; }
503
504 BitVector &getScheduledTrees() { return ScheduledTrees; }
505
506 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
507 /// region. This covers all instructions in a block, while schedule() may only
508 /// cover a subset.
509 void enterRegion(MachineBasicBlock *bb,
512 unsigned regioninstrs) override;
513
514 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
515 /// reorderable instructions.
516 void schedule() override;
517
518 /// Compute the cyclic critical path through the DAG.
519 unsigned computeCyclicCriticalPath();
520
521 void dump() const override;
522
523protected:
524 // Top-Level entry points for the schedule() driver...
525
526 /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
527 /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
528 /// region, TopTracker and BottomTracker will be initialized to the top and
529 /// bottom of the DAG region without covering any unscheduled instruction.
530 void buildDAGWithRegPressure();
531
532 /// Release ExitSU predecessors and setup scheduler queues. Re-position
533 /// the Top RP tracker in case the region beginning has changed.
534 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
535
536 /// Move an instruction and update register pressure.
537 void scheduleMI(SUnit *SU, bool IsTopNode);
538
539 // Lesser helpers...
540
541 void initRegPressure();
542
543 void updatePressureDiffs(ArrayRef<VRegMaskOrUnit> LiveUses);
544
545 void updateScheduledPressure(const SUnit *SU,
546 const std::vector<unsigned> &NewMaxPressure);
547
548 void collectVRegUses(SUnit &SU);
549};
550
551//===----------------------------------------------------------------------===//
552///
553/// Helpers for implementing custom MachineSchedStrategy classes. These take
554/// care of the book-keeping associated with list scheduling heuristics.
555///
556//===----------------------------------------------------------------------===//
557
558/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
559/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
560/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
561///
562/// This is a convenience class that may be used by implementations of
563/// MachineSchedStrategy.
/// NOTE(review): the `class ReadyQueue {` line and the signature line of the
/// removal method (before the `(*I)->NodeQueueId &= ~ID;` body below) are
/// elided in this rendered excerpt.
565 unsigned ID;
566 std::string Name;
567 std::vector<SUnit*> Queue;
568
569public:
570 ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
571
572 unsigned getID() const { return ID; }
573
574 StringRef getName() const { return Name; }
575
576 // SU is in this queue if its NodeQueueId is a superset of this ID.
577 bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
578
579 bool empty() const { return Queue.empty(); }
580
581 void clear() { Queue.clear(); }
582
583 unsigned size() const { return Queue.size(); }
584
585 using iterator = std::vector<SUnit*>::iterator;
586
587 iterator begin() { return Queue.begin(); }
588
589 iterator end() { return Queue.end(); }
590
591 ArrayRef<SUnit*> elements() { return Queue; }
592
593 iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
594
 // Append SU and mark it as a member of this queue in its NodeQueueId mask.
595 void push(SUnit *SU) {
596 Queue.push_back(SU);
597 SU->NodeQueueId |= ID;
598 }
599
 // Swap-with-back removal: clears this queue's bit on the removed node,
 // overwrites *I with the last element, pops the back, and returns an
 // iterator at the same position (order within the queue is not preserved).
601 (*I)->NodeQueueId &= ~ID;
602 *I = Queue.back();
603 unsigned idx = I - Queue.begin();
604 Queue.pop_back();
605 return Queue.begin() + idx;
606 }
607
608 LLVM_ABI void dump() const;
};
610
611/// Summarize the unscheduled region.
/// NOTE(review): the `struct SchedRemainder {` line and the declaration lines
/// for CyclicCritPath, RemIssueCount and the remaining-counts vector are
/// elided in this rendered excerpt (reset() below shows they exist).
613 // Critical path through the DAG in expected latency.
614 unsigned CriticalPath;
616
617 // Scaled count of micro-ops left to schedule.
619
621
622 // Unscheduled resources
624
626
 // Reinitialize all remaining-work counters; init() repopulates them from
 // the DAG and scheduling model.
627 void reset() {
628 CriticalPath = 0;
629 CyclicCritPath = 0;
630 RemIssueCount = 0;
633 }
634
635 LLVM_ABI void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
636};
637
638/// ResourceSegments are a collection of intervals closed on the
639/// left and opened on the right:
640///
641/// list{ [a1, b1), [a2, b2), ..., [a_N, b_N) }
642///
643/// The collection has the following properties:
644///
645/// 1. The list is ordered: a_i < b_i and b_i < a_(i+1)
646///
647/// 2. The intervals in the collection do not intersect each other.
648///
649/// A \ref ResourceSegments instance represents the cycle
650/// reservation history of the instance of an individual resource.
/// NOTE(review): the `class ResourceSegments {` line and a few declaration
/// lines (e.g. the intersects() signature) are elided in this rendered
/// excerpt.
652public:
653 /// Represents an interval of discrete integer values closed on
654 /// the left and open on the right: [a, b).
655 typedef std::pair<int64_t, int64_t> IntervalTy;
656
657 /// Adds an interval [a, b) to the collection of the instance.
658 ///
659 /// When adding [a, b) to the collection, the operation merges the
660 /// adjacent intervals. For example
661 ///
662 /// 0 1 2 3 4 5 6 7 8 9 10
663 /// [-----) [--) [--)
664 /// + [--)
665 /// = [-----------) [--)
666 ///
667 /// To be able to debug duplicate resource usage, the function has an
668 /// assertion that checks that no interval should be added if it
669 /// overlaps any of the intervals in the collection. We can
670 /// require this because by definition a \ref ResourceSegments is
671 /// attached only to an individual resource instance.
672 LLVM_ABI void add(IntervalTy A, const unsigned CutOff = 10);
673
674public:
675 /// Checks whether intervals intersect.
677
678 /// These functions return the interval used by a resource in bottom and top
679 /// scheduling.
680 ///
681 /// Consider an instruction that uses resources X0, X1 and X2 as follows:
682 ///
683 /// X0 X1 X1 X2 +--------+-------------+--------------+
684 /// |Resource|AcquireAtCycle|ReleaseAtCycle|
685 /// +--------+-------------+--------------+
686 /// | X0 | 0 | 1 |
687 /// +--------+-------------+--------------+
688 /// | X1 | 1 | 3 |
689 /// +--------+-------------+--------------+
690 /// | X2 | 3 | 4 |
691 /// +--------+-------------+--------------+
692 ///
693 /// If we can schedule the instruction at cycle C, we need to
694 /// compute the interval of the resource as follows:
695 ///
696 /// # TOP DOWN SCHEDULING
697 ///
698 /// Cycles scheduling flows to the _right_, in the same direction
699 /// of time.
700 ///
701 /// C 1 2 3 4 5 ...
702 /// ------|------|------|------|------|------|----->
703 /// X0 X1 X1 X2 ---> direction of time
704 /// X0 [C, C+1)
705 /// X1 [C+1, C+3)
706 /// X2 [C+3, C+4)
707 ///
708 /// Therefore, the formula to compute the interval for a resource
709 /// of an instruction that can be scheduled at cycle C in top-down
710 /// scheduling is:
711 ///
712 /// [C+AcquireAtCycle, C+ReleaseAtCycle)
713 ///
714 ///
715 /// # BOTTOM UP SCHEDULING
716 ///
717 /// Cycles scheduling flows to the _left_, in opposite direction
718 /// of time.
719 ///
720 /// In bottom up scheduling, the scheduling happens in opposite
721 /// direction to the execution of the cycles of the
722 /// instruction. When the instruction is scheduled at cycle `C`,
723 /// the resources are allocated in the past relative to `C`:
724 ///
725 /// 2 1 C -1 -2 -3 -4 -5 ...
726 /// <-----|------|------|------|------|------|------|------|---
727 /// X0 X1 X1 X2 ---> direction of time
728 /// X0 (C+1, C]
729 /// X1 (C, C-2]
730 /// X2 (C-2, C-3]
731 ///
732 /// Therefore, the formula to compute the interval for a resource
733 /// of an instruction that can be scheduled at cycle C in bottom-up
734 /// scheduling is:
735 ///
736 /// [C-ReleaseAtCycle+1, C-AcquireAtCycle+1)
737 ///
738 ///
739 /// NOTE: In both cases, the number of cycles booked by a
740 /// resources is the value (ReleaseAtCycle - AcquireAtCycle).
741 static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle,
742 unsigned ReleaseAtCycle) {
743 return std::make_pair<long, long>((long)C - (long)ReleaseAtCycle + 1L,
744 (long)C - (long)AcquireAtCycle + 1L);
745 }
746 static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle,
747 unsigned ReleaseAtCycle) {
748 return std::make_pair<long, long>((long)C + (long)AcquireAtCycle,
749 (long)C + (long)ReleaseAtCycle);
750 }
751
752private:
753 /// Finds the first cycle in which a resource can be allocated.
754 ///
755 /// The function uses the \p IntervalBuilder [*] to build a
756 /// resource interval [a, b) out of the input parameters \p
757 /// CurrCycle, \p AcquireAtCycle and \p ReleaseAtCycle.
758 ///
759 /// The function then loops through the intervals in the ResourceSegments
760 /// and shifts the interval [a, b) and the ReturnCycle to the
761 /// right until there is no intersection between the intervals of
762 /// the \ref ResourceSegments instance and the new shifted [a, b). When
763 /// this condition is met, the ReturnCycle (which
764 /// correspond to the cycle in which the resource can be
765 /// allocated) is returned.
766 ///
767 /// c = CurrCycle in input
768 /// c 1 2 3 4 5 6 7 8 9 10 ... ---> (time
769 /// flow)
770 /// ResourceSegments... [---) [-------) [-----------)
771 /// c [1 3[ -> AcquireAtCycle=1, ReleaseAtCycle=3
772 /// ++c [1 3)
773 /// ++c [1 3)
774 /// ++c [1 3)
775 /// ++c [1 3)
776 /// ++c [1 3) ---> returns c
777 /// incremented by 5 (c+5)
778 ///
779 ///
780 /// Notice that for bottom-up scheduling the diagram is slightly
781 /// different because the current cycle c is always on the right
782 /// of the interval [a, b) (see \ref
783 /// `getResourceIntervalBottom`). This is because the cycle
784 /// increments for bottom-up scheduling moved in the direction
785 /// opposite to the direction of time:
786 ///
787 /// --------> direction of time.
788 /// XXYZZZ (resource usage)
789 /// --------> direction of top-down execution cycles.
790 /// <-------- direction of bottom-up execution cycles.
791 ///
792 /// Even though bottom-up scheduling moves against the flow of
793 /// time, the algorithm used to find the first free slot in between
794 /// intervals is the same as for top-down scheduling.
795 ///
796 /// [*] See \ref `getResourceIntervalTop` and
797 /// \ref `getResourceIntervalBottom` to see how such resource intervals
798 /// are built.
799 LLVM_ABI unsigned getFirstAvailableAt(
800 unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
801 std::function<IntervalTy(unsigned, unsigned, unsigned)> IntervalBuilder)
802 const;
803
804public:
805 /// getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop
806 /// should be merged in a single function in which a function that
807 /// creates the `NewInterval` is passed as a parameter.
 // NOTE(review): the lines carrying the IntervalBuilder arguments of the
 // two calls below are elided in this rendered excerpt.
808 unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle,
809 unsigned AcquireAtCycle,
810 unsigned ReleaseAtCycle) const {
811 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
813 }
814 unsigned getFirstAvailableAtFromTop(unsigned CurrCycle,
815 unsigned AcquireAtCycle,
816 unsigned ReleaseAtCycle) const {
817 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
819 }
820
821private:
822 std::list<IntervalTy> _Intervals;
823 /// Merge all adjacent intervals in the collection. For all pairs
824 /// of adjacent intervals, it performs [a, b) + [b, c) -> [a, c).
825 ///
826 /// Before performing the merge operation, the intervals are
827 /// sorted with \ref sort_predicate.
828 LLVM_ABI void sortAndMerge();
829
830public:
831 // constructor for empty set
832 explicit ResourceSegments(){};
833 bool empty() const { return _Intervals.empty(); }
834 explicit ResourceSegments(const std::list<IntervalTy> &Intervals)
835 : _Intervals(Intervals) {
836 sortAndMerge();
837 }
838
839 friend bool operator==(const ResourceSegments &c1,
840 const ResourceSegments &c2) {
841 return c1._Intervals == c2._Intervals;
842 }
 // NOTE(review): the `friend std::ostream &operator<<(std::ostream &os,`
 // signature line is elided in this rendered excerpt.
844 const ResourceSegments &Segments) {
845 os << "{ ";
846 for (auto p : Segments._Intervals)
847 os << "[" << p.first << ", " << p.second << "), ";
848 os << "}\n";
849 return os;
850 }
851};
852
853/// Each Scheduling boundary is associated with ready queues. It tracks the
854/// current cycle in the direction of movement, and maintains the state
855/// of "hazards" and other interlocks at the current cycle.
857public:
858 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
859 enum {
862 LogMaxQID = 2
863 };
864
865 ScheduleDAGMI *DAG = nullptr;
866 const TargetSchedModel *SchedModel = nullptr;
867 SchedRemainder *Rem = nullptr;
868
871
873
874private:
875 /// True if the pending Q should be checked/updated before scheduling another
876 /// instruction.
877 bool CheckPending;
878
879 /// Number of cycles it takes to issue the instructions scheduled in this
880 /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
881 /// See getStalls().
882 unsigned CurrCycle;
883
884 /// Micro-ops issued in the current cycle
885 unsigned CurrMOps;
886
887 /// MinReadyCycle - Cycle of the soonest available instruction.
888 unsigned MinReadyCycle;
889
890 // The expected latency of the critical path in this scheduled zone.
891 unsigned ExpectedLatency;
892
893 // The latency of dependence chains leading into this zone.
894 // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
895 // For each cycle scheduled: DLat -= 1.
896 unsigned DependentLatency;
897
898 /// Count the scheduled (issued) micro-ops that can be retired by
899 /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
900 unsigned RetiredMOps;
901
902 // Count scheduled resources that have been executed. Resources are
903 // considered executed if they become ready in the time that it takes to
904 // saturate any resource including the one in question. Counts are scaled
905 // for direct comparison with other resources. Counts can be compared with
906 // MOps * getMicroOpFactor and Latency * getLatencyFactor.
907 SmallVector<unsigned, 16> ExecutedResCounts;
908
909 /// Cache the max count for a single resource.
910 unsigned MaxExecutedResCount;
911
912 // Cache the critical resources ID in this scheduled zone.
913 unsigned ZoneCritResIdx;
914
915 // Is the scheduled region resource limited vs. latency limited.
916 bool IsResourceLimited;
917
918public:
919private:
920 /// Record how resources have been allocated across the cycles of
921 /// the execution.
922 std::map<unsigned, ResourceSegments> ReservedResourceSegments;
923 std::vector<unsigned> ReservedCycles;
924 /// For each PIdx, stores first index into ReservedResourceSegments that
925 /// corresponds to it.
926 ///
927 /// For example, consider the following 3 resources (ResourceCount =
928 /// 3):
929 ///
930 /// +------------+--------+
931 /// |ResourceName|NumUnits|
932 /// +------------+--------+
933 /// | X | 2 |
934 /// +------------+--------+
935 /// | Y | 3 |
936 /// +------------+--------+
937 /// | Z | 1 |
938 /// +------------+--------+
939 ///
940 /// In this case, the total number of resource instances is 6. The
941 /// vector \ref ReservedResourceSegments will have a slot for each instance.
942 /// The vector \ref ReservedCyclesIndex will track at what index the first
943 /// instance of the resource is found in the vector of \ref
944 /// ReservedResourceSegments:
945 ///
946 /// Indexes of instances in
947 /// ReservedResourceSegments
948 ///
949 /// 0 1 2 3 4 5
950 /// ReservedCyclesIndex[0] = 0; [X0, X1,
951 /// ReservedCyclesIndex[1] = 2; Y0, Y1, Y2
952 /// ReservedCyclesIndex[2] = 5; Z
953 SmallVector<unsigned, 16> ReservedCyclesIndex;
954
955 // For each PIdx, stores the resource group IDs of its subunits
956 SmallVector<APInt, 16> ResourceGroupSubUnitMasks;
957
958#if LLVM_ENABLE_ABI_BREAKING_CHECKS
959 // Remember the greatest possible stall as an upper bound on the number of
960 // times we should retry the pending queue because of a hazard.
961 unsigned MaxObservedStall;
962#endif
963
964public:
  /// Pending queues extend the ready queues with the same ID and the
  /// PendingFlag set.
  ///
  /// The Pending queue's ID is the zone ID shifted by LogMaxQID (the same ID
  /// with the pending bit set); queue names are suffixed ".A" (available) and
  /// ".P" (pending).
  SchedBoundary(unsigned ID, const Twine &Name):
    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
    reset();
  }
971 SchedBoundary &operator=(const SchedBoundary &other) = delete;
972 SchedBoundary(const SchedBoundary &other) = delete;
974
975 LLVM_ABI void reset();
976
977 LLVM_ABI void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
979
  /// Return true if this is the top-of-region scheduling boundary, i.e. its
  /// Available queue carries TopQID.
  bool isTop() const {
    return Available.getID() == TopQID;
  }
983
  /// Number of cycles to issue the instructions scheduled in this zone.
  unsigned getCurrCycle() const { return CurrCycle; }

  /// Micro-ops issued in the current cycle.
  unsigned getCurrMOps() const { return CurrMOps; }
989
  /// The latency of dependence chains leading into this zone.
  unsigned getDependentLatency() const { return DependentLatency; }
992
  /// Get the number of latency cycles "covered" by the scheduled
  /// instructions. This is the larger of the critical path within the zone
  /// and the number of cycles required to issue the instructions.
  unsigned getScheduledLatency() const {
    // ExpectedLatency is the in-zone critical path; CurrCycle the cycles
    // consumed issuing instructions.
    return std::max(ExpectedLatency, CurrCycle);
  }
999
  /// Latency of \p SU not yet covered by the schedule: the node's height
  /// (longest path down to a node with no successors) when this is the top
  /// zone, otherwise its depth (longest path up to a node with no
  /// predecessors).
  unsigned getUnscheduledLatency(SUnit *SU) const {
    return isTop() ? SU->getHeight() : SU->getDepth();
  }
1003
  /// Scaled count of micro-ops executed on resource \p ResIdx, comparable
  /// with MOps * getMicroOpFactor and Latency * getLatencyFactor (see
  /// ExecutedResCounts).
  unsigned getResourceCount(unsigned ResIdx) const {
    return ExecutedResCounts[ResIdx];
  }
1007
  /// Get the scaled count of scheduled micro-ops and resources, including
  /// executed resources.
  unsigned getCriticalCount() const {
    // ZoneCritResIdx == 0 means no resource is critical for this zone; fall
    // back to the scaled count of retired micro-ops.
    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();
    return getResourceCount(ZoneCritResIdx);
  }
1015
  /// Get a scaled count for the minimum execution time of the scheduled
  /// micro-ops that are ready to execute by getExecutedCount. Notice the
  /// feedback loop.
  unsigned getExecutedCount() const {
    // Lower-bounded both by the cycle count scaled to the common latency
    // factor and by the largest per-resource executed count observed so far.
    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
  }
1023
  /// Index of the critical resource in this zone; 0 means no resource is
  /// critical and the micro-op count is used instead (see getCriticalCount()).
  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }

  /// Is the scheduled region resource limited vs. latency limited.
  bool isResourceLimited() const { return IsResourceLimited; }
1028
1029 /// Get the difference between the given SUnit's ready time and the current
1030 /// cycle.
1031 LLVM_ABI unsigned getLatencyStallCycles(SUnit *SU);
1032
  /// Compute the next cycle at which the given processor resource unit
  /// can be scheduled.
  LLVM_ABI unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                                   unsigned ReleaseAtCycle,
                                                   unsigned AcquireAtCycle);

  /// Compute the next cycle at which the given processor resource can be
  /// scheduled.
  LLVM_ABI std::pair<unsigned, unsigned>
  getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
                       unsigned ReleaseAtCycle, unsigned AcquireAtCycle);
1040
1041 bool isUnbufferedGroup(unsigned PIdx) const {
1044 }
1045
  /// Does this SU have a hazard within the current instruction group.
  LLVM_ABI bool checkHazard(SUnit *SU);
1047
1048 LLVM_ABI unsigned findMaxLatency(ArrayRef<SUnit *> ReadySUs);
1049
1050 LLVM_ABI unsigned getOtherResourceCount(unsigned &OtherCritIdx);
1051
1052 /// Release SU to make it ready. If it's not in hazard, remove it from
1053 /// pending queue (if already in) and push into available queue.
1054 /// Otherwise, push the SU into pending queue.
1055 ///
1056 /// @param SU The unit to be released.
1057 /// @param ReadyCycle Until which cycle the unit is ready.
1058 /// @param InPQueue Whether SU is already in pending queue.
1059 /// @param Idx Position offset in pending queue (if in it).
1060 LLVM_ABI void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
1061 unsigned Idx = 0);
1062
  /// Move the boundary of scheduled code by one cycle.
  LLVM_ABI void bumpCycle(unsigned NextCycle);

  LLVM_ABI void incExecutedResources(unsigned PIdx, unsigned Count);

  /// Add the given processor resource to this scheduled zone.
  LLVM_ABI unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                                  unsigned Cycles, unsigned ReadyCycle,
                                  unsigned StartAtCycle);

  /// Move the boundary of scheduled code by one SUnit.
  LLVM_ABI void bumpNode(SUnit *SU);

  /// Release pending ready nodes in to the available queue.
  LLVM_ABI void releasePending();

  /// Remove SU from the ready set for this boundary.
  LLVM_ABI void removeReady(SUnit *SU);
1076
1077 /// Call this before applying any other heuristics to the Available queue.
1078 /// Updates the Available/Pending Q's if necessary and returns the single
1079 /// available instruction, or NULL if there are multiple candidates.
1081
1082 /// Dump the state of the information that tracks resource usage.
1083 LLVM_ABI void dumpReservedCycles() const;
1084 LLVM_ABI void dumpScheduledState() const;
1085};
1086
1087/// Base class for GenericScheduler. This class maintains information about
1088/// scheduling candidates based on TargetSchedModel making it easy to implement
1089/// heuristics for either preRA or postRA scheduling.
1091public:
1092 /// Represent the type of SchedCandidate found within a single queue.
1093 /// pickNodeBidirectional depends on these listed by decreasing priority.
1113
1114#ifndef NDEBUG
1115 static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
1116#endif
1117
1118 /// Policy for scheduling the next instruction in the candidate's zone.
1119 struct CandPolicy {
1120 bool ReduceLatency = false;
1121 unsigned ReduceResIdx = 0;
1122 unsigned DemandResIdx = 0;
1123
1124 CandPolicy() = default;
1125
1126 bool operator==(const CandPolicy &RHS) const {
1127 return ReduceLatency == RHS.ReduceLatency &&
1128 ReduceResIdx == RHS.ReduceResIdx &&
1129 DemandResIdx == RHS.DemandResIdx;
1130 }
1131 bool operator!=(const CandPolicy &RHS) const {
1132 return !(*this == RHS);
1133 }
1134 };
1135
1136 /// Status of an instruction's critical resource consumption.
1138 // Count critical resources in the scheduled region required by SU.
1139 unsigned CritResources = 0;
1140
1141 // Count critical resources from another region consumed by SU.
1142 unsigned DemandedResources = 0;
1143
1145
1146 bool operator==(const SchedResourceDelta &RHS) const {
1147 return CritResources == RHS.CritResources
1148 && DemandedResources == RHS.DemandedResources;
1149 }
1150 bool operator!=(const SchedResourceDelta &RHS) const {
1151 return !operator==(RHS);
1152 }
1153 };
1154
1155 /// Store the state used by GenericScheduler heuristics, required for the
1156 /// lifetime of one invocation of pickNode().
1159
1160 // The best SUnit candidate.
1162
1163 // The reason for this candidate.
1165
1166 // Whether this candidate should be scheduled at top/bottom.
1167 bool AtTop;
1168
1169 // Register pressure values for the best candidate.
1171
1172 // Critical resource consumption of the best candidate.
1174
1177
1178 void reset(const CandPolicy &NewPolicy) {
1179 Policy = NewPolicy;
1180 SU = nullptr;
1181 Reason = NoCand;
1182 AtTop = false;
1185 }
1186
1187 bool isValid() const { return SU; }
1188
1189 // Copy the status of another candidate without changing policy.
1191 assert(Best.Reason != NoCand && "uninitialized Sched candidate");
1192 SU = Best.SU;
1193 Reason = Best.Reason;
1194 AtTop = Best.AtTop;
1195 RPDelta = Best.RPDelta;
1196 ResDelta = Best.ResDelta;
1197 }
1198
1201 };
1202
1203protected:
1206 const TargetRegisterInfo *TRI = nullptr;
1207 unsigned TopIdx = 0;
1208 unsigned BotIdx = 0;
1209 unsigned NumRegionInstrs = 0;
1210
1212
1214
1216
1217 LLVM_ABI void setPolicy(CandPolicy &Policy, bool IsPostRA,
1218 SchedBoundary &CurrZone, SchedBoundary *OtherZone);
1219
1220 MachineSchedPolicy getPolicy() const override { return RegionPolicy; }
1221
1222#ifndef NDEBUG
1223 void traceCandidate(const SchedCandidate &Cand);
1224#endif
1225
1226private:
1227 bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
1228 bool ComputeRemLatency, unsigned &RemLatency) const;
1229};
1230
1231// Utility functions used by heuristics in tryCandidate().
1232LLVM_ABI bool tryLess(int TryVal, int CandVal,
1233 GenericSchedulerBase::SchedCandidate &TryCand,
1234 GenericSchedulerBase::SchedCandidate &Cand,
1236LLVM_ABI bool tryGreater(int TryVal, int CandVal,
1237 GenericSchedulerBase::SchedCandidate &TryCand,
1238 GenericSchedulerBase::SchedCandidate &Cand,
1240LLVM_ABI bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
1241 GenericSchedulerBase::SchedCandidate &Cand,
1242 SchedBoundary &Zone);
1243LLVM_ABI bool tryPressure(const PressureChange &TryP,
1244 const PressureChange &CandP,
1245 GenericSchedulerBase::SchedCandidate &TryCand,
1246 GenericSchedulerBase::SchedCandidate &Cand,
1248 const TargetRegisterInfo *TRI,
1249 const MachineFunction &MF);
1250LLVM_ABI unsigned getWeakLeft(const SUnit *SU, bool isTop);
1251LLVM_ABI int biasPhysReg(const SUnit *SU, bool isTop);
1252
1253/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
1254/// the schedule.
1256public:
1258 GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1259 Bot(SchedBoundary::BotQID, "BotQ") {}
1260
1261 void initPolicy(MachineBasicBlock::iterator Begin,
1263 unsigned NumRegionInstrs) override;
1264
1265 void dumpPolicy() const override;
1266
  /// Check if pressure tracking is needed before building the DAG and
  /// initializing this strategy.
  bool shouldTrackPressure() const override {
    return RegionPolicy.ShouldTrackPressure;
  }

  /// Returns true if lanemasks should be tracked.
  bool shouldTrackLaneMasks() const override {
    return RegionPolicy.ShouldTrackLaneMasks;
  }
1274
1275 void initialize(ScheduleDAGMI *dag) override;
1276
1277 SUnit *pickNode(bool &IsTopNode) override;
1278
1279 void schedNode(SUnit *SU, bool IsTopNode) override;
1280
  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Top.releaseNode(SU, SU->TopReadyCycle, false);
    // Invalidate the cached candidate last picked from the Top boundary.
    TopCand.SU = nullptr;
  }
1288
  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    // Invalidate the cached candidate last picked from the Bot boundary.
    BotCand.SU = nullptr;
  }
1296
1297 void registerRoots() override;
1298
1299protected:
1300 ScheduleDAGMILive *DAG = nullptr;
1301
1302 // State of the top and bottom scheduled instruction boundaries.
1305
1308
1309 /// Candidate last picked from Top boundary.
1311 /// Candidate last picked from Bot boundary.
1313
1314 void checkAcyclicLatency();
1315
1316 void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
1317 const RegPressureTracker &RPTracker,
1318 RegPressureTracker &TempTracker);
1319
1320 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
1321 SchedBoundary *Zone) const;
1322
1323 SUnit *pickNodeBidirectional(bool &IsTopNode);
1324
1325 void pickNodeFromQueue(SchedBoundary &Zone,
1326 const CandPolicy &ZonePolicy,
1327 const RegPressureTracker &RPTracker,
1328 SchedCandidate &Candidate);
1329
1330 void reschedulePhysReg(SUnit *SU, bool isTop);
1331};
1332
1333/// PostGenericScheduler - Interface to the scheduling algorithm used by
1334/// ScheduleDAGMI.
1335///
1336/// Callbacks from ScheduleDAGMI:
1337/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
1339protected:
1340 ScheduleDAGMI *DAG = nullptr;
1343
1344 /// Candidate last picked from Top boundary.
1346 /// Candidate last picked from Bot boundary.
1348
1351
1352public:
1354 : GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1355 Bot(SchedBoundary::BotQID, "BotQ") {}
1356
1357 ~PostGenericScheduler() override = default;
1358
1359 void initPolicy(MachineBasicBlock::iterator Begin,
1361 unsigned NumRegionInstrs) override;
1362
1363 /// PostRA scheduling does not track pressure.
1364 bool shouldTrackPressure() const override { return false; }
1365
1366 void initialize(ScheduleDAGMI *Dag) override;
1367
1368 void registerRoots() override;
1369
1370 SUnit *pickNode(bool &IsTopNode) override;
1371
1372 SUnit *pickNodeBidirectional(bool &IsTopNode);
1373
  /// Subtree analysis is not supported post-RA; reaching this callback is a
  /// bug.
  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }
1377
1378 void schedNode(SUnit *SU, bool IsTopNode) override;
1379
  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Top.releaseNode(SU, SU->TopReadyCycle, false);
    // Invalidate the cached candidate last picked from the Top boundary.
    TopCand.SU = nullptr;
  }
1386
  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    // Invalidate the cached candidate last picked from the Bot boundary.
    BotCand.SU = nullptr;
  }
1393
1394protected:
1395 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
1396
1397 void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand);
1398};
1399
/// Create a DAG mutation that clusters loads during scheduling. If
/// ReorderWhileClustering is set to true, no attempt will be made to
/// reduce reordering due to load clustering.
LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             bool ReorderWhileClustering = false);
1406
1407/// If ReorderWhileClustering is set to true, no attempt will be made to
1408/// reduce reordering due to store clustering.
1409LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
1410createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1411 const TargetRegisterInfo *TRI,
1412 bool ReorderWhileClustering = false);
1413
/// Create a DAG mutation that adds scheduling constraints around copy
/// instructions.
LLVM_ABI std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);
1417
1418/// Create the standard converging machine scheduler. This will be used as the
1419/// default scheduler if the target does not set a default.
1420/// Adds default DAG mutations.
1421template <typename Strategy = GenericScheduler>
1423 ScheduleDAGMILive *DAG =
1424 new ScheduleDAGMILive(C, std::make_unique<Strategy>(C));
1425 // Register DAG post-processors.
1426 //
1427 // FIXME: extend the mutation API to allow earlier mutations to instantiate
1428 // data and pass it to later mutations. Have a single mutation that gathers
1429 // the interesting nodes in one pass.
1431
1432 const TargetSubtargetInfo &STI = C->MF->getSubtarget();
1433 // Add MacroFusion mutation if fusions are not empty.
1434 const auto &MacroFusions = STI.getMacroFusions();
1435 if (!MacroFusions.empty())
1436 DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
1437 return DAG;
1438}
1439
1440/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
1441template <typename Strategy = PostGenericScheduler>
1443 ScheduleDAGMI *DAG = new ScheduleDAGMI(C, std::make_unique<Strategy>(C),
1444 /*RemoveKillFlags=*/true);
1445 const TargetSubtargetInfo &STI = C->MF->getSubtarget();
1446 // Add MacroFusion mutation if fusions are not empty.
1447 const auto &MacroFusions = STI.getMacroFusions();
1448 if (!MacroFusions.empty())
1449 DAG->addMutation(createMacroFusionDAGMutation(MacroFusions));
1450 return DAG;
1451}
1452
1453class MachineSchedulerPass : public PassInfoMixin<MachineSchedulerPass> {
1454 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1455 // analysis.
1456 std::unique_ptr<impl_detail::MachineSchedulerImpl> Impl;
1457 const TargetMachine *TM;
1458
1459public:
1465};
1466
1468 : public PassInfoMixin<PostMachineSchedulerPass> {
1469 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1470 // analysis.
1471 std::unique_ptr<impl_detail::PostMachineSchedulerImpl> Impl;
1472 const TargetMachine *TM;
1473
1474public:
1480};
1481} // end namespace llvm
1482
1483#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition: Compiler.h:213
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
bool End
Definition: ELF_riscv.cpp:480
expand large div rem
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
Register const TargetRegisterInfo * TRI
PowerPC VSX FMA Mutation
static const char * name
Definition: SMEABIPass.cpp:52
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
Value * RHS
A private abstract base class describing the concept of an individual alias analysis implementation.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Base class for GenericScheduler.
void traceCandidate(const SchedCandidate &Cand)
LLVM_ABI void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone, SchedBoundary *OtherZone)
Set the CandPolicy given a scheduling zone given the current resources and latencies inside and outsi...
MachineSchedPolicy RegionPolicy
const TargetSchedModel * SchedModel
static const char * getReasonStr(GenericSchedulerBase::CandReason Reason)
MachineSchedPolicy getPolicy() const override
GenericSchedulerBase(const MachineSchedContext *C)
const MachineSchedContext * Context
CandReason
Represent the type of SchedCandidate found within a single queue.
const TargetRegisterInfo * TRI
GenericScheduler shrinks the unscheduled zone using heuristics to balance the schedule.
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
bool shouldTrackPressure() const override
Check if pressure tracking is needed before building the DAG and initializing this strategy.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
bool shouldTrackLaneMasks() const override
Returns true if lanemasks should be tracked.
GenericScheduler(const MachineSchedContext *C)
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Representation of each machine instruction.
Definition: MachineInstr.h:72
MachinePassRegistryListener - Listener to adds and removals of nodes in registration list.
MachinePassRegistryNode - Machine pass node stored in registration list.
MachinePassRegistryNode * getNext() const
MachinePassRegistry - Track the registration of machine passes.
MachineSchedRegistry provides a selection of available machine instruction schedulers.
static void setListener(MachinePassRegistryListener< FunctionPassCtor > *L)
static LLVM_ABI MachinePassRegistry< ScheduleDAGCtor > Registry
ScheduleDAGCtor FunctionPassCtor
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
static MachineSchedRegistry * getList()
ScheduleDAGInstrs *(*)(MachineSchedContext *) ScheduleDAGCtor
MachineSchedRegistry * getNext() const
MachineSchedStrategy - Interface to the scheduling algorithm used by ScheduleDAGMI.
virtual bool shouldTrackPressure() const
Check if pressure tracking is needed before building the DAG and initializing this strategy.
virtual void leaveMBB()
Tell the strategy that current MBB is done.
virtual void enterMBB(MachineBasicBlock *MBB)
Tell the strategy that MBB is about to be processed.
virtual void scheduleTree(unsigned SubtreeID)
Scheduler callback to notify that a new subtree is scheduled.
virtual void schedNode(SUnit *SU, bool IsTopNode)=0
Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an instruction and updated scheduled/rem...
virtual ~MachineSchedStrategy()=default
virtual void initialize(ScheduleDAGMI *DAG)=0
Initialize the strategy after building the DAG for a new region.
virtual MachineSchedPolicy getPolicy() const
virtual void releaseTopNode(SUnit *SU)=0
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
virtual void dumpPolicy() const
virtual bool doMBBSchedRegionsTopDown() const
virtual SUnit * pickNode(bool &IsTopNode)=0
Pick the next node to schedule, or return NULL.
virtual void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs)
Optionally override the per-region scheduling policy.
virtual void releaseBottomNode(SUnit *SU)=0
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
virtual bool shouldTrackLaneMasks() const
Returns true if lanemasks should be tracked.
virtual void registerRoots()
Notify this strategy that all roots have been released (including those that depend on EntrySU or Exi...
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI MachineSchedulerPass(MachineSchedulerPass &&Other)
PostGenericScheduler - Interface to the scheduling algorithm used by ScheduleDAGMI.
bool shouldTrackPressure() const override
PostRA scheduling does not track pressure.
void scheduleTree(unsigned SubtreeID) override
Scheduler callback to notify that a new subtree is scheduled.
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
~PostGenericScheduler() override=default
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
PostGenericScheduler(const MachineSchedContext *C)
LLVM_ABI PostMachineSchedulerPass(PostMachineSchedulerPass &&Other)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
List of PressureChanges in order of increasing, unique PSetID.
Array of PressureDiffs.
Helpers for implementing custom MachineSchedStrategy classes.
void push(SUnit *SU)
iterator find(SUnit *SU)
ArrayRef< SUnit * > elements()
LLVM_ABI void dump() const
ReadyQueue(unsigned id, const Twine &name)
bool isInQueue(SUnit *SU) const
std::vector< SUnit * >::iterator iterator
bool empty() const
StringRef getName() const
unsigned size() const
iterator remove(iterator I)
unsigned getID() const
Track the current register pressure at some position in the instruction stream, and remember the high...
A static registration template.
Definition: Registry.h:126
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
ResourceSegments are a collection of intervals closed on the left and opened on the right:
LLVM_ABI void add(IntervalTy A, const unsigned CutOff=10)
Adds an interval [a, b) to the collection of the instance.
static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
These function return the interval used by a resource in bottom and top scheduling.
friend bool operator==(const ResourceSegments &c1, const ResourceSegments &c2)
static LLVM_ABI bool intersects(IntervalTy A, IntervalTy B)
Checks whether intervals intersect.
unsigned getFirstAvailableAtFromTop(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
friend llvm::raw_ostream & operator<<(llvm::raw_ostream &os, const ResourceSegments &Segments)
std::pair< int64_t, int64_t > IntervalTy
Represents an interval of discrete integer values closed on the left and open on the right: [a,...
static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
ResourceSegments(const std::list< IntervalTy > &Intervals)
unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop should be merged in a single function in...
Scheduling dependency.
Definition: ScheduleDAG.h:51
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:249
unsigned NodeQueueId
Queue id of node.
Definition: ScheduleDAG.h:278
unsigned TopReadyCycle
Cycle relative to start when node is ready.
Definition: ScheduleDAG.h:285
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:277
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:433
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
Definition: ScheduleDAG.h:425
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:305
unsigned BotReadyCycle
Cycle relative to end when node is ready.
Definition: ScheduleDAG.h:286
Each Scheduling boundary is associated with ready queues.
LLVM_ABI unsigned getNextResourceCycleByInstance(unsigned InstanceIndex, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource unit can be scheduled.
LLVM_ABI void releasePending()
Release pending ready nodes in to the available queue.
unsigned getDependentLatency() const
unsigned getScheduledLatency() const
Get the number of latency cycles "covered" by the scheduled instructions.
LLVM_ABI void incExecutedResources(unsigned PIdx, unsigned Count)
bool isResourceLimited() const
const TargetSchedModel * SchedModel
unsigned getExecutedCount() const
Get a scaled count for the minimum execution time of the scheduled micro-ops that are ready to execut...
LLVM_ABI unsigned getLatencyStallCycles(SUnit *SU)
Get the difference between the given SUnit's ready time and the current cycle.
SchedBoundary(const SchedBoundary &other)=delete
LLVM_ABI unsigned findMaxLatency(ArrayRef< SUnit * > ReadySUs)
ScheduleDAGMI * DAG
LLVM_ABI void dumpReservedCycles() const
Dump the state of the information that tracks resource usage.
LLVM_ABI unsigned getOtherResourceCount(unsigned &OtherCritIdx)
SchedRemainder * Rem
LLVM_ABI void bumpNode(SUnit *SU)
Move the boundary of scheduled code by one SUnit.
unsigned getCriticalCount() const
Get the scaled count of scheduled micro-ops and resources, including executed resources.
LLVM_ABI SUnit * pickOnlyChoice()
Call this before applying any other heuristics to the Available queue.
LLVM_ABI void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue, unsigned Idx=0)
Release SU to make it ready.
LLVM_ABI unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx, unsigned Cycles, unsigned ReadyCycle, unsigned StartAtCycle)
Add the given processor resource to this scheduled zone.
SchedBoundary(unsigned ID, const Twine &Name)
Pending queues extend the ready queues with the same ID and the PendingFlag set.
ScheduleHazardRecognizer * HazardRec
bool isUnbufferedGroup(unsigned PIdx) const
LLVM_ABI void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem)
SchedBoundary & operator=(const SchedBoundary &other)=delete
unsigned getResourceCount(unsigned ResIdx) const
LLVM_ABI void bumpCycle(unsigned NextCycle)
Move the boundary of scheduled code by one cycle.
unsigned getCurrMOps() const
Micro-ops issued in the current cycle.
unsigned getCurrCycle() const
Number of cycles to issue the instructions scheduled in this zone.
LLVM_ABI bool checkHazard(SUnit *SU)
Does this SU have a hazard within the current instruction group.
LLVM_ABI std::pair< unsigned, unsigned > getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource can be scheduled.
LLVM_ABI void dumpScheduledState() const
LLVM_ABI void removeReady(SUnit *SU)
Remove SU from the ready set for this boundary.
unsigned getZoneCritResIdx() const
unsigned getUnscheduledLatency(SUnit *SU) const
Compute the values of each DAG node for various metrics during DFS.
Definition: ScheduleDFS.h:65
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
VReg2SUnitMultiMap VRegUses
Maps vregs to the SUnits of their uses in the current scheduling region.
PressureDiff & getPressureDiff(const SUnit *SU)
RegPressureTracker BotRPTracker
std::vector< PressureChange > RegionCriticalPSets
List of pressure sets that exceed the target's pressure limit before scheduling, listed in increasing...
IntervalPressure TopPressure
The top of the unscheduled zone.
const RegPressureTracker & getBotRPTracker() const
ScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
IntervalPressure BotPressure
The bottom of the unscheduled zone.
bool isTrackingPressure() const
Return true if register pressure tracking is enabled.
bool hasVRegLiveness() const override
Return true if this DAG supports VReg liveness and RegPressure.
RegisterClassInfo * RegClassInfo
const SchedDFSResult * getDFSResult() const
Return a non-null DFS result if the scheduling strategy initialized it.
const PressureDiff & getPressureDiff(const SUnit *SU) const
const RegPressureTracker & getTopRPTracker() const
RegPressureTracker RPTracker
const IntervalPressure & getRegPressure() const
Get register pressure for the entire scheduling region before scheduling.
const IntervalPressure & getBotPressure() const
Get current register pressure for the bottom scheduled instructions.
BitVector & getScheduledTrees()
MachineBasicBlock::iterator LiveRegionEnd
const IntervalPressure & getTopPressure() const
Get current register pressure for the top scheduled instructions.
const std::vector< PressureChange > & getRegionCriticalPSets() const
IntervalPressure RegPressure
RegPressureTracker TopRPTracker
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
std::unique_ptr< MachineSchedStrategy > SchedImpl
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
MachineBasicBlock::iterator top() const
ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
MachineBasicBlock::iterator bottom() const
MachineBasicBlock::iterator CurrentBottom
The bottom of the unscheduled zone.
bool doMBBSchedRegionsTopDown() const override
If this method returns true, handling of the scheduling regions themselves (in case of a scheduling b...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
LiveIntervals * getLIS() const
~ScheduleDAGMI() override
LiveIntervals * LIS
MachineBasicBlock::iterator CurrentTop
The top of the unscheduled zone.
std::vector< std::unique_ptr< ScheduleDAGMutation > > Mutations
Ordered list of DAG postprocessing steps.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:584
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:585
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:83
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
unsigned getMicroOpFactor() const
Multiply number of micro-ops by this factor to normalize it relative to other resources.
unsigned getLatencyFactor() const
Multiply cycle count by this factor to normalize it relative to other resources.
const MCProcResourceDesc * getProcResource(unsigned PIdx) const
Get a processor resource by ID for convenience.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual std::vector< MacroFusionPredTy > getMacroFusions() const
Get the list of MacroFusion predicates.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
Impl class for MachineScheduler.
Impl class for PostMachineScheduler.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
template class LLVM_TEMPLATE_ABI opt< bool >
Definition: CommandLine.cpp:79
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1770
cl::opt< bool > PrintDAGs
LLVM_ABI unsigned getWeakLeft(const SUnit *SU, bool isTop)
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createMacroFusionDAGMutation(ArrayRef< MacroFusionPredTy > Predicates, bool BranchOnly=false)
Create a DAG scheduling mutation to pair instructions back to back for instructions that benefit acco...
LLVM_ABI cl::opt< bool > VerifyScheduling
cl::opt< bool > ViewMISchedDAGs
LLVM_ABI bool tryPressure(const PressureChange &TryP, const PressureChange &CandP, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason, const TargetRegisterInfo *TRI, const MachineFunction &MF)
ScheduleDAGMI * createSchedPostRA(MachineSchedContext *C)
Create a generic scheduler with no vreg liveness or DAG mutation passes.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary &Zone)
@ Other
Any other memory.
LLVM_ABI cl::opt< MISched::Direction > PreRADirection
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to load clustering.
LLVM_ABI bool tryGreater(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
LLVM_ABI bool tryLess(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
Return true if this heuristic determines order.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createCopyConstrainDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
LLVM_ABI int biasPhysReg(const SUnit *SU, bool isTop)
Minimize physical register live ranges.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
#define N
Policy for scheduling the next instruction in the candidate's zone.
bool operator==(const CandPolicy &RHS) const
bool operator!=(const CandPolicy &RHS) const
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of p...
void reset(const CandPolicy &NewPolicy)
LLVM_ABI void initResourceDelta(const ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
Status of an instruction's critical resource consumption.
bool operator!=(const SchedResourceDelta &RHS) const
bool operator==(const SchedResourceDelta &RHS) const
RegisterPressure computed within a region of instructions delimited by TopIdx and BottomIdx.
const unsigned * SubUnitsIdxBegin
Definition: MCSchedule.h:58
Summarize the scheduling resources required for an instruction of a particular scheduling class.
Definition: MCSchedule.h:123
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterClassInfo * RegClassInfo
MachineSchedContext & operator=(const MachineSchedContext &other)=delete
MachineSchedContext(const MachineSchedContext &other)=delete
Define a generic scheduling policy for targets that don't provide their own MachineSchedStrategy.
bool ShouldTrackLaneMasks
Track LaneMasks to allow reordering of independent subregister writes of the same vreg.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70
Store the effects of a change in pressure on things that MI scheduler cares about.
A region of an MBB for scheduling.
MachineBasicBlock::iterator RegionBegin
RegionBegin is the first instruction in the scheduling region, and RegionEnd is either MBB->end() or ...
MachineBasicBlock::iterator RegionEnd
SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, unsigned N)
Summarize the unscheduled region.
LLVM_ABI void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
SmallVector< unsigned, 16 > RemainingCounts