LLVM 21.0.0git
MachineScheduler.h
Go to the documentation of this file.
1//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides an interface for customizing the standard MachineScheduler
10// pass. Note that the entire pass may be replaced as follows:
11//
12// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
13// PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
14// ...}
15//
16// The MachineScheduler pass is only responsible for choosing the regions to be
17// scheduled. Targets can override the DAG builder and scheduler without
18// replacing the pass as follows:
19//
20// ScheduleDAGInstrs *<Target>TargetMachine::
21// createMachineScheduler(MachineSchedContext *C) {
22// return new CustomMachineScheduler(C);
23// }
24//
25// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
26// scheduling while updating the instruction stream, register pressure, and live
27// intervals. Most targets don't need to override the DAG builder and list
28// scheduler, but subtargets that require custom scheduling heuristics may
29// plugin an alternate MachineSchedStrategy. The strategy is responsible for
30// selecting the highest priority node from the list:
31//
32// ScheduleDAGInstrs *<Target>TargetMachine::
33// createMachineScheduler(MachineSchedContext *C) {
34// return new ScheduleDAGMILive(C, CustomStrategy(C));
35// }
36//
37// The DAG builder can also be customized in a sense by adding DAG mutations
38// that will run after DAG building and before list scheduling. DAG mutations
39// can adjust dependencies based on target-specific knowledge or add weak edges
40// to aid heuristics:
41//
42// ScheduleDAGInstrs *<Target>TargetMachine::
43// createMachineScheduler(MachineSchedContext *C) {
44// ScheduleDAGMI *DAG = createGenericSchedLive(C);
45// DAG->addMutation(new CustomDAGMutation(...));
46// return DAG;
47// }
48//
49// A target that supports alternative schedulers can use the
50// MachineSchedRegistry to allow command line selection. This can be done by
51// implementing the following boilerplate:
52//
53// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
54// return new CustomMachineScheduler(C);
55// }
56// static MachineSchedRegistry
57// SchedCustomRegistry("custom", "Run my target's custom scheduler",
58// createCustomMachineSched);
59//
60//
61// Finally, subtargets that don't need to implement custom heuristics but would
62// like to configure the GenericScheduler's policy for a given scheduler region,
63// including scheduling direction and register pressure tracking policy, can do
64// this:
65//
66// void <SubTarget>Subtarget::
67// overrideSchedPolicy(MachineSchedPolicy &Policy,
68// unsigned NumRegionInstrs) const {
69// Policy.<Flag> = true;
70// }
71//
72//===----------------------------------------------------------------------===//
73
74#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
75#define LLVM_CODEGEN_MACHINESCHEDULER_H
76
77#include "llvm/ADT/APInt.h"
78#include "llvm/ADT/ArrayRef.h"
79#include "llvm/ADT/BitVector.h"
80#include "llvm/ADT/STLExtras.h"
82#include "llvm/ADT/StringRef.h"
83#include "llvm/ADT/Twine.h"
93#include <algorithm>
94#include <cassert>
96#include <memory>
97#include <string>
98#include <vector>
99
100namespace llvm {
101namespace impl_detail {
102// FIXME: Remove these declarations once RegisterClassInfo is queryable as an
103// analysis.
106} // namespace impl_detail
107
108namespace MISched {
114};
115} // namespace MISched
116
117extern cl::opt<MISched::Direction> PreRADirection;
118extern cl::opt<bool> VerifyScheduling;
119#ifndef NDEBUG
120extern cl::opt<bool> ViewMISchedDAGs;
121extern cl::opt<bool> PrintDAGs;
122#else
123extern const bool ViewMISchedDAGs;
124extern const bool PrintDAGs;
125#endif
126
127class AAResults;
128class LiveIntervals;
129class MachineDominatorTree;
130class MachineFunction;
131class MachineInstr;
132class MachineLoopInfo;
133class RegisterClassInfo;
134class SchedDFSResult;
135class ScheduleHazardRecognizer;
136class TargetInstrInfo;
137class TargetPassConfig;
138class TargetRegisterInfo;
139
140/// MachineSchedContext provides enough context from the MachineScheduler pass
141/// for the target to instantiate a scheduler.
143 MachineFunction *MF = nullptr;
144 const MachineLoopInfo *MLI = nullptr;
145 const MachineDominatorTree *MDT = nullptr;
146 const TargetMachine *TM = nullptr;
147 AAResults *AA = nullptr;
148 LiveIntervals *LIS = nullptr;
149
151
155 virtual ~MachineSchedContext();
156};
157
158/// MachineSchedRegistry provides a selection of available machine instruction
159/// schedulers.
162 ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
163public:
165
166 // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
168
170
171 MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
173 Registry.Add(this);
174 }
175
176 ~MachineSchedRegistry() { Registry.Remove(this); }
177
178 // Accessors.
179 //
182 }
183
185 return (MachineSchedRegistry *)Registry.getList();
186 }
187
189 Registry.setListener(L);
190 }
191};
192
193class ScheduleDAGMI;
194
195/// Define a generic scheduling policy for targets that don't provide their own
196/// MachineSchedStrategy. This can be overriden for each scheduling region
197/// before building the DAG.
199 // Allow the scheduler to disable register pressure tracking.
201 /// Track LaneMasks to allow reordering of independent subregister writes
202 /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
204
205 // Allow the scheduler to force top-down or bottom-up scheduling. If neither
206 // is true, the scheduler runs in both directions and converges.
207 bool OnlyTopDown = false;
208 bool OnlyBottomUp = false;
209
210 // Disable heuristic that tries to fetch nodes from long dependency chains
211 // first.
213
214 // Compute DFSResult for use in scheduling heuristics.
215 bool ComputeDFSResult = false;
216
218};
219
220/// MachineSchedStrategy - Interface to the scheduling algorithm used by
221/// ScheduleDAGMI.
222///
223/// Initialization sequence:
224/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
226 virtual void anchor();
227
228public:
229 virtual ~MachineSchedStrategy() = default;
230
231 /// Optionally override the per-region scheduling policy.
234 unsigned NumRegionInstrs) {}
235
236 virtual MachineSchedPolicy getPolicy() const { return {}; }
237 virtual void dumpPolicy() const {}
238
239 /// Check if pressure tracking is needed before building the DAG and
240 /// initializing this strategy. Called after initPolicy.
241 virtual bool shouldTrackPressure() const { return true; }
242
243 /// Returns true if lanemasks should be tracked. LaneMask tracking is
244 /// necessary to reorder independent subregister defs for the same vreg.
245 /// This has to be enabled in combination with shouldTrackPressure().
246 virtual bool shouldTrackLaneMasks() const { return false; }
247
248 // If this method returns true, handling of the scheduling regions
249 // themselves (in case of a scheduling boundary in MBB) will be done
250 // beginning with the topmost region of MBB.
251 virtual bool doMBBSchedRegionsTopDown() const { return false; }
252
253 /// Initialize the strategy after building the DAG for a new region.
254 virtual void initialize(ScheduleDAGMI *DAG) = 0;
255
256 /// Tell the strategy that MBB is about to be processed.
257 virtual void enterMBB(MachineBasicBlock *MBB) {};
258
259 /// Tell the strategy that current MBB is done.
260 virtual void leaveMBB() {};
261
262 /// Notify this strategy that all roots have been released (including those
263 /// that depend on EntrySU or ExitSU).
264 virtual void registerRoots() {}
265
266 /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
267 /// schedule the node at the top of the unscheduled region. Otherwise it will
268 /// be scheduled at the bottom.
269 virtual SUnit *pickNode(bool &IsTopNode) = 0;
270
271 /// Scheduler callback to notify that a new subtree is scheduled.
272 virtual void scheduleTree(unsigned SubtreeID) {}
273
274 /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
275 /// instruction and updated scheduled/remaining flags in the DAG nodes.
276 virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
277
278 /// When all predecessor dependencies have been resolved, free this node for
279 /// top-down scheduling.
280 virtual void releaseTopNode(SUnit *SU) = 0;
281
282 /// When all successor dependencies have been resolved, free this node for
283 /// bottom-up scheduling.
284 virtual void releaseBottomNode(SUnit *SU) = 0;
285};
286
287/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
288/// schedules machine instructions according to the given MachineSchedStrategy
289/// without much extra book-keeping. This is the common functionality between
290/// PreRA and PostRA MachineScheduler.
292protected:
295 std::unique_ptr<MachineSchedStrategy> SchedImpl;
296
297 /// Ordered list of DAG postprocessing steps.
298 std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
299
300 /// The top of the unscheduled zone.
302
303 /// The bottom of the unscheduled zone.
305
306 /// Record the next node in a scheduled cluster.
307 const SUnit *NextClusterPred = nullptr;
308 const SUnit *NextClusterSucc = nullptr;
309
310#if LLVM_ENABLE_ABI_BREAKING_CHECKS
311 /// The number of instructions scheduled so far. Used to cut off the
312 /// scheduler at the point determined by misched-cutoff.
313 unsigned NumInstrsScheduled = 0;
314#endif
315
316public:
317 ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
318 bool RemoveKillFlags)
320 LIS(C->LIS), SchedImpl(std::move(S)) {}
321
322 // Provide a vtable anchor
323 ~ScheduleDAGMI() override;
324
325 /// If this method returns true, handling of the scheduling regions
326 /// themselves (in case of a scheduling boundary in MBB) will be done
327 /// beginning with the topmost region of MBB.
328 bool doMBBSchedRegionsTopDown() const override {
329 return SchedImpl->doMBBSchedRegionsTopDown();
330 }
331
332 // Returns LiveIntervals instance for use in DAG mutators and such.
333 LiveIntervals *getLIS() const { return LIS; }
334
335 /// Return true if this DAG supports VReg liveness and RegPressure.
336 virtual bool hasVRegLiveness() const { return false; }
337
338 /// Add a postprocessing step to the DAG builder.
339 /// Mutations are applied in the order that they are added after normal DAG
340 /// building and before MachineSchedStrategy initialization.
341 ///
342 /// ScheduleDAGMI takes ownership of the Mutation object.
343 void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
344 if (Mutation)
345 Mutations.push_back(std::move(Mutation));
346 }
347
350
351 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
352 /// region. This covers all instructions in a block, while schedule() may only
353 /// cover a subset.
357 unsigned regioninstrs) override;
358
359 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
360 /// reorderable instructions.
361 void schedule() override;
362
363 void startBlock(MachineBasicBlock *bb) override;
364 void finishBlock() override;
365
366 /// Change the position of an instruction within the basic block and update
367 /// live ranges and region boundary iterators.
369
370 const SUnit *getNextClusterPred() const { return NextClusterPred; }
371
372 const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
373
374 void viewGraph(const Twine &Name, const Twine &Title) override;
375 void viewGraph() override;
376
377protected:
378 // Top-Level entry points for the schedule() driver...
379
380 /// Apply each ScheduleDAGMutation step in order. This allows different
381 /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
382 void postProcessDAG();
383
384 /// Release ExitSU predecessors and setup scheduler queues.
385 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
386
387 /// Update scheduler DAG and queues after scheduling an instruction.
388 void updateQueues(SUnit *SU, bool IsTopNode);
389
390 /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
391 void placeDebugValues();
392
393 /// dump the scheduled Sequence.
394 void dumpSchedule() const;
395 /// Print execution trace of the schedule top-down or bottom-up.
396 void dumpScheduleTraceTopDown() const;
397 void dumpScheduleTraceBottomUp() const;
398
399 // Lesser helpers...
400 bool checkSchedLimit();
401
403 SmallVectorImpl<SUnit*> &BotRoots);
404
405 void releaseSucc(SUnit *SU, SDep *SuccEdge);
406 void releaseSuccessors(SUnit *SU);
407 void releasePred(SUnit *SU, SDep *PredEdge);
408 void releasePredecessors(SUnit *SU);
409};
410
411/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
412/// machine instructions while updating LiveIntervals and tracking regpressure.
414protected:
416
417 /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
418 /// will be empty.
421
423
424 /// Maps vregs to the SUnits of their uses in the current scheduling region.
426
427 // Map each SU to its summary of pressure changes. This array is updated for
428 // liveness during bottom-up scheduling. Top-down scheduling may proceed but
429 // has no affect on the pressure diffs.
431
432 /// Register pressure in this region computed by initRegPressure.
437
438 /// List of pressure sets that exceed the target's pressure limit before
439 /// scheduling, listed in increasing set ID order. Each pressure set is paired
440 /// with its max pressure in the currently scheduled regions.
441 std::vector<PressureChange> RegionCriticalPSets;
442
443 /// The top of the unscheduled zone.
446
447 /// The bottom of the unscheduled zone.
450
451public:
453 std::unique_ptr<MachineSchedStrategy> S)
454 : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
457
458 ~ScheduleDAGMILive() override;
459
460 /// Return true if this DAG supports VReg liveness and RegPressure.
461 bool hasVRegLiveness() const override { return true; }
462
463 /// Return true if register pressure tracking is enabled.
465
466 /// Get current register pressure for the top scheduled instructions.
467 const IntervalPressure &getTopPressure() const { return TopPressure; }
469
470 /// Get current register pressure for the bottom scheduled instructions.
471 const IntervalPressure &getBotPressure() const { return BotPressure; }
473
474 /// Get register pressure for the entire scheduling region before scheduling.
475 const IntervalPressure &getRegPressure() const { return RegPressure; }
476
477 const std::vector<PressureChange> &getRegionCriticalPSets() const {
478 return RegionCriticalPSets;
479 }
480
482 return SUPressureDiffs[SU->NodeNum];
483 }
484 const PressureDiff &getPressureDiff(const SUnit *SU) const {
485 return SUPressureDiffs[SU->NodeNum];
486 }
487
488 /// Compute a DFSResult after DAG building is complete, and before any
489 /// queue comparisons.
490 void computeDFSResult();
491
492 /// Return a non-null DFS result if the scheduling strategy initialized it.
493 const SchedDFSResult *getDFSResult() const { return DFSResult; }
494
496
497 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
498 /// region. This covers all instructions in a block, while schedule() may only
499 /// cover a subset.
503 unsigned regioninstrs) override;
504
505 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
506 /// reorderable instructions.
507 void schedule() override;
508
509 /// Compute the cyclic critical path through the DAG.
510 unsigned computeCyclicCriticalPath();
511
512 void dump() const override;
513
514protected:
515 // Top-Level entry points for the schedule() driver...
516
517 /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
518 /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
519 /// region, TopTracker and BottomTracker will be initialized to the top and
520 /// bottom of the DAG region without covereing any unscheduled instruction.
522
523 /// Release ExitSU predecessors and setup scheduler queues. Re-position
524 /// the Top RP tracker in case the region beginning has changed.
525 void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
526
527 /// Move an instruction and update register pressure.
528 void scheduleMI(SUnit *SU, bool IsTopNode);
529
530 // Lesser helpers...
531
532 void initRegPressure();
533
535
536 void updateScheduledPressure(const SUnit *SU,
537 const std::vector<unsigned> &NewMaxPressure);
538
539 void collectVRegUses(SUnit &SU);
540};
541
542//===----------------------------------------------------------------------===//
543///
544/// Helpers for implementing custom MachineSchedStrategy classes. These take
545/// care of the book-keeping associated with list scheduling heuristics.
546///
547//===----------------------------------------------------------------------===//
548
549/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
550/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
551/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
552///
553/// This is a convenience class that may be used by implementations of
554/// MachineSchedStrategy.
556 unsigned ID;
557 std::string Name;
558 std::vector<SUnit*> Queue;
559
560public:
561 ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
562
563 unsigned getID() const { return ID; }
564
565 StringRef getName() const { return Name; }
566
567 // SU is in this queue if it's NodeQueueID is a superset of this ID.
568 bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
569
570 bool empty() const { return Queue.empty(); }
571
572 void clear() { Queue.clear(); }
573
574 unsigned size() const { return Queue.size(); }
575
576 using iterator = std::vector<SUnit*>::iterator;
577
578 iterator begin() { return Queue.begin(); }
579
580 iterator end() { return Queue.end(); }
581
582 ArrayRef<SUnit*> elements() { return Queue; }
583
584 iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
585
586 void push(SUnit *SU) {
587 Queue.push_back(SU);
588 SU->NodeQueueId |= ID;
589 }
590
592 (*I)->NodeQueueId &= ~ID;
593 *I = Queue.back();
594 unsigned idx = I - Queue.begin();
595 Queue.pop_back();
596 return Queue.begin() + idx;
597 }
598
599 void dump() const;
600};
601
602/// Summarize the unscheduled region.
604 // Critical path through the DAG in expected latency.
605 unsigned CriticalPath;
607
608 // Scaled count of micro-ops left to schedule.
610
612
613 // Unscheduled resources
615
617
618 void reset() {
619 CriticalPath = 0;
620 CyclicCritPath = 0;
621 RemIssueCount = 0;
624 }
625
626 void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
627};
628
629/// ResourceSegments are a collection of intervals closed on the
630/// left and opened on the right:
631///
632/// list{ [a1, b1), [a2, b2), ..., [a_N, b_N) }
633///
634/// The collection has the following properties:
635///
636/// 1. The list is ordered: a_i < b_i and b_i < a_(i+1)
637///
638/// 2. The intervals in the collection do not intersect each other.
639///
640/// A \ref ResourceSegments instance represents the cycle
641/// reservation history of the instance of and individual resource.
643public:
644 /// Represents an interval of discrete integer values closed on
645 /// the left and open on the right: [a, b).
646 typedef std::pair<int64_t, int64_t> IntervalTy;
647
648 /// Adds an interval [a, b) to the collection of the instance.
649 ///
650 /// When adding [a, b[ to the collection, the operation merges the
651 /// adjacent intervals. For example
652 ///
653 /// 0 1 2 3 4 5 6 7 8 9 10
654 /// [-----) [--) [--)
655 /// + [--)
656 /// = [-----------) [--)
657 ///
658 /// To be able to debug duplicate resource usage, the function has
659 /// assertion that checks that no interval should be added if it
660 /// overlaps any of the intervals in the collection. We can
661 /// require this because by definition a \ref ResourceSegments is
662 /// attached only to an individual resource instance.
663 void add(IntervalTy A, const unsigned CutOff = 10);
664
665public:
666 /// Checks whether intervals intersect.
667 static bool intersects(IntervalTy A, IntervalTy B);
668
669 /// These function return the interval used by a resource in bottom and top
670 /// scheduling.
671 ///
672 /// Consider an instruction that uses resources X0, X1 and X2 as follows:
673 ///
674 /// X0 X1 X1 X2 +--------+-------------+--------------+
675 /// |Resource|AcquireAtCycle|ReleaseAtCycle|
676 /// +--------+-------------+--------------+
677 /// | X0 | 0 | 1 |
678 /// +--------+-------------+--------------+
679 /// | X1 | 1 | 3 |
680 /// +--------+-------------+--------------+
681 /// | X2 | 3 | 4 |
682 /// +--------+-------------+--------------+
683 ///
684 /// If we can schedule the instruction at cycle C, we need to
685 /// compute the interval of the resource as follows:
686 ///
687 /// # TOP DOWN SCHEDULING
688 ///
689 /// Cycles scheduling flows to the _right_, in the same direction
690 /// of time.
691 ///
692 /// C 1 2 3 4 5 ...
693 /// ------|------|------|------|------|------|----->
694 /// X0 X1 X1 X2 ---> direction of time
695 /// X0 [C, C+1)
696 /// X1 [C+1, C+3)
697 /// X2 [C+3, C+4)
698 ///
699 /// Therefore, the formula to compute the interval for a resource
700 /// of an instruction that can be scheduled at cycle C in top-down
701 /// scheduling is:
702 ///
703 /// [C+AcquireAtCycle, C+ReleaseAtCycle)
704 ///
705 ///
706 /// # BOTTOM UP SCHEDULING
707 ///
708 /// Cycles scheduling flows to the _left_, in opposite direction
709 /// of time.
710 ///
711 /// In bottom up scheduling, the scheduling happens in opposite
712 /// direction to the execution of the cycles of the
713 /// instruction. When the instruction is scheduled at cycle `C`,
714 /// the resources are allocated in the past relative to `C`:
715 ///
716 /// 2 1 C -1 -2 -3 -4 -5 ...
717 /// <-----|------|------|------|------|------|------|------|---
718 /// X0 X1 X1 X2 ---> direction of time
719 /// X0 (C+1, C]
720 /// X1 (C, C-2]
721 /// X2 (C-2, C-3]
722 ///
723 /// Therefore, the formula to compute the interval for a resource
724 /// of an instruction that can be scheduled at cycle C in bottom-up
725 /// scheduling is:
726 ///
727 /// [C-ReleaseAtCycle+1, C-AcquireAtCycle+1)
728 ///
729 ///
730 /// NOTE: In both cases, the number of cycles booked by a
731 /// resources is the value (ReleaseAtCycle - AcquireAtCycle).
732 static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle,
733 unsigned ReleaseAtCycle) {
734 return std::make_pair<long, long>((long)C - (long)ReleaseAtCycle + 1L,
735 (long)C - (long)AcquireAtCycle + 1L);
736 }
737 static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle,
738 unsigned ReleaseAtCycle) {
739 return std::make_pair<long, long>((long)C + (long)AcquireAtCycle,
740 (long)C + (long)ReleaseAtCycle);
741 }
742
743private:
744 /// Finds the first cycle in which a resource can be allocated.
745 ///
746 /// The function uses the \param IntervalBuider [*] to build a
747 /// resource interval [a, b[ out of the input parameters \param
748 /// CurrCycle, \param AcquireAtCycle and \param ReleaseAtCycle.
749 ///
750 /// The function then loops through the intervals in the ResourceSegments
751 /// and shifts the interval [a, b[ and the ReturnCycle to the
752 /// right until there is no intersection between the intervals of
753 /// the \ref ResourceSegments instance and the new shifted [a, b[. When
754 /// this condition is met, the ReturnCycle (which
755 /// correspond to the cycle in which the resource can be
756 /// allocated) is returned.
757 ///
758 /// c = CurrCycle in input
759 /// c 1 2 3 4 5 6 7 8 9 10 ... ---> (time
760 /// flow)
761 /// ResourceSegments... [---) [-------) [-----------)
762 /// c [1 3[ -> AcquireAtCycle=1, ReleaseAtCycle=3
763 /// ++c [1 3)
764 /// ++c [1 3)
765 /// ++c [1 3)
766 /// ++c [1 3)
767 /// ++c [1 3) ---> returns c
768 /// incremented by 5 (c+5)
769 ///
770 ///
771 /// Notice that for bottom-up scheduling the diagram is slightly
772 /// different because the current cycle c is always on the right
773 /// of the interval [a, b) (see \ref
774 /// `getResourceIntervalBottom`). This is because the cycle
775 /// increments for bottom-up scheduling moved in the direction
776 /// opposite to the direction of time:
777 ///
778 /// --------> direction of time.
779 /// XXYZZZ (resource usage)
780 /// --------> direction of top-down execution cycles.
781 /// <-------- direction of bottom-up execution cycles.
782 ///
783 /// Even though bottom-up scheduling moves against the flow of
784 /// time, the algorithm used to find the first free slot in between
785 /// intervals is the same as for top-down scheduling.
786 ///
787 /// [*] See \ref `getResourceIntervalTop` and
788 /// \ref `getResourceIntervalBottom` to see how such resource intervals
789 /// are built.
790 unsigned getFirstAvailableAt(
791 unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle,
792 std::function<IntervalTy(unsigned, unsigned, unsigned)> IntervalBuilder)
793 const;
794
795public:
796 /// getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop
797 /// should be merged in a single function in which a function that
798 /// creates the `NewInterval` is passed as a parameter.
799 unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle,
800 unsigned AcquireAtCycle,
801 unsigned ReleaseAtCycle) const {
802 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
804 }
805 unsigned getFirstAvailableAtFromTop(unsigned CurrCycle,
806 unsigned AcquireAtCycle,
807 unsigned ReleaseAtCycle) const {
808 return getFirstAvailableAt(CurrCycle, AcquireAtCycle, ReleaseAtCycle,
810 }
811
812private:
813 std::list<IntervalTy> _Intervals;
814 /// Merge all adjacent intervals in the collection. For all pairs
815 /// of adjacient intervals, it performs [a, b) + [b, c) -> [a, c).
816 ///
817 /// Before performing the merge operation, the intervals are
818 /// sorted with \ref sort_predicate.
819 void sortAndMerge();
820
821public:
822 // constructor for empty set
823 explicit ResourceSegments(){};
824 bool empty() const { return _Intervals.empty(); }
825 explicit ResourceSegments(const std::list<IntervalTy> &Intervals)
826 : _Intervals(Intervals) {
827 sortAndMerge();
828 }
829
830 friend bool operator==(const ResourceSegments &c1,
831 const ResourceSegments &c2) {
832 return c1._Intervals == c2._Intervals;
833 }
835 const ResourceSegments &Segments) {
836 os << "{ ";
837 for (auto p : Segments._Intervals)
838 os << "[" << p.first << ", " << p.second << "), ";
839 os << "}\n";
840 return os;
841 }
842};
843
844/// Each Scheduling boundary is associated with ready queues. It tracks the
845/// current cycle in the direction of movement, and maintains the state
846/// of "hazards" and other interlocks at the current cycle.
848public:
849 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
850 enum {
853 LogMaxQID = 2
854 };
855
856 ScheduleDAGMI *DAG = nullptr;
857 const TargetSchedModel *SchedModel = nullptr;
858 SchedRemainder *Rem = nullptr;
859
862
864
865private:
866 /// True if the pending Q should be checked/updated before scheduling another
867 /// instruction.
868 bool CheckPending;
869
870 /// Number of cycles it takes to issue the instructions scheduled in this
871 /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
872 /// See getStalls().
873 unsigned CurrCycle;
874
875 /// Micro-ops issued in the current cycle
876 unsigned CurrMOps;
877
878 /// MinReadyCycle - Cycle of the soonest available instruction.
879 unsigned MinReadyCycle;
880
881 // The expected latency of the critical path in this scheduled zone.
882 unsigned ExpectedLatency;
883
884 // The latency of dependence chains leading into this zone.
885 // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
886 // For each cycle scheduled: DLat -= 1.
887 unsigned DependentLatency;
888
889 /// Count the scheduled (issued) micro-ops that can be retired by
890 /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
891 unsigned RetiredMOps;
892
893 // Count scheduled resources that have been executed. Resources are
894 // considered executed if they become ready in the time that it takes to
895 // saturate any resource including the one in question. Counts are scaled
896 // for direct comparison with other resources. Counts can be compared with
897 // MOps * getMicroOpFactor and Latency * getLatencyFactor.
898 SmallVector<unsigned, 16> ExecutedResCounts;
899
900 /// Cache the max count for a single resource.
901 unsigned MaxExecutedResCount;
902
903 // Cache the critical resources ID in this scheduled zone.
904 unsigned ZoneCritResIdx;
905
906 // Is the scheduled region resource limited vs. latency limited.
907 bool IsResourceLimited;
908
909public:
910private:
911 /// Record how resources have been allocated across the cycles of
912 /// the execution.
913 std::map<unsigned, ResourceSegments> ReservedResourceSegments;
914 std::vector<unsigned> ReservedCycles;
915 /// For each PIdx, stores first index into ReservedResourceSegments that
916 /// corresponds to it.
917 ///
918 /// For example, consider the following 3 resources (ResourceCount =
919 /// 3):
920 ///
921 /// +------------+--------+
922 /// |ResourceName|NumUnits|
923 /// +------------+--------+
924 /// | X | 2 |
925 /// +------------+--------+
926 /// | Y | 3 |
927 /// +------------+--------+
928 /// | Z | 1 |
929 /// +------------+--------+
930 ///
931 /// In this case, the total number of resource instances is 6. The
932 /// vector \ref ReservedResourceSegments will have a slot for each instance.
933 /// The vector \ref ReservedCyclesIndex will track at what index the first
934 /// instance of the resource is found in the vector of \ref
935 /// ReservedResourceSegments:
936 ///
937 /// Indexes of instances in
938 /// ReservedResourceSegments
939 ///
940 /// 0 1 2 3 4 5
941 /// ReservedCyclesIndex[0] = 0; [X0, X1,
942 /// ReservedCyclesIndex[1] = 2; Y0, Y1, Y2
943 /// ReservedCyclesIndex[2] = 5; Z
944 SmallVector<unsigned, 16> ReservedCyclesIndex;
945
946 // For each PIdx, stores the resource group IDs of its subunits
947 SmallVector<APInt, 16> ResourceGroupSubUnitMasks;
948
949#if LLVM_ENABLE_ABI_BREAKING_CHECKS
950 // Remember the greatest possible stall as an upper bound on the number of
951 // times we should retry the pending queue because of a hazard.
952 unsigned MaxObservedStall;
953#endif
954
955public:
956 /// Pending queues extend the ready queues with the same ID and the
957 /// PendingFlag set.
958 SchedBoundary(unsigned ID, const Twine &Name):
959 Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
960 reset();
961 }
962 SchedBoundary &operator=(const SchedBoundary &other) = delete;
963 SchedBoundary(const SchedBoundary &other) = delete;
965
966 void reset();
967
968 void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
970
971 bool isTop() const {
972 return Available.getID() == TopQID;
973 }
974
975 /// Number of cycles to issue the instructions scheduled in this zone.
976 unsigned getCurrCycle() const { return CurrCycle; }
977
978 /// Micro-ops issued in the current cycle
979 unsigned getCurrMOps() const { return CurrMOps; }
980
981 // The latency of dependence chains leading into this zone.
982 unsigned getDependentLatency() const { return DependentLatency; }
983
984 /// Get the number of latency cycles "covered" by the scheduled
985 /// instructions. This is the larger of the critical path within the zone
986 /// and the number of cycles required to issue the instructions.
987 unsigned getScheduledLatency() const {
988 return std::max(ExpectedLatency, CurrCycle);
989 }
990
991 unsigned getUnscheduledLatency(SUnit *SU) const {
992 return isTop() ? SU->getHeight() : SU->getDepth();
993 }
994
995 unsigned getResourceCount(unsigned ResIdx) const {
996 return ExecutedResCounts[ResIdx];
997 }
998
999 /// Get the scaled count of scheduled micro-ops and resources, including
1000 /// executed resources.
1001 unsigned getCriticalCount() const {
1002 if (!ZoneCritResIdx)
1003 return RetiredMOps * SchedModel->getMicroOpFactor();
1004 return getResourceCount(ZoneCritResIdx);
1005 }
1006
1007 /// Get a scaled count for the minimum execution time of the scheduled
1008 /// micro-ops that are ready to execute by getExecutedCount. Notice the
1009 /// feedback loop.
1010 unsigned getExecutedCount() const {
1011 return std::max(CurrCycle * SchedModel->getLatencyFactor(),
1012 MaxExecutedResCount);
1013 }
1014
1015 unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
1016
1017 // Is the scheduled region resource limited vs. latency limited.
1018 bool isResourceLimited() const { return IsResourceLimited; }
1019
1020 /// Get the difference between the given SUnit's ready time and the current
1021 /// cycle.
1022 unsigned getLatencyStallCycles(SUnit *SU);
1023
1024 unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
1025 unsigned ReleaseAtCycle,
1026 unsigned AcquireAtCycle);
1027
1028 std::pair<unsigned, unsigned> getNextResourceCycle(const MCSchedClassDesc *SC,
1029 unsigned PIdx,
1030 unsigned ReleaseAtCycle,
1031 unsigned AcquireAtCycle);
1032
1033 bool isUnbufferedGroup(unsigned PIdx) const {
1036 }
1037
1038 bool checkHazard(SUnit *SU);
1039
1040 unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
1041
1042 unsigned getOtherResourceCount(unsigned &OtherCritIdx);
1043
1044 /// Release SU to make it ready. If it's not in hazard, remove it from
1045 /// pending queue (if already in) and push into available queue.
1046 /// Otherwise, push the SU into pending queue.
1047 ///
1048 /// @param SU The unit to be released.
1049 /// @param ReadyCycle Until which cycle the unit is ready.
1050 /// @param InPQueue Whether SU is already in pending queue.
1051 /// @param Idx Position offset in pending queue (if in it).
1052 void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
1053 unsigned Idx = 0);
1054
1055 void bumpCycle(unsigned NextCycle);
1056
1057 void incExecutedResources(unsigned PIdx, unsigned Count);
1058
1059 unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
1060 unsigned Cycles, unsigned ReadyCycle,
1061 unsigned StartAtCycle);
1062
1063 void bumpNode(SUnit *SU);
1064
1065 void releasePending();
1066
1067 void removeReady(SUnit *SU);
1068
1069 /// Call this before applying any other heuristics to the Available queue.
1070 /// Updates the Available/Pending Q's if necessary and returns the single
1071 /// available instruction, or NULL if there are multiple candidates.
1073
1074 /// Dump the state of the information that tracks resource usage.
1075 void dumpReservedCycles() const;
1076 void dumpScheduledState() const;
1077};
1078
1079/// Base class for GenericScheduler. This class maintains information about
1080/// scheduling candidates based on TargetSchedModel making it easy to implement
1081/// heuristics for either preRA or postRA scheduling.
1083public:
1084 /// Represent the type of SchedCandidate found within a single queue.
1085 /// pickNodeBidirectional depends on these listed by decreasing priority.
1090
1091#ifndef NDEBUG
1092 static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
1093#endif
1094
1095 /// Policy for scheduling the next instruction in the candidate's zone.
1096 struct CandPolicy {
1097 bool ReduceLatency = false;
1098 unsigned ReduceResIdx = 0;
1099 unsigned DemandResIdx = 0;
1100
1101 CandPolicy() = default;
1102
1103 bool operator==(const CandPolicy &RHS) const {
1104 return ReduceLatency == RHS.ReduceLatency &&
1105 ReduceResIdx == RHS.ReduceResIdx &&
1106 DemandResIdx == RHS.DemandResIdx;
1107 }
1108 bool operator!=(const CandPolicy &RHS) const {
1109 return !(*this == RHS);
1110 }
1111 };
1112
1113 /// Status of an instruction's critical resource consumption.
1115 // Count critical resources in the scheduled region required by SU.
1116 unsigned CritResources = 0;
1117
1118 // Count critical resources from another region consumed by SU.
1119 unsigned DemandedResources = 0;
1120
1122
1123 bool operator==(const SchedResourceDelta &RHS) const {
1124 return CritResources == RHS.CritResources
1125 && DemandedResources == RHS.DemandedResources;
1126 }
1127 bool operator!=(const SchedResourceDelta &RHS) const {
1128 return !operator==(RHS);
1129 }
1130 };
1131
1132 /// Store the state used by GenericScheduler heuristics, required for the
1133 /// lifetime of one invocation of pickNode().
1136
1137 // The best SUnit candidate.
1139
1140 // The reason for this candidate.
1142
1143 // Whether this candidate should be scheduled at top/bottom.
1144 bool AtTop;
1145
1146 // Register pressure values for the best candidate.
1148
1149 // Critical resource consumption of the best candidate.
1151
1154
1155 void reset(const CandPolicy &NewPolicy) {
1156 Policy = NewPolicy;
1157 SU = nullptr;
1158 Reason = NoCand;
1159 AtTop = false;
1162 }
1163
1164 bool isValid() const { return SU; }
1165
1166 // Copy the status of another candidate without changing policy.
1168 assert(Best.Reason != NoCand && "uninitialized Sched candidate");
1169 SU = Best.SU;
1170 Reason = Best.Reason;
1171 AtTop = Best.AtTop;
1172 RPDelta = Best.RPDelta;
1173 ResDelta = Best.ResDelta;
1174 }
1175
1176 void initResourceDelta(const ScheduleDAGMI *DAG,
1178 };
1179
1180protected:
1183 const TargetRegisterInfo *TRI = nullptr;
1184
1186
1188
1190
1191 void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
1192 SchedBoundary *OtherZone);
1193
1194 MachineSchedPolicy getPolicy() const override { return RegionPolicy; }
1195
1196#ifndef NDEBUG
1197 void traceCandidate(const SchedCandidate &Cand);
1198#endif
1199
1200private:
1201 bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
1202 bool ComputeRemLatency, unsigned &RemLatency) const;
1203};
1204
1205// Utility functions used by heuristics in tryCandidate().
1206bool tryLess(int TryVal, int CandVal,
1207 GenericSchedulerBase::SchedCandidate &TryCand,
1208 GenericSchedulerBase::SchedCandidate &Cand,
1210bool tryGreater(int TryVal, int CandVal,
1211 GenericSchedulerBase::SchedCandidate &TryCand,
1212 GenericSchedulerBase::SchedCandidate &Cand,
1214bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
1215 GenericSchedulerBase::SchedCandidate &Cand,
1216 SchedBoundary &Zone);
1217bool tryPressure(const PressureChange &TryP,
1218 const PressureChange &CandP,
1219 GenericSchedulerBase::SchedCandidate &TryCand,
1220 GenericSchedulerBase::SchedCandidate &Cand,
1222 const TargetRegisterInfo *TRI,
1223 const MachineFunction &MF);
1224unsigned getWeakLeft(const SUnit *SU, bool isTop);
1225int biasPhysReg(const SUnit *SU, bool isTop);
1226
1227/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
1228/// the schedule.
1230public:
1232 GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1233 Bot(SchedBoundary::BotQID, "BotQ") {}
1234
1237 unsigned NumRegionInstrs) override;
1238
1239 void dumpPolicy() const override;
1240
1241 bool shouldTrackPressure() const override {
1243 }
1244
1245 bool shouldTrackLaneMasks() const override {
1247 }
1248
1249 void initialize(ScheduleDAGMI *dag) override;
1250
1251 SUnit *pickNode(bool &IsTopNode) override;
1252
1253 void schedNode(SUnit *SU, bool IsTopNode) override;
1254
1255 void releaseTopNode(SUnit *SU) override {
1256 if (SU->isScheduled)
1257 return;
1258
1259 Top.releaseNode(SU, SU->TopReadyCycle, false);
1260 TopCand.SU = nullptr;
1261 }
1262
1263 void releaseBottomNode(SUnit *SU) override {
1264 if (SU->isScheduled)
1265 return;
1266
1267 Bot.releaseNode(SU, SU->BotReadyCycle, false);
1268 BotCand.SU = nullptr;
1269 }
1270
1271 void registerRoots() override;
1272
1273protected:
1275
1276 // State of the top and bottom scheduled instruction boundaries.
1279
1280 /// Candidate last picked from Top boundary.
1282 /// Candidate last picked from Bot boundary.
1284
1285 void checkAcyclicLatency();
1286
1287 void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
1288 const RegPressureTracker &RPTracker,
1289 RegPressureTracker &TempTracker);
1290
1291 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
1292 SchedBoundary *Zone) const;
1293
1294 SUnit *pickNodeBidirectional(bool &IsTopNode);
1295
1297 const CandPolicy &ZonePolicy,
1298 const RegPressureTracker &RPTracker,
1299 SchedCandidate &Candidate);
1300
1301 void reschedulePhysReg(SUnit *SU, bool isTop);
1302};
1303
1304/// PostGenericScheduler - Interface to the scheduling algorithm used by
1305/// ScheduleDAGMI.
1306///
1307/// Callbacks from ScheduleDAGMI:
1308/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
1310protected:
1311 ScheduleDAGMI *DAG = nullptr;
1314
1315 /// Candidate last picked from Top boundary.
1317 /// Candidate last picked from Bot boundary.
1319
1320public:
1322 : GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
1323 Bot(SchedBoundary::BotQID, "BotQ") {}
1324
1325 ~PostGenericScheduler() override = default;
1326
1329 unsigned NumRegionInstrs) override;
1330
1331 /// PostRA scheduling does not track pressure.
1332 bool shouldTrackPressure() const override { return false; }
1333
1334 void initialize(ScheduleDAGMI *Dag) override;
1335
1336 void registerRoots() override;
1337
1338 SUnit *pickNode(bool &IsTopNode) override;
1339
1340 SUnit *pickNodeBidirectional(bool &IsTopNode);
1341
1342 void scheduleTree(unsigned SubtreeID) override {
1343 llvm_unreachable("PostRA scheduler does not support subtree analysis.");
1344 }
1345
1346 void schedNode(SUnit *SU, bool IsTopNode) override;
1347
1348 void releaseTopNode(SUnit *SU) override {
1349 if (SU->isScheduled)
1350 return;
1351 Top.releaseNode(SU, SU->TopReadyCycle, false);
1352 TopCand.SU = nullptr;
1353 }
1354
1355 void releaseBottomNode(SUnit *SU) override {
1356 if (SU->isScheduled)
1357 return;
1358 Bot.releaseNode(SU, SU->BotReadyCycle, false);
1359 BotCand.SU = nullptr;
1360 }
1361
1362protected:
1363 virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
1364
1365 void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand);
1366};
1367
1368/// Create the standard converging machine scheduler. This will be used as the
1369/// default scheduler if the target does not set a default.
1370/// Adds default DAG mutations.
1371ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);
1372
1373/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
1374ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);
1375
1376/// If ReorderWhileClustering is set to true, no attempt will be made to
1377/// reduce reordering due to load clustering.
1378std::unique_ptr<ScheduleDAGMutation>
1379createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1380                             const TargetRegisterInfo *TRI,
1381                             bool ReorderWhileClustering = false);
1382
1383/// If ReorderWhileClustering is set to true, no attempt will be made to
1384/// reduce reordering due to store clustering.
1385std::unique_ptr<ScheduleDAGMutation>
1386createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1387 const TargetRegisterInfo *TRI,
1388 bool ReorderWhileClustering = false);
1389
1390std::unique_ptr<ScheduleDAGMutation>
1391createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1392 const TargetRegisterInfo *TRI);
1393
1394class MachineSchedulerPass : public PassInfoMixin<MachineSchedulerPass> {
1395 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1396 // analysis.
1397 std::unique_ptr<impl_detail::MachineSchedulerImpl> Impl;
1398 const TargetMachine *TM;
1399
1400public:
1406};
1407
1409 : public PassInfoMixin<PostMachineSchedulerPass> {
1410 // FIXME: Remove this member once RegisterClassInfo is queryable as an
1411 // analysis.
1412 std::unique_ptr<impl_detail::PostMachineSchedulerImpl> Impl;
1413 const TargetMachine *TM;
1414
1415public:
1421};
1422} // end namespace llvm
1423
1424#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx. It should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
std::string Name
bool End
Definition: ELF_riscv.cpp:480
expand large div rem
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
PowerPC VSX FMA Mutation
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static const char * name
Definition: SMEABIPass.cpp:46
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
Value * RHS
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Base class for GenericScheduler.
void traceCandidate(const SchedCandidate &Cand)
void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone, SchedBoundary *OtherZone)
Set the CandPolicy given a scheduling zone given the current resources and latencies inside and outsi...
MachineSchedPolicy RegionPolicy
const TargetSchedModel * SchedModel
static const char * getReasonStr(GenericSchedulerBase::CandReason Reason)
MachineSchedPolicy getPolicy() const override
GenericSchedulerBase(const MachineSchedContext *C)
const MachineSchedContext * Context
CandReason
Represent the type of SchedCandidate found within a single queue.
const TargetRegisterInfo * TRI
GenericScheduler shrinks the unscheduled zone using heuristics to balance the schedule.
void checkAcyclicLatency()
Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic critical path by more cycle...
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const
Apply a set of heuristics to a new candidate.
ScheduleDAGMILive * DAG
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
void dumpPolicy() const override
void initialize(ScheduleDAGMI *dag) override
Initialize the strategy after building the DAG for a new region.
void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop, const RegPressureTracker &RPTracker, RegPressureTracker &TempTracker)
bool shouldTrackPressure() const override
Check if pressure tracking is needed before building the DAG and initializing this strategy.
void registerRoots() override
Notify this strategy that all roots have been released (including those that depend on EntrySU or Exi...
void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs) override
Initialize the per-region scheduling policy.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
void reschedulePhysReg(SUnit *SU, bool isTop)
SUnit * pickNode(bool &IsTopNode) override
Pick the best node to balance the schedule. Implements MachineSchedStrategy.
void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy, const RegPressureTracker &RPTracker, SchedCandidate &Candidate)
Pick the best candidate from the queue.
bool shouldTrackLaneMasks() const override
Returns true if lanemasks should be tracked.
void schedNode(SUnit *SU, bool IsTopNode) override
Update the scheduler's state after scheduling a node.
GenericScheduler(const MachineSchedContext *C)
SUnit * pickNodeBidirectional(bool &IsTopNode)
Pick the best candidate node from either the top or bottom queue.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Representation of each machine instruction.
Definition: MachineInstr.h:71
MachinePassRegistryListener - Listener to adds and removals of nodes in registration list.
MachinePassRegistryNode - Machine pass node stored in registration list.
MachinePassRegistryNode * getNext() const
MachinePassRegistry - Track the registration of machine passes.
MachineSchedRegistry provides a selection of available machine instruction schedulers.
static void setListener(MachinePassRegistryListener< FunctionPassCtor > *L)
static MachinePassRegistry< ScheduleDAGCtor > Registry
ScheduleDAGCtor FunctionPassCtor
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
static MachineSchedRegistry * getList()
ScheduleDAGInstrs *(*)(MachineSchedContext *) ScheduleDAGCtor
MachineSchedRegistry * getNext() const
MachineSchedStrategy - Interface to the scheduling algorithm used by ScheduleDAGMI.
virtual bool shouldTrackPressure() const
Check if pressure tracking is needed before building the DAG and initializing this strategy.
virtual void leaveMBB()
Tell the strategy that current MBB is done.
virtual void enterMBB(MachineBasicBlock *MBB)
Tell the strategy that MBB is about to be processed.
virtual void scheduleTree(unsigned SubtreeID)
Scheduler callback to notify that a new subtree is scheduled.
virtual void schedNode(SUnit *SU, bool IsTopNode)=0
Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an instruction and updated scheduled/rem...
virtual ~MachineSchedStrategy()=default
virtual void initialize(ScheduleDAGMI *DAG)=0
Initialize the strategy after building the DAG for a new region.
virtual MachineSchedPolicy getPolicy() const
virtual void releaseTopNode(SUnit *SU)=0
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
virtual void dumpPolicy() const
virtual bool doMBBSchedRegionsTopDown() const
virtual SUnit * pickNode(bool &IsTopNode)=0
Pick the next node to schedule, or return NULL.
virtual void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs)
Optionally override the per-region scheduling policy.
virtual void releaseBottomNode(SUnit *SU)=0
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
virtual bool shouldTrackLaneMasks() const
Returns true if lanemasks should be tracked.
virtual void registerRoots()
Notify this strategy that all roots have been released (including those that depend on EntrySU or Exi...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
MachineSchedulerPass(MachineSchedulerPass &&Other)
PostGenericScheduler - Interface to the scheduling algorithm used by ScheduleDAGMI.
void initPolicy(MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End, unsigned NumRegionInstrs) override
Optionally override the per-region scheduling policy.
virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand)
Apply a set of heuristics to a new candidate for PostRA scheduling.
bool shouldTrackPressure() const override
PostRA scheduling does not track pressure.
void schedNode(SUnit *SU, bool IsTopNode) override
Called after ScheduleDAGMI has scheduled an instruction and updated scheduled/remaining flags in the ...
void scheduleTree(unsigned SubtreeID) override
Scheduler callback to notify that a new subtree is scheduled.
SchedCandidate BotCand
Candidate last picked from Bot boundary.
void pickNodeFromQueue(SchedBoundary &Zone, SchedCandidate &Cand)
void initialize(ScheduleDAGMI *Dag) override
Initialize the strategy after building the DAG for a new region.
SchedCandidate TopCand
Candidate last picked from Top boundary.
void releaseTopNode(SUnit *SU) override
When all predecessor dependencies have been resolved, free this node for top-down scheduling.
~PostGenericScheduler() override=default
void releaseBottomNode(SUnit *SU) override
When all successor dependencies have been resolved, free this node for bottom-up scheduling.
PostGenericScheduler(const MachineSchedContext *C)
SUnit * pickNodeBidirectional(bool &IsTopNode)
Pick the best candidate node from either the top or bottom queue.
void registerRoots() override
Notify this strategy that all roots have been released (including those that depend on EntrySU or Exi...
SUnit * pickNode(bool &IsTopNode) override
Pick the next node to schedule.
PostMachineSchedulerPass(PostMachineSchedulerPass &&Other)
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
List of PressureChanges in order of increasing, unique PSetID.
Array of PressureDiffs.
Helpers for implementing custom MachineSchedStrategy classes.
void push(SUnit *SU)
iterator find(SUnit *SU)
ArrayRef< SUnit * > elements()
ReadyQueue(unsigned id, const Twine &name)
bool isInQueue(SUnit *SU) const
std::vector< SUnit * >::iterator iterator
bool empty() const
StringRef getName() const
unsigned size() const
iterator remove(iterator I)
unsigned getID() const
Track the current register pressure at some position in the instruction stream, and remember the high...
A static registration template.
Definition: Registry.h:126
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
ResourceSegments are a collection of intervals closed on the left and opened on the right:
void add(IntervalTy A, const unsigned CutOff=10)
Adds an interval [a, b) to the collection of the instance.
static IntervalTy getResourceIntervalBottom(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
These function return the interval used by a resource in bottom and top scheduling.
friend bool operator==(const ResourceSegments &c1, const ResourceSegments &c2)
static bool intersects(IntervalTy A, IntervalTy B)
Checks whether intervals intersect.
unsigned getFirstAvailableAtFromTop(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
friend llvm::raw_ostream & operator<<(llvm::raw_ostream &os, const ResourceSegments &Segments)
std::pair< int64_t, int64_t > IntervalTy
Represents an interval of discrete integer values closed on the left and open on the right: [a,...
static IntervalTy getResourceIntervalTop(unsigned C, unsigned AcquireAtCycle, unsigned ReleaseAtCycle)
ResourceSegments(const std::list< IntervalTy > &Intervals)
unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle, unsigned AcquireAtCycle, unsigned ReleaseAtCycle) const
getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop should be merged in a single function in...
Scheduling dependency.
Definition: ScheduleDAG.h:49
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:242
unsigned NodeQueueId
Queue id of node.
Definition: ScheduleDAG.h:271
unsigned TopReadyCycle
Cycle relative to start when node is ready.
Definition: ScheduleDAG.h:278
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:270
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:424
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
Definition: ScheduleDAG.h:416
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:296
unsigned BotReadyCycle
Cycle relative to end when node is ready.
Definition: ScheduleDAG.h:279
Each Scheduling boundary is associated with ready queues.
unsigned getNextResourceCycleByInstance(unsigned InstanceIndex, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource unit can be scheduled.
void releasePending()
Release pending ready nodes in to the available queue.
unsigned getDependentLatency() const
unsigned getScheduledLatency() const
Get the number of latency cycles "covered" by the scheduled instructions.
void incExecutedResources(unsigned PIdx, unsigned Count)
bool isResourceLimited() const
const TargetSchedModel * SchedModel
unsigned getExecutedCount() const
Get a scaled count for the minimum execution time of the scheduled micro-ops that are ready to execut...
unsigned getLatencyStallCycles(SUnit *SU)
Get the difference between the given SUnit's ready time and the current cycle.
SchedBoundary(const SchedBoundary &other)=delete
unsigned findMaxLatency(ArrayRef< SUnit * > ReadySUs)
ScheduleDAGMI * DAG
void dumpReservedCycles() const
Dump the state of the information that tracks resource usage.
unsigned getOtherResourceCount(unsigned &OtherCritIdx)
SchedRemainder * Rem
void bumpNode(SUnit *SU)
Move the boundary of scheduled code by one SUnit.
unsigned getCriticalCount() const
Get the scaled count of scheduled micro-ops and resources, including executed resources.
SUnit * pickOnlyChoice()
Call this before applying any other heuristics to the Available queue.
void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue, unsigned Idx=0)
Release SU to make it ready.
unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx, unsigned Cycles, unsigned ReadyCycle, unsigned StartAtCycle)
Add the given processor resource to this scheduled zone.
SchedBoundary(unsigned ID, const Twine &Name)
Pending queues extend the ready queues with the same ID and the PendingFlag set.
ScheduleHazardRecognizer * HazardRec
bool isUnbufferedGroup(unsigned PIdx) const
void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem)
SchedBoundary & operator=(const SchedBoundary &other)=delete
unsigned getResourceCount(unsigned ResIdx) const
void bumpCycle(unsigned NextCycle)
Move the boundary of scheduled code by one cycle.
unsigned getCurrMOps() const
Micro-ops issued in the current cycle.
unsigned getCurrCycle() const
Number of cycles to issue the instructions scheduled in this zone.
bool checkHazard(SUnit *SU)
Does this SU have a hazard within the current instruction group.
std::pair< unsigned, unsigned > getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx, unsigned ReleaseAtCycle, unsigned AcquireAtCycle)
Compute the next cycle at which the given processor resource can be scheduled.
void dumpScheduledState() const
void removeReady(SUnit *SU)
Remove SU from the ready set for this boundary.
unsigned getZoneCritResIdx() const
unsigned getUnscheduledLatency(SUnit *SU) const
Compute the values of each DAG node for various metrics during DFS.
Definition: ScheduleDFS.h:65
A ScheduleDAG for scheduling lists of MachineInstr.
MachineBasicBlock::iterator end() const
Returns an iterator to the bottom of the current scheduling region.
MachineBasicBlock::iterator begin() const
Returns an iterator to the top of the current scheduling region.
const MachineLoopInfo * MLI
bool RemoveKillFlags
True if the DAG builder should remove kill flags (in preparation for rescheduling).
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
void scheduleMI(SUnit *SU, bool IsTopNode)
Move an instruction and update register pressure.
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
VReg2SUnitMultiMap VRegUses
Maps vregs to the SUnits of their uses in the current scheduling region.
void computeDFSResult()
Compute a DFSResult after DAG building is complete, and before any queue comparisons.
PressureDiff & getPressureDiff(const SUnit *SU)
SchedDFSResult * DFSResult
Information about DAG subtrees.
void enterRegion(MachineBasicBlock *bb, MachineBasicBlock::iterator begin, MachineBasicBlock::iterator end, unsigned regioninstrs) override
Implement the ScheduleDAGInstrs interface for handling the next scheduling region.
void initQueues(ArrayRef< SUnit * > TopRoots, ArrayRef< SUnit * > BotRoots)
Release ExitSU predecessors and setup scheduler queues.
RegPressureTracker BotRPTracker
void buildDAGWithRegPressure()
Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking enabled.
std::vector< PressureChange > RegionCriticalPSets
List of pressure sets that exceed the target's pressure limit before scheduling, listed in increasing...
void updateScheduledPressure(const SUnit *SU, const std::vector< unsigned > &NewMaxPressure)
IntervalPressure TopPressure
The top of the unscheduled zone.
const RegPressureTracker & getBotRPTracker() const
ScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
unsigned computeCyclicCriticalPath()
Compute the cyclic critical path through the DAG.
IntervalPressure BotPressure
The bottom of the unscheduled zone.
bool isTrackingPressure() const
Return true if register pressure tracking is enabled.
void updatePressureDiffs(ArrayRef< VRegMaskOrUnit > LiveUses)
Update the PressureDiff array for liveness after scheduling this instruction.
bool hasVRegLiveness() const override
Return true if this DAG supports VReg liveness and RegPressure.
RegisterClassInfo * RegClassInfo
const SchedDFSResult * getDFSResult() const
Return a non-null DFS result if the scheduling strategy initialized it.
const PressureDiff & getPressureDiff(const SUnit *SU) const
const RegPressureTracker & getTopRPTracker() const
RegPressureTracker RPTracker
bool ShouldTrackPressure
Register pressure in this region computed by initRegPressure.
const IntervalPressure & getRegPressure() const
Get register pressure for the entire scheduling region before scheduling.
const IntervalPressure & getBotPressure() const
Get current register pressure for the bottom scheduled instructions.
void dump() const override
BitVector & getScheduledTrees()
MachineBasicBlock::iterator LiveRegionEnd
const IntervalPressure & getTopPressure() const
Get current register pressure for the top scheduled instructions.
const std::vector< PressureChange > & getRegionCriticalPSets() const
IntervalPressure RegPressure
RegPressureTracker TopRPTracker
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void dumpSchedule() const
dump the scheduled Sequence.
std::unique_ptr< MachineSchedStrategy > SchedImpl
void startBlock(MachineBasicBlock *bb) override
Prepares to perform scheduling in the given block.
void releasePred(SUnit *SU, SDep *PredEdge)
ReleasePred - Decrement the NumSuccsLeft count of a predecessor.
void initQueues(ArrayRef< SUnit * > TopRoots, ArrayRef< SUnit * > BotRoots)
Release ExitSU predecessors and setup scheduler queues.
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos)
Change the position of an instruction within the basic block and update live ranges and region bounda...
void releasePredecessors(SUnit *SU)
releasePredecessors - Call releasePred on each of SU's predecessors.
void postProcessDAG()
Apply each ScheduleDAGMutation step in order.
const SUnit * NextClusterSucc
void dumpScheduleTraceTopDown() const
Print execution trace of the schedule top-down or bottom-up.
const SUnit * NextClusterPred
Record the next node in a scheduled cluster.
MachineBasicBlock::iterator top() const
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
void findRootsAndBiasEdges(SmallVectorImpl< SUnit * > &TopRoots, SmallVectorImpl< SUnit * > &BotRoots)
MachineBasicBlock::iterator bottom() const
MachineBasicBlock::iterator CurrentBottom
The bottom of the unscheduled zone.
bool doMBBSchedRegionsTopDown() const override
If this method returns true, handling of the scheduling regions themselves (in case of a scheduling boundary in MBB) will be done beginning with the topmost region of MBB.
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
void enterRegion(MachineBasicBlock *bb, MachineBasicBlock::iterator begin, MachineBasicBlock::iterator end, unsigned regioninstrs) override
Implement the ScheduleDAGInstrs interface for handling the next scheduling region.
LiveIntervals * getLIS() const
void viewGraph() override
Out-of-line implementation with no arguments is handy for gdb.
void releaseSucc(SUnit *SU, SDep *SuccEdge)
ReleaseSucc - Decrement the NumPredsLeft count of a successor.
void dumpScheduleTraceBottomUp() const
~ScheduleDAGMI() override
void finishBlock() override
Cleans up after scheduling in the given block.
LiveIntervals * LIS
const SUnit * getNextClusterPred() const
void updateQueues(SUnit *SU, bool IsTopNode)
Update scheduler DAG and queues after scheduling an instruction.
void placeDebugValues()
Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
MachineBasicBlock::iterator CurrentTop
The top of the unscheduled zone.
void releaseSuccessors(SUnit *SU)
releaseSuccessors - Call releaseSucc on each of SU's successors.
const SUnit * getNextClusterSucc() const
std::vector< std::unique_ptr< ScheduleDAGMutation > > Mutations
Ordered list of DAG postprocessing steps.
MachineFunction & MF
Machine function.
Definition: ScheduleDAG.h:577
HazardRecognizer - This determines whether or not an instruction can be issued this cycle, and whether or not a noop needs to be inserted to handle the hazard.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:573
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:51
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:81
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Provide an instruction scheduling machine model to CodeGen passes.
unsigned getMicroOpFactor() const
Multiply number of micro-ops by this factor to normalize it relative to other resources.
unsigned getLatencyFactor() const
Multiply cycle count by this factor to normalize it relative to other resources.
const MCProcResourceDesc * getProcResource(unsigned PIdx) const
Get a processor resource by ID for convenience.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
Impl class for MachineScheduler.
Impl class for PostMachineScheduler.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1759
cl::opt< bool > PrintDAGs
unsigned getWeakLeft(const SUnit *SU, bool isTop)
cl::opt< bool > VerifyScheduling
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
cl::opt< bool > ViewMISchedDAGs
bool tryPressure(const PressureChange &TryP, const PressureChange &CandP, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason, const TargetRegisterInfo *TRI, const MachineFunction &MF)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store clustering.
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary &Zone)
@ Other
Any other memory.
ScheduleDAGMI * createGenericSchedPostRA(MachineSchedContext *C)
Create a generic scheduler with no vreg liveness or DAG mutation passes.
cl::opt< MISched::Direction > PreRADirection
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to load clustering.
bool tryGreater(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
bool tryLess(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
Return true if this heuristic determines order.
std::unique_ptr< ScheduleDAGMutation > createCopyConstrainDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
int biasPhysReg(const SUnit *SU, bool isTop)
Minimize physical register live ranges.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
#define N
Policy for scheduling the next instruction in the candidate's zone.
bool operator==(const CandPolicy &RHS) const
bool operator!=(const CandPolicy &RHS) const
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of pickNode().
void reset(const CandPolicy &NewPolicy)
void initResourceDelta(const ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
Status of an instruction's critical resource consumption.
bool operator!=(const SchedResourceDelta &RHS) const
bool operator==(const SchedResourceDelta &RHS) const
RegisterPressure computed within a region of instructions delimited by TopIdx and BottomIdx.
const unsigned * SubUnitsIdxBegin
Definition: MCSchedule.h:56
Summarize the scheduling resources required for an instruction of a particular scheduling class.
Definition: MCSchedule.h:121
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instantiate a scheduler.
const MachineDominatorTree * MDT
RegisterClassInfo * RegClassInfo
const MachineLoopInfo * MLI
const TargetMachine * TM
MachineSchedContext & operator=(const MachineSchedContext &other)=delete
MachineSchedContext(const MachineSchedContext &other)=delete
Define a generic scheduling policy for targets that don't provide their own MachineSchedStrategy.
bool ShouldTrackLaneMasks
Track LaneMasks to allow reordering of independent subregister writes of the same vreg.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
Store the effects of a change in pressure on things that MI scheduler cares about.
Summarize the unscheduled region.
void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
SmallVector< unsigned, 16 > RemainingCounts