LLVM 22.0.0git
PPCMachineScheduler.cpp
Go to the documentation of this file.
//===- PPCMachineScheduler.cpp - MI Scheduler for PowerPC -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCMachineScheduler.h"
#include "MCTargetDesc/PPCMCTargetDesc.h"

12using namespace llvm;
13
14static cl::opt<bool>
15DisableAddiLoadHeuristic("disable-ppc-sched-addi-load",
16 cl::desc("Disable scheduling addi instruction before"
17 "load for ppc"), cl::Hidden);
18static cl::opt<bool>
19 EnableAddiHeuristic("ppc-postra-bias-addi",
20 cl::desc("Enable scheduling addi instruction as early"
21 "as possible post ra"),
22 cl::Hidden, cl::init(true));
23
25 return Cand.SU->getInstr()->getOpcode() == PPC::ADDI ||
26 Cand.SU->getInstr()->getOpcode() == PPC::ADDI8;
27}
28
29bool PPCPreRASchedStrategy::biasAddiLoadCandidate(SchedCandidate &Cand,
30 SchedCandidate &TryCand,
31 SchedBoundary &Zone) const {
33 return false;
34
35 SchedCandidate &FirstCand = Zone.isTop() ? TryCand : Cand;
36 SchedCandidate &SecondCand = Zone.isTop() ? Cand : TryCand;
37 if (isADDIInstr(FirstCand) && SecondCand.SU->getInstr()->mayLoad()) {
38 TryCand.Reason = Stall;
39 return true;
40 }
41 if (FirstCand.SU->getInstr()->mayLoad() && isADDIInstr(SecondCand)) {
42 TryCand.Reason = NoCand;
43 return true;
44 }
45
46 return false;
47}
48
50 SchedCandidate &TryCand,
51 SchedBoundary *Zone) const {
52 // From GenericScheduler::tryCandidate
53
54 // Initialize the candidate if needed.
55 if (!Cand.isValid()) {
56 TryCand.Reason = NodeOrder;
57 return true;
58 }
59
60 // Bias PhysReg Defs and copies to their uses and defined respectively.
61 if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
62 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
63 return TryCand.Reason != NoCand;
64
65 // Avoid exceeding the target's limit.
66 if (DAG->isTrackingPressure() &&
67 tryPressure(TryCand.RPDelta.Excess, Cand.RPDelta.Excess, TryCand, Cand,
68 RegExcess, TRI, DAG->MF))
69 return TryCand.Reason != NoCand;
70
71 // Avoid increasing the max critical pressure in the scheduled region.
72 if (DAG->isTrackingPressure() &&
74 TryCand, Cand, RegCritical, TRI, DAG->MF))
75 return TryCand.Reason != NoCand;
76
77 // We only compare a subset of features when comparing nodes between
78 // Top and Bottom boundary. Some properties are simply incomparable, in many
79 // other instances we should only override the other boundary if something
80 // is a clear good pick on one boundary. Skip heuristics that are more
81 // "tie-breaking" in nature.
82 bool SameBoundary = Zone != nullptr;
83 if (SameBoundary) {
84 // For loops that are acyclic path limited, aggressively schedule for
85 // latency. Within an single cycle, whenever CurrMOps > 0, allow normal
86 // heuristics to take precedence.
87 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
88 tryLatency(TryCand, Cand, *Zone))
89 return TryCand.Reason != NoCand;
90
91 // Prioritize instructions that read unbuffered resources by stall cycles.
92 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
93 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
94 return TryCand.Reason != NoCand;
95 }
96
97 // Keep clustered nodes together to encourage downstream peephole
98 // optimizations which may reduce resource requirements.
99 //
100 // This is a best effort to set things up for a post-RA pass. Optimizations
101 // like generating loads of multiple registers should ideally be done within
102 // the scheduler pass by combining the loads during DAG postprocessing.
103 unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
104 unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
105 bool CandIsClusterSucc =
106 isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
107 bool TryCandIsClusterSucc =
108 isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
109
110 if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
111 Cluster))
112 return TryCand.Reason != NoCand;
113
114 if (SameBoundary) {
115 // Weak edges are for clustering and other constraints.
116 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
117 getWeakLeft(Cand.SU, Cand.AtTop), TryCand, Cand, Weak))
118 return TryCand.Reason != NoCand;
119 }
120
121 // Avoid increasing the max pressure of the entire region.
122 if (DAG->isTrackingPressure() &&
123 tryPressure(TryCand.RPDelta.CurrentMax, Cand.RPDelta.CurrentMax, TryCand,
124 Cand, RegMax, TRI, DAG->MF))
125 return TryCand.Reason != NoCand;
126
127 if (SameBoundary) {
128 // Avoid critical resource consumption and balance the schedule.
131 TryCand, Cand, ResourceReduce))
132 return TryCand.Reason != NoCand;
134 Cand.ResDelta.DemandedResources, TryCand, Cand,
136 return TryCand.Reason != NoCand;
137
138 // Avoid serializing long latency dependence chains.
139 // For acyclic path limited loops, latency was already checked above.
141 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
142 return TryCand.Reason != NoCand;
143
144 // Fall through to original instruction order.
145 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) ||
146 (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
147 TryCand.Reason = NodeOrder;
148 }
149 }
150
151 // GenericScheduler::tryCandidate end
152
153 // Add powerpc specific heuristic only when TryCand isn't selected or
154 // selected as node order.
155 if (TryCand.Reason != NodeOrder && TryCand.Reason != NoCand)
156 return true;
157
158 // There are some benefits to schedule the ADDI before the load to hide the
159 // latency, as RA may create a true dependency between the load and addi.
160 if (SameBoundary) {
161 if (biasAddiLoadCandidate(Cand, TryCand, *Zone))
162 return TryCand.Reason != NoCand;
163 }
164
165 return TryCand.Reason != NoCand;
166}
167
169 SchedCandidate &TryCand) const {
171 return false;
172
173 if (isADDIInstr(TryCand) && !isADDIInstr(Cand)) {
174 TryCand.Reason = Stall;
175 return true;
176 }
177 return false;
178}
179
181 SchedCandidate &TryCand) {
182 // From PostGenericScheduler::tryCandidate
183
184 // Initialize the candidate if needed.
185 if (!Cand.isValid()) {
186 TryCand.Reason = NodeOrder;
187 return true;
188 }
189
190 // Prioritize instructions that read unbuffered resources by stall cycles.
191 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
192 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
193 return TryCand.Reason != NoCand;
194
195 // Keep clustered nodes together.
196 unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
197 unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
198 bool CandIsClusterSucc =
199 isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
200 bool TryCandIsClusterSucc =
201 isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
202
203 if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
204 Cluster))
205 return TryCand.Reason != NoCand;
206
207 // Avoid critical resource consumption and balance the schedule.
209 TryCand, Cand, ResourceReduce))
210 return TryCand.Reason != NoCand;
212 Cand.ResDelta.DemandedResources, TryCand, Cand,
214 return TryCand.Reason != NoCand;
215
216 // Avoid serializing long latency dependence chains.
217 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
218 return TryCand.Reason != NoCand;
219 }
220
221 // Fall through to original instruction order.
222 if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
223 TryCand.Reason = NodeOrder;
224
225 // PostGenericScheduler::tryCandidate end
226
227 // Add powerpc post ra specific heuristic only when TryCand isn't selected or
228 // selected as node order.
229 if (TryCand.Reason != NodeOrder && TryCand.Reason != NoCand)
230 return true;
231
232 // There are some benefits to schedule the ADDI as early as possible post ra
233 // to avoid stalled by vector instructions which take up all the hw units.
234 // And ADDI is usually used to post inc the loop indvar, which matters the
235 // performance.
236 if (biasAddiCandidate(Cand, TryCand))
237 return TryCand.Reason != NoCand;
238
239 return TryCand.Reason != NoCand;
240}
241
243 // Custom PPC PostRA specific behavior here.
245}
246
248 // Custom PPC PostRA specific behavior here.
250}
251
253 // Custom PPC PostRA specific initialization here.
255}
256
258 // Custom PPC PostRA specific scheduling here.
259 return PostGenericScheduler::pickNode(IsTopNode);
260}
261
MachineBasicBlock & MBB
static bool isADDIInstr(const GenericScheduler::SchedCandidate &Cand)
static cl::opt< bool > EnableAddiHeuristic("ppc-postra-bias-addi", cl::desc("Enable scheduling addi instruction as early" "as possible post ra"), cl::Hidden, cl::init(true))
static cl::opt< bool > DisableAddiLoadHeuristic("disable-ppc-sched-addi-load", cl::desc("Disable scheduling addi instruction before" "load for ppc"), cl::Hidden)
MachineSchedPolicy RegionPolicy
const TargetSchedModel * SchedModel
const TargetRegisterInfo * TRI
ScheduleDAGMILive * DAG
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:587
virtual void leaveMBB()
Tell the strategy that current MBB is done.
virtual void enterMBB(MachineBasicBlock *MBB)
Tell the strategy that MBB is about to be processed.
SUnit * pickNode(bool &IsTopNode) override
Pick the next node to schedule.
void enterMBB(MachineBasicBlock *MBB) override
Tell the strategy that MBB is about to be processed.
bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand) override
Apply a set of heuristics to a new candidate for PostRA scheduling.
bool biasAddiCandidate(SchedCandidate &Cand, SchedCandidate &TryCand) const
void leaveMBB() override
Tell the strategy that current MBB is done.
void initialize(ScheduleDAGMI *Dag) override
Initialize the strategy after building the DAG for a new region.
bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const override
Apply a set of heuristics to a new candidate.
void initialize(ScheduleDAGMI *Dag) override
Initialize the strategy after building the DAG for a new region.
SUnit * pickNode(bool &IsTopNode) override
Pick the next node to schedule.
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:249
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:277
unsigned ParentClusterIdx
The parent cluster id.
Definition: ScheduleDAG.h:288
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
Definition: ScheduleDAG.h:399
Each Scheduling boundary is associated with ready queues.
LLVM_ABI unsigned getLatencyStallCycles(SUnit *SU)
Get the difference between the given SUnit's ready time and the current cycle.
unsigned getCurrMOps() const
Micro-ops issued in the current cycle.
bool isTrackingPressure() const
Return true if register pressure tracking is enabled.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
MachineFunction & MF
Machine function.
Definition: ScheduleDAG.h:586
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
LLVM_ABI unsigned getWeakLeft(const SUnit *SU, bool isTop)
LLVM_ABI bool tryPressure(const PressureChange &TryP, const PressureChange &CandP, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason, const TargetRegisterInfo *TRI, const MachineFunction &MF)
LLVM_ABI bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary &Zone)
bool isTheSameCluster(unsigned A, unsigned B)
Return whether the input cluster ID's are the same and valid.
Definition: ScheduleDAG.h:244
LLVM_ABI bool tryGreater(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
LLVM_ABI bool tryLess(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
Return true if this heuristic determines order.
LLVM_ABI int biasPhysReg(const SUnit *SU, bool isTop)
Minimize physical register live ranges.
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of p...
LLVM_ABI void initResourceDelta(const ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
PressureChange CriticalMax