LLVM 22.0.0git
SIOptimizeExecMasking.cpp
//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

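// Overview of what this post-RA pass does to exec-mask manipulation, as
// implemented by the functions below:
//  - turns *_term pseudo terminators back into ordinary instructions,
//  - folds "copy from exec / s_<op> / copy to exec" into s_<op>_saveexec,
//  - rewrites s_or_saveexec + s_xor into s_andn2_saveexec, and
//  - replaces v_cmp + s_and_saveexec with s_mov + v_cmpx on GFX10.3+.
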
#include "SIOptimizeExecMasking.h"
#include "AMDGPU.h"
#include "AMDGPULaneMaskUtils.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking {
public:
  SIOptimizeExecMasking(MachineFunction *MF)
      : MF(MF), ST(&MF->getSubtarget<GCNSubtarget>()), TII(ST->getInstrInfo()),
        TRI(&TII->getRegisterInfo()), MRI(&MF->getRegInfo()),
        LMC(AMDGPU::LaneMaskConstants::get(*ST)) {}
  bool run();

private:
  MachineFunction *MF;
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  const AMDGPU::LaneMaskConstants &LMC;

  DenseMap<MachineInstr *, MachineInstr *> SaveExecVCmpMapping;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 1> OrXors;
  SmallVector<MachineOperand *, 1> KillFlagCandidates;

  Register isCopyFromExec(const MachineInstr &MI) const;
  Register isCopyToExec(const MachineInstr &MI) const;
  bool removeTerminatorBit(MachineInstr &MI) const;
  MachineBasicBlock::reverse_iterator
  fixTerminators(MachineBasicBlock &MBB) const;
  MachineBasicBlock::reverse_iterator
  findExecCopy(MachineBasicBlock &MBB,
               MachineBasicBlock::reverse_iterator I) const;
  bool isRegisterInUseBetween(MachineInstr &Stop, MachineInstr &Start,
                              MCRegister Reg, bool UseLiveOuts = false,
                              bool IgnoreStart = false) const;
  bool isRegisterInUseAfter(MachineInstr &Stop, MCRegister Reg) const;
  MachineInstr *findInstrBackwards(
      MachineInstr &Origin, std::function<bool(MachineInstr *)> Pred,
      ArrayRef<MCRegister> NonModifiableRegs,
      MachineInstr *Terminator = nullptr,
      SmallVectorImpl<MachineOperand *> *KillFlagCandidates = nullptr,
      unsigned MaxInstructions = 20) const;
  bool optimizeExecSequence();
  void tryRecordVCmpxAndSaveexecSequence(MachineInstr &MI);
  bool optimizeVCMPSaveExecSequence(MachineInstr &SaveExecInstr,
                                    MachineInstr &VCmp) const;

  void tryRecordOrSaveexecXorSequence(MachineInstr &MI);
  bool optimizeOrSaveexecXorSequences();
};

class SIOptimizeExecMaskingLegacy : public MachineFunctionPass {
public:
  static char ID;

  SIOptimizeExecMaskingLegacy() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

PreservedAnalyses
SIOptimizeExecMaskingPass::run(MachineFunction &MF,
                               MachineFunctionAnalysisManager &) {
  SIOptimizeExecMasking Impl(&MF);

  if (!Impl.run())
    return PreservedAnalyses::all();

  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingLegacy, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_END(SIOptimizeExecMaskingLegacy, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMaskingLegacy::ID = 0;

char &llvm::SIOptimizeExecMaskingLegacyID = SIOptimizeExecMaskingLegacy::ID;

/// If \p MI is a copy from exec, return the register copied to.
Register SIOptimizeExecMasking::isCopyFromExec(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B32_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() && Src.getReg() == LMC.ExecReg)
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
Register SIOptimizeExecMasking::isCopyToExec(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B32: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() && Dst.getReg() == LMC.ExecReg && MI.getOperand(1).isReg())
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32_term:
    llvm_unreachable("should have been replaced");
  }

  return Register();
}

/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
static Register isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    break;
  }
  case AMDGPU::S_AND_B32:
  case AMDGPU::S_OR_B32:
  case AMDGPU::S_XOR_B32:
  case AMDGPU::S_ANDN2_B32:
  case AMDGPU::S_ORN2_B32:
  case AMDGPU::S_NAND_B32:
  case AMDGPU::S_NOR_B32:
  case AMDGPU::S_XNOR_B32: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    break;
  }
  }

  return AMDGPU::NoRegister;
}

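// Map a plain logical opcode to its fused saveexec form, e.g. S_AND_B64 ->
// S_AND_SAVEEXEC_B64, which performs "sdst = exec; exec = ssrc0 & exec" as a
// single instruction.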
static unsigned getSaveExecOp(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::S_AND_B64:
    return AMDGPU::S_AND_SAVEEXEC_B64;
  case AMDGPU::S_OR_B64:
    return AMDGPU::S_OR_SAVEEXEC_B64;
  case AMDGPU::S_XOR_B64:
    return AMDGPU::S_XOR_SAVEEXEC_B64;
  case AMDGPU::S_ANDN2_B64:
    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
  case AMDGPU::S_ORN2_B64:
    return AMDGPU::S_ORN2_SAVEEXEC_B64;
  case AMDGPU::S_NAND_B64:
    return AMDGPU::S_NAND_SAVEEXEC_B64;
  case AMDGPU::S_NOR_B64:
    return AMDGPU::S_NOR_SAVEEXEC_B64;
  case AMDGPU::S_XNOR_B64:
    return AMDGPU::S_XNOR_SAVEEXEC_B64;
  case AMDGPU::S_AND_B32:
    return AMDGPU::S_AND_SAVEEXEC_B32;
  case AMDGPU::S_OR_B32:
    return AMDGPU::S_OR_SAVEEXEC_B32;
  case AMDGPU::S_XOR_B32:
    return AMDGPU::S_XOR_SAVEEXEC_B32;
  case AMDGPU::S_ANDN2_B32:
    return AMDGPU::S_ANDN2_SAVEEXEC_B32;
  case AMDGPU::S_ORN2_B32:
    return AMDGPU::S_ORN2_SAVEEXEC_B32;
  case AMDGPU::S_NAND_B32:
    return AMDGPU::S_NAND_SAVEEXEC_B32;
  case AMDGPU::S_NOR_B32:
    return AMDGPU::S_NOR_SAVEEXEC_B32;
  case AMDGPU::S_XNOR_B32:
    return AMDGPU::S_XNOR_SAVEEXEC_B32;
  default:
    return AMDGPU::INSTRUCTION_LIST_END;
  }
}

// These are only terminators to get correct spill code placement during
// register allocation, so turn them back into normal instructions.
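// The *_term variants are introduced by control flow lowering so that spills
// inserted at the block end land before the exec manipulation; for example,
// S_MOV_B64_term is turned back into a plain COPY (or S_MOV_B64 for an
// immediate source).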
bool SIOptimizeExecMasking::removeTerminatorBit(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case AMDGPU::S_MOV_B32_term: {
    bool RegSrc = MI.getOperand(1).isReg();
    MI.setDesc(TII->get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B32));
    return true;
  }
  case AMDGPU::S_MOV_B64_term: {
    bool RegSrc = MI.getOperand(1).isReg();
    MI.setDesc(TII->get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B64));
    return true;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_XOR_B64));
    return true;
  }
  case AMDGPU::S_XOR_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_XOR_B32));
    return true;
  }
  case AMDGPU::S_OR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_OR_B64));
    return true;
  }
  case AMDGPU::S_OR_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_OR_B32));
    return true;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_ANDN2_B64));
    return true;
  }
  case AMDGPU::S_ANDN2_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_ANDN2_B32));
    return true;
  }
  case AMDGPU::S_AND_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_AND_B64));
    return true;
  }
  case AMDGPU::S_AND_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII->get(AMDGPU::S_AND_B32));
    return true;
  }
  default:
    return false;
  }
}

// Turn all pseudoterminators in the block into their equivalent non-terminator
// instructions. Returns the reverse iterator to the first non-terminator
// instruction in the block.
MachineBasicBlock::reverse_iterator
SIOptimizeExecMasking::fixTerminators(MachineBasicBlock &MBB) const {
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();

  bool Seen = false;
  MachineBasicBlock::reverse_iterator FirstNonTerm = I;
  for (; I != E; ++I) {
    if (!I->isTerminator())
      return Seen ? FirstNonTerm : I;

    if (removeTerminatorBit(*I)) {
      if (!Seen) {
        FirstNonTerm = I;
        Seen = true;
      }
    }
  }

  return FirstNonTerm;
}

MachineBasicBlock::reverse_iterator SIOptimizeExecMasking::findExecCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::reverse_iterator I) const {
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    Register CopyFromExec = isCopyFromExec(*I);
    if (CopyFromExec.isValid())
      return I;
  }

  return E;
}

// XXX - Seems LiveRegUnits doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
  for (MachineBasicBlock *Succ : MBB.successors()) {
    if (Succ->isLiveIn(Reg))
      return true;
  }

  return false;
}

// Backwards-iterate from Origin (for n=MaxInstructions iterations) until
// either the beginning of the BB is reached or Pred evaluates to true - which
// can be an arbitrary condition based on the current MachineInstr, for
// instance a target instruction. Breaks prematurely by returning nullptr if
// one of the registers given in NonModifiableRegs is modified by the current
// instruction.
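// For example, tryRecordVCmpxAndSaveexecSequence below uses this helper to
// walk upwards from an s_and_saveexec to the v_cmp that defines its input,
// giving up if exec or that input register is clobbered on the way.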
MachineInstr *SIOptimizeExecMasking::findInstrBackwards(
    MachineInstr &Origin, std::function<bool(MachineInstr *)> Pred,
    ArrayRef<MCRegister> NonModifiableRegs, MachineInstr *Terminator,
    SmallVectorImpl<MachineOperand *> *KillFlagCandidates,
    unsigned MaxInstructions) const {
  MachineBasicBlock::reverse_iterator A(Origin.getIterator()),
                                      E = Origin.getParent()->rend();
  unsigned CurrentIteration = 0;

  for (++A; CurrentIteration < MaxInstructions && A != E; ++A) {
    if (A->isDebugInstr())
      continue;

    if (Pred(&*A))
      return &*A;

    for (MCRegister Reg : NonModifiableRegs) {
      if (A->modifiesRegister(Reg, TRI))
        return nullptr;

      // Check for kills that appear after the terminator instruction, that
      // would not be detected by clearKillFlags, since they will cause the
      // register to be dead at a later place, causing the verifier to fail.
      // We use the candidates to clear the kill flags later.
      if (Terminator && KillFlagCandidates && A != Terminator &&
          A->killsRegister(Reg, TRI)) {
        for (MachineOperand &MO : A->operands()) {
          if (MO.isReg() && MO.isKill()) {
            Register Candidate = MO.getReg();
            if (Candidate != Reg && TRI->regsOverlap(Candidate, Reg))
              KillFlagCandidates->push_back(&MO);
          }
        }
      }
    }

    ++CurrentIteration;
  }

  return nullptr;
}

// Determine if a register Reg is not re-defined and still in use
// in the range (Stop..Start].
// It does so by backwards calculating liveness from the end of the BB until
// either Stop or the beginning of the BB is reached.
// After liveness is calculated, we can determine if Reg is still in use and
// not defined in between the instructions.
bool SIOptimizeExecMasking::isRegisterInUseBetween(MachineInstr &Stop,
                                                   MachineInstr &Start,
                                                   MCRegister Reg,
                                                   bool UseLiveOuts,
                                                   bool IgnoreStart) const {
  LiveRegUnits LR(*TRI);
  if (UseLiveOuts)
    LR.addLiveOuts(*Stop.getParent());

  MachineBasicBlock::reverse_iterator A(Start.getIterator());

  if (IgnoreStart)
    ++A;

  for (; A != Stop.getParent()->rend() && A != Stop; ++A) {
    LR.stepBackward(*A);
  }

  return !LR.available(Reg) || MRI->isReserved(Reg);
}

// Determine if a register Reg is not re-defined and still in use
// in the range (Stop..BB.end].
bool SIOptimizeExecMasking::isRegisterInUseAfter(MachineInstr &Stop,
                                                 MCRegister Reg) const {
  return isRegisterInUseBetween(Stop, *Stop.getParent()->rbegin(), Reg, true);
}

// Optimize sequences emitted for control flow lowering. They are originally
// emitted as the separate operations because spill code may need to be
// inserted for the saved copy of exec.
//
// x = copy exec
// z = s_<op>_b64 x, y
// exec = copy z
// =>
// x = s_<op>_saveexec_b64 y
//
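// For example (wave64; register choices illustrative):
//   $sgpr0_sgpr1 = COPY $exec
//   $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, $sgpr4_sgpr5
//   $exec = COPY $sgpr2_sgpr3
// =>
//   $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $sgpr4_sgpr5
// which saves the old exec in $sgpr0_sgpr1 and applies the AND in one step.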
bool SIOptimizeExecMasking::optimizeExecSequence() {
  bool Changed = false;
  for (MachineBasicBlock &MBB : *MF) {
    MachineBasicBlock::reverse_iterator I = fixTerminators(MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    // It's possible to see other terminator copies after the exec copy. This
    // can happen if control flow pseudos had their outputs used by phis.
    Register CopyToExec;

    unsigned SearchCount = 0;
    const unsigned SearchLimit = 5;
    while (I != E && SearchCount++ < SearchLimit) {
      CopyToExec = isCopyToExec(*I);
      if (CopyToExec)
        break;
      ++I;
    }

    if (!CopyToExec)
      continue;

    // Scan backwards to find the def.
    auto *CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(MBB, I);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(LMC.ExecReg);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
        Changed = true;
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

    for (MachineBasicBlock::iterator
             J = std::next(CopyFromExecInst->getIterator()),
             JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(LMC.ExecReg, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        }
        LLVM_DEBUG(dbgs() << "Instruction does not read exec copy: " << *J
                          << '\n');
        break;
      }
      if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
        .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, LMC.ExecReg,
                                    AMDGPU::NoSubRegister, *TRI);
    }

    Changed = true;
  }

  return Changed;
}

// Inserts the optimized s_mov_b32 / v_cmpx sequence based on the
// operands extracted from a v_cmp ..., s_and_saveexec pattern.
bool SIOptimizeExecMasking::optimizeVCMPSaveExecSequence(
    MachineInstr &SaveExecInstr, MachineInstr &VCmp) const {
  const int NewOpcode = AMDGPU::getVCMPXOpFromVCMP(VCmp.getOpcode());

  if (NewOpcode == -1)
    return false;

  MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);
  MachineOperand *Src1 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src1);

  Register MoveDest = SaveExecInstr.getOperand(0).getReg();

  MachineBasicBlock::instr_iterator InsertPosIt = SaveExecInstr.getIterator();
  if (!SaveExecInstr.uses().empty()) {
    bool IsSGPR32 = TRI->getRegSizeInBits(MoveDest, *MRI) == 32;
    unsigned MovOpcode = IsSGPR32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(*SaveExecInstr.getParent(), InsertPosIt,
            SaveExecInstr.getDebugLoc(), TII->get(MovOpcode), MoveDest)
        .addReg(LMC.ExecReg);
  }

  // Omit dst as V_CMPX is implicitly writing to EXEC.
  // Add dummy src and clamp modifiers, if needed.
  auto Builder = BuildMI(*VCmp.getParent(), std::next(InsertPosIt),
                         VCmp.getDebugLoc(), TII->get(NewOpcode));

  auto TryAddImmediateValueFromNamedOperand =
      [&](AMDGPU::OpName OperandName) -> void {
    if (auto *Mod = TII->getNamedOperand(VCmp, OperandName))
      Builder.addImm(Mod->getImm());
  };

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::src0_modifiers);
  Builder.add(*Src0);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::src1_modifiers);
  Builder.add(*Src1);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::clamp);

  TryAddImmediateValueFromNamedOperand(AMDGPU::OpName::op_sel);

  // The kill flags may no longer be correct.
  if (Src0->isReg())
    MRI->clearKillFlags(Src0->getReg());
  if (Src1->isReg())
    MRI->clearKillFlags(Src1->getReg());

  for (MachineOperand *MO : KillFlagCandidates)
    MO->setIsKill(false);

  SaveExecInstr.eraseFromParent();
  VCmp.eraseFromParent();

  return true;
}

// Record (on GFX10.3 and later) occurrences of
// v_cmp_* SGPR, IMM, VGPR
// s_and_saveexec_b32 EXEC_SGPR_DEST, SGPR
// to be replaced with
// s_mov_b32 EXEC_SGPR_DEST, exec_lo
// v_cmpx_* IMM, VGPR
// to reduce pipeline stalls.
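// For example (wave32; register choices illustrative):
//   v_cmp_lt_f32_e32 vcc_lo, 0, v0
//   s_and_saveexec_b32 s0, vcc_lo
// =>
//   s_mov_b32 s0, exec_lo
//   v_cmpx_lt_f32_e32 0, v0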
void SIOptimizeExecMasking::tryRecordVCmpxAndSaveexecSequence(
    MachineInstr &MI) {
  if (!ST->hasGFX10_3Insts())
    return;

  if (MI.getOpcode() != LMC.AndSaveExecOpc)
    return;

  Register SaveExecDest = MI.getOperand(0).getReg();
  if (!TRI->isSGPRReg(*MRI, SaveExecDest))
    return;

  MachineOperand *SaveExecSrc0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  if (!SaveExecSrc0->isReg())
    return;

  // Tries to find a possibility to optimize a v_cmp ..., s_and_saveexec
  // sequence by looking at an instance of an s_and_saveexec instruction.
  // Returns a pointer to the v_cmp instruction if it is safe to replace the
  // sequence (see the conditions in the function body). This is after register
  // allocation, so some checks on operand dependencies need to be considered.
  MachineInstr *VCmp = nullptr;

  // Try to find the last v_cmp instruction that defs the saveexec input
  // operand without any write to Exec or the saveexec input operand in
  // between.
  VCmp = findInstrBackwards(
      MI,
      [&](MachineInstr *Check) {
        return AMDGPU::getVCMPXOpFromVCMP(Check->getOpcode()) != -1 &&
               Check->modifiesRegister(SaveExecSrc0->getReg(), TRI);
      },
      {LMC.ExecReg, SaveExecSrc0->getReg()});

  if (!VCmp)
    return;

  MachineOperand *VCmpDest = TII->getNamedOperand(*VCmp, AMDGPU::OpName::sdst);
  assert(VCmpDest && "Should have an sdst operand!");

  // Check if any of the v_cmp source operands is written by the saveexec.
  MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);
  if (Src0->isReg() && TRI->isSGPRReg(*MRI, Src0->getReg()) &&
      MI.modifiesRegister(Src0->getReg(), TRI))
    return;

  MachineOperand *Src1 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src1);
  if (Src1->isReg() && TRI->isSGPRReg(*MRI, Src1->getReg()) &&
      MI.modifiesRegister(Src1->getReg(), TRI))
    return;

  // Don't do the transformation if the destination operand is included in
  // its MBB live-outs, meaning it's used in any of its successors, leading
  // to incorrect code if the v_cmp and therefore the def of
  // the dest operand is removed.
  if (isLiveOut(*VCmp->getParent(), VCmpDest->getReg()))
    return;

  // If the v_cmp target is in use between v_cmp and s_and_saveexec or after
  // the s_and_saveexec, skip the optimization.
  if (isRegisterInUseBetween(*VCmp, MI, VCmpDest->getReg(), false, true) ||
      isRegisterInUseAfter(MI, VCmpDest->getReg()))
    return;

  // Try to determine if there is a write to any of the VCmp
  // operands between the saveexec and the vcmp.
  // If yes, additional VGPR spilling might need to be inserted. In this case,
  // it's not worth replacing the instruction sequence.
  SmallVector<MCRegister, 2> NonDefRegs;
  if (Src0->isReg())
    NonDefRegs.push_back(Src0->getReg());

  if (Src1->isReg())
    NonDefRegs.push_back(Src1->getReg());

  if (!findInstrBackwards(
          MI, [&](MachineInstr *Check) { return Check == VCmp; }, NonDefRegs,
          VCmp, &KillFlagCandidates))
    return;

  if (VCmp)
    SaveExecVCmpMapping[&MI] = VCmp;
}

// Record occurrences of
// s_or_saveexec s_o, s_i
// s_xor exec, exec, s_o
// to be replaced with
// s_andn2_saveexec s_o, s_i.
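// The rewrite is sound because after s_or_saveexec, exec == s_i | s_o, so
// xoring with the saved mask s_o yields s_i & ~s_o, which is exactly the
// result (and the save of the old exec) that s_andn2_saveexec produces.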
void SIOptimizeExecMasking::tryRecordOrSaveexecXorSequence(MachineInstr &MI) {
  if (MI.getOpcode() == LMC.XorOpc && &MI != &MI.getParent()->front()) {
    const MachineOperand &XorDst = MI.getOperand(0);
    const MachineOperand &XorSrc0 = MI.getOperand(1);
    const MachineOperand &XorSrc1 = MI.getOperand(2);

    if (XorDst.isReg() && XorDst.getReg() == LMC.ExecReg && XorSrc0.isReg() &&
        XorSrc1.isReg() &&
        (XorSrc0.getReg() == LMC.ExecReg || XorSrc1.getReg() == LMC.ExecReg)) {

      // Peek at the previous instruction and check if this is a relevant
      // s_or_saveexec instruction.
      MachineInstr &PossibleOrSaveexec = *MI.getPrevNode();
      if (PossibleOrSaveexec.getOpcode() != LMC.OrSaveExecOpc)
        return;

      const MachineOperand &OrDst = PossibleOrSaveexec.getOperand(0);
      const MachineOperand &OrSrc0 = PossibleOrSaveexec.getOperand(1);
      if (OrDst.isReg() && OrSrc0.isReg()) {
        if ((XorSrc0.getReg() == LMC.ExecReg &&
             XorSrc1.getReg() == OrDst.getReg()) ||
            (XorSrc0.getReg() == OrDst.getReg() &&
             XorSrc1.getReg() == LMC.ExecReg)) {
          OrXors.emplace_back(&PossibleOrSaveexec, &MI);
        }
      }
    }
  }
}

bool SIOptimizeExecMasking::optimizeOrSaveexecXorSequences() {
  if (OrXors.empty()) {
    return false;
  }

  bool Changed = false;

  for (const auto &Pair : OrXors) {
    MachineInstr *Or = nullptr;
    MachineInstr *Xor = nullptr;
    std::tie(Or, Xor) = Pair;
    BuildMI(*Or->getParent(), Or->getIterator(), Or->getDebugLoc(),
            TII->get(LMC.AndN2SaveExecOpc), Or->getOperand(0).getReg())
        .addReg(Or->getOperand(1).getReg());

    Or->eraseFromParent();
    Xor->eraseFromParent();

    Changed = true;
  }

  return Changed;
}

bool SIOptimizeExecMaskingLegacy::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  return SIOptimizeExecMasking(&MF).run();
}

bool SIOptimizeExecMasking::run() {
  bool Changed = optimizeExecSequence();

  OrXors.clear();
  SaveExecVCmpMapping.clear();
  KillFlagCandidates.clear();
  static unsigned SearchWindow = 10;
  for (MachineBasicBlock &MBB : *MF) {
    unsigned SearchCount = 0;

    for (auto &MI : llvm::reverse(MBB)) {
      if (MI.isDebugInstr())
        continue;

      if (SearchCount >= SearchWindow) {
        break;
      }

      tryRecordOrSaveexecXorSequence(MI);
      tryRecordVCmpxAndSaveexecSequence(MI);

      if (MI.modifiesRegister(LMC.ExecReg, TRI)) {
        break;
      }

      ++SearchCount;
    }
  }

  Changed |= optimizeOrSaveexecXorSequences();
  for (const auto &Entry : SaveExecVCmpMapping) {
    MachineInstr *SaveExecInstr = Entry.getFirst();
    MachineInstr *VCmpInstr = Entry.getSecond();

    Changed |= optimizeVCMPSaveExecSequence(*SaveExecInstr, *VCmpInstr);
  }

  return Changed;
}