//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPULaneMaskUtils.h"
#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm::AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
} // namespace llvm::AMDGPU

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
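// (Illustrative: passing -amdgpu-s-branch-bits=5 to llc shrinks the allowed
// signed branch offset range, so branches to distant targets are relaxed into
// long-branch sequences even in small test functions.)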
static cl::opt<unsigned>
    BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                     cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
    "amdgpu-fix-16-bit-physreg-copies",
    cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
    cl::init(true),
    cl::ReallyHidden);
SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
    : AMDGPUGenInstrInfo(ST, AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
      RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}
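
// (Illustrative: for a glued machine SDNode with operands (addr, offset,
// glue), getNumOperandsNoGlue returns 2, so the operand comparisons below
// index only the real, non-glue operands.)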

/// Returns true if both nodes have the same value for the given
/// operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1,
                                      AMDGPU::OpName OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

static bool canRemat(const MachineInstr &MI) {
  if (SIInstrInfo::isVOP1(MI) || SIInstrInfo::isVOP2(MI) ||
      SIInstrInfo::isVOP3(MI) || SIInstrInfo::isSDWA(MI) ||
      SIInstrInfo::isSALU(MI))
    return true;

  if (SIInstrInfo::isSMRD(MI)) {
    return !MI.memoperands_empty() &&
           llvm::all_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
             return MMO->isLoad() && MMO->isInvariant();
           });
  }

  return false;
}
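
// (Illustrative: an SMRD whose only memoperand is an invariant load, e.g. a
// kernarg or constant-buffer read, passes the check above; an SMRD with no
// memoperands conservatively fails it.)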

bool SIInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  if (canRemat(MI)) {
    // Normally VALU use of exec would block the rematerialization, but that
    // is OK in this case to have an implicit exec read as all VALU do.
    // We really want all of the generic logic for this except for this.

    // Another potential implicit use is mode register. The core logic of
    // the RA will not attempt rematerialization if mode is set anywhere
    // in the function, otherwise it is safe since mode is not changed.

    // There is a difference from the generic method, which does not allow
    // rematerialization if there are virtual register uses. We allow this,
    // therefore this method includes SOP instructions as well.
    if (!MI.hasImplicitDef() &&
        MI.getNumImplicitOperands() == MI.getDesc().implicit_uses().size() &&
        !MI.mayRaiseFPException())
      return true;
  }

  return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
}

// Returns true if the scalar result of a VALU instruction depends on exec.
bool SIInstrInfo::resultDependsOnExec(const MachineInstr &MI) const {
  // Ignore comparisons which are only used masked with exec.
  // This allows some hoisting/sinking of VALU comparisons.
  if (MI.isCompare()) {
    const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::sdst);
    if (!Dst)
      return true;

    Register DstReg = Dst->getReg();
    if (!DstReg.isVirtual())
      return true;

    const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
    for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) {
      switch (Use.getOpcode()) {
      case AMDGPU::S_AND_SAVEEXEC_B32:
      case AMDGPU::S_AND_SAVEEXEC_B64:
        break;
      case AMDGPU::S_AND_B32:
      case AMDGPU::S_AND_B64:
        if (!Use.readsRegister(AMDGPU::EXEC, /*TRI=*/nullptr))
          return true;
        break;
      default:
        return true;
      }
    }
    return false;
  }

  switch (MI.getOpcode()) {
  default:
    break;
  case AMDGPU::V_READFIRSTLANE_B32:
    return true;
  }

  return false;
}

bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
  // Any implicit use of exec by VALU is not a real register read.
  return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
         isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent());
}

bool SIInstrInfo::isSafeToSink(MachineInstr &MI,
                               MachineBasicBlock *SuccToSinkTo,
                               MachineCycleInfo *CI) const {
  // Allow sinking if MI edits lane mask (divergent i1 in sgpr).
  if (MI.getOpcode() == AMDGPU::SI_IF_BREAK)
    return true;

  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  // Check if sinking of MI would create temporal divergent use.
  for (auto Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual() &&
        RI.isSGPRClass(MRI.getRegClass(Op.getReg()))) {
      MachineInstr *SgprDef = MRI.getVRegDef(Op.getReg());

      // SgprDef defined inside cycle
      MachineCycle *FromCycle = CI->getCycle(SgprDef->getParent());
      if (FromCycle == nullptr)
        continue;

      MachineCycle *ToCycle = CI->getCycle(SuccToSinkTo);
      // Check if there is a FromCycle that contains SgprDef's basic block but
      // does not contain SuccToSinkTo and also has divergent exit condition.
      while (FromCycle && !FromCycle->contains(ToCycle)) {
        SmallVector<MachineBasicBlock *, 1> ExitingBlocks;
        FromCycle->getExitingBlocks(ExitingBlocks);

        // FromCycle has divergent exit condition.
        for (MachineBasicBlock *ExitingBlock : ExitingBlocks) {
          if (hasDivergentBranch(ExitingBlock))
            return false;
        }

        FromCycle = FromCycle->getParentCycle();
      }
    }
  }

  return true;
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  // A mayLoad instruction without a def is not a load. Likely a prefetch.
  if (!get(Opc0).getNumDefs() || !get(Opc1).getNumDefs())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = Load0->getConstantOperandVal(Offset0Idx);
    Offset1 = Load1->getConstantOperandVal(Offset1Idx);
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (!AMDGPU::hasNamedOperand(Opc0, AMDGPU::OpName::sbase) ||
        !AMDGPU::hasNamedOperand(Opc1, AMDGPU::OpName::sbase))
      return false;

    unsigned NumOps = getNumOperandsNoGlue(Load0);
    if (NumOps != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Match register offsets, if both register and immediate offsets present.
    assert(NumOps == 4 || NumOps == 5);
    if (NumOps == 5 && Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(NumOps - 3));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(NumOps - 3));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = Off0->getAsZExtVal();
    Offset1 = Off1->getAsZExtVal();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      if (Opc == AMDGPU::DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64)
        Width = LocationSize::precise(64);
      else
        Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
    } else {
      // The 2 offset instructions use offset0 and offset1 instead. We can
      // treat these as a load with a single offset if the 2 offsets are
      // consecutive. We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm() & 0xff;
      unsigned Offset1 = Offset1Op->getImm() & 0xff;
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width = LocationSize::precise(
            Width.getValue() + TypeSize::getFixed(getOpSize(LdSt, DataOpIdx)));
      } else {
        Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
    if (!RSrc) // e.g. BUFFER_WBINVL1_VOL
      return false;
    BaseOps.push_back(RSrc);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp && !BaseOp->isFI())
      BaseOps.push_back(BaseOp);
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetImm->getImm();
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset) {
      if (SOffset->isReg())
        BaseOps.push_back(SOffset);
      else
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    if (DataOpIdx == -1) // LDS DMA
      return false;
    Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
    return true;
  }

  if (isImage(LdSt)) {
    auto RsrcOpName =
        isMIMG(LdSt) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcOpName);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    if (DataOpIdx == -1)
      return false; // no return sampler
    Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    if (DataOpIdx == -1)
      return false;
    Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both or none.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    if (DataOpIdx == -1) // LDS DMA
      return false;
    Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto *MO1 = *MI1.memoperands_begin();
  auto *MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  const auto *Base1 = MO1->getValue();
  const auto *Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      int64_t Offset1, bool OffsetIsScalable1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      int64_t Offset2, bool OffsetIsScalable2,
                                      unsigned ClusterSize,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  unsigned MaxMemoryClusterDWords = DefaultMemoryClusterDWordsLimit;
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;

    const SIMachineFunctionInfo *MFI =
        FirstLdSt.getMF()->getInfo<SIMachineFunctionInfo>();
    MaxMemoryClusterDWords = MFI->getMaxMemoryClusterDWords();
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // In order to avoid register pressure, on average the number of DWORDs
  // loaded together by all clustered mem ops should not exceed
  // MaxMemoryClusterDWords. This is an empirical value based on certain
  // observations and performance related experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is
  // a brief summary of how the heuristic behaves for various `LoadSize` when
  // MaxMemoryClusterDWords is 8.
  //
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
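  //
  // (Worked example: clustering four 12-byte loads gives LoadSize = 12 and
  // NumDWords = ceil(12/4) * 4 = 12 > 8, so the cluster is rejected; two such
  // loads give NumDWords = 6 and are accepted, matching row (3) above.)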
  const unsigned LoadSize = NumBytes / ClusterSize;
  const unsigned NumDWords = ((LoadSize + 3) / 4) * ClusterSize;
  return NumDWords <= MaxMemoryClusterDWords;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
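  // (Illustrative: offsets 0 and 48 fall within one 64-byte window, so up to
  // 16 such loads are scheduled near each other; offsets 0 and 128 are not.)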
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal VGPR to SGPR copy") {
  MachineFunction *MF = MBB.getParent();
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(DiagnosticInfoUnsupported(MF->getFunction(), Msg, DL, DS_Error));

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908. It is not
/// possible to have a direct copy in these cases on GFX908, so an intermediate
/// VGPR copy is required.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS, bool RegsOverlap,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  assert((TII.getSubtarget().hasMAIInsts() &&
          !TII.getSubtarget().hasGFX90AInsts()) &&
         "Expected GFX908 subtarget.");

  assert((AMDGPU::SReg_32RegClass.contains(SrcReg) ||
          AMDGPU::AGPR_32RegClass.contains(SrcReg)) &&
         "Source register of the copy should be either an SGPR or an AGPR.");

  assert(AMDGPU::AGPR_32RegClass.contains(DestReg) &&
         "Destination register of the copy should be an AGPR.");

  const SIRegisterInfo &RI = TII.getRegisterInfo();

  // First try to find defining accvgpr_write to avoid temporary registers.
  // In the case of copies of overlapping AGPRs, we conservatively do not
  // reuse previous accvgpr_writes. Otherwise, we may incorrectly pick up
  // an accvgpr_write used for this same copy due to implicit-defs.
  if (!RegsOverlap) {
    for (auto Def = MI, E = MBB.begin(); Def != E; ) {
      --Def;

      if (!Def->modifiesRegister(SrcReg, &RI))
        continue;

      if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
          Def->getOperand(0).getReg() != SrcReg)
        break;

      MachineOperand &DefOp = Def->getOperand(1);
      assert(DefOp.isReg() || DefOp.isImm());

      if (DefOp.isReg()) {
        bool SafeToPropagate = true;
        // Check that register source operand is not clobbered before MI.
        // Immediate operands are always safe to propagate.
        for (auto I = Def; I != MI && SafeToPropagate; ++I)
          if (I->modifiesRegister(DefOp.getReg(), &RI))
            SafeToPropagate = false;

        if (!SafeToPropagate)
          break;

        for (auto I = Def; I != MI; ++I)
          I->clearRegisterKills(DefOp.getReg(), &RI);
      }

      MachineInstrBuilder Builder =
          BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
                  DestReg)
              .add(DefOp);
      if (ImpDefSuperReg)
        Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

      if (ImpUseSuperReg) {
        Builder.addReg(ImpUseSuperReg,
                       getKillRegState(KillSrc) | RegState::Implicit);
      }

      return;
    }
  }

  RS.enterBasicBlockEnd(MBB);
  RS.backward(std::next(MI));
713
714 // Ideally we want to have three registers for a long reg_sequence copy
715 // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
716 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
717 *MBB.getParent());
718
719 // Registers in the sequence are allocated contiguously so we can just
720 // use register number to pick one of three round-robin temps.
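  // (Illustrative: consecutive destinations AGPR3, AGPR4, AGPR5 rotate
  // through temp slots 0, 1, 2, so back-to-back copies in a reg_sequence use
  // distinct VGPR temporaries and their waitstates overlap.)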
  unsigned RegNo = (DestReg - AMDGPU::AGPR0) % 3;
  Register Tmp =
      MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getVGPRForAGPRCopy();
  assert(MBB.getParent()->getRegInfo().isReserved(Tmp) &&
         "VGPR used for an intermediate copy should have been reserved.");

  // Only loop through if there are any free registers left. We don't want to
  // spill.
  while (RegNo--) {
    Register Tmp2 = RS.scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI,
                                                 /* RestoreAfter */ false, 0,
                                                 /* AllowSpill */ false);
    if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
      break;
    Tmp = Tmp2;
    RS.setRegUsed(Tmp);
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder =
      BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
          .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           const TargetRegisterClass *RC, bool Forward) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();
  ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
    unsigned Opcode = AMDGPU::S_MOV_B32;

    // Is SGPR aligned? If so try to combine with next.
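    // (Illustrative: copying s[4:7] to s[8:11] takes the aligned path below
    // and emits two S_MOV_B64 instead of four S_MOV_B32.)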
    bool AlignedDest = ((DestSubReg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((SrcSubReg - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy.
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      DestSubReg = RI.getSubReg(DestReg, SubIdx);
      SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
      assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), DestSubReg)
                 .addReg(SrcSubReg)
                 .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, Register DestReg,
                              Register SrcReg, bool KillSrc,
                              bool RenamableDest, bool RenamableSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegBaseClass(DestReg);
  unsigned Size = RI.getRegSizeInBits(*RC);
  const TargetRegisterClass *SrcRC = RI.getPhysRegBaseClass(SrcReg);
  unsigned SrcSize = RI.getRegSizeInBits(*SrcRC);

  // The rest of copyPhysReg assumes Src and Dst are the same size.
  // TODO-GFX11_16BIT: if all true 16-bit instruction patterns are completed,
  // can we remove Fix16BitCopies and this code block?
  if (Fix16BitCopies) {
    if (((Size == 16) != (SrcSize == 16))) {
      // Non-VGPR Src and Dst will later be expanded back to 32 bits.
      assert(ST.useRealTrue16Insts());
      Register &RegToFix = (Size == 32) ? DestReg : SrcReg;
      MCRegister SubReg = RI.getSubReg(RegToFix, AMDGPU::lo16);
      RegToFix = SubReg;

      if (DestReg == SrcReg) {
        // Identity copy. Insert empty bundle since ExpandPostRA expects an
        // instruction here.
        BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
        return;
      }
      RC = RI.getPhysRegBaseClass(DestReg);
      Size = RI.getRegSizeInBits(*RC);
      SrcRC = RI.getPhysRegBaseClass(SrcReg);
      SrcSize = RI.getRegSizeInBits(*SrcRC);
    }
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
            .addImm(0)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
            .addImm(0)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64_EncodableRegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      // This copy can only be produced by patterns
      // with explicit SCC, which are known to be enabled
      // only for subtargets with S_CMP_LG_U64 present.
      assert(ST.hasScalarCompareEq64());
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    } else {
      assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    }

    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
        (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block
    // on every AGPR spill.
    RegScavenger RS;
    const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS,
                       Overlap);
    return;
  }

  if (Size == 16) {
    assert(AMDGPU::VGPR_16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = !AMDGPU::isHi16Reg(DestReg, RI);
    bool SrcLow = !AMDGPU::isHi16Reg(SrcReg, RI);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
          .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (ST.useRealTrue16Insts()) {
      if (IsSGPRSrc) {
        assert(SrcLow);
        SrcReg = NewSrcReg;
      }
      // Use the smaller instruction encoding if possible.
      if (AMDGPU::VGPR_16_Lo128RegClass.contains(DestReg) &&
          (IsSGPRSrc || AMDGPU::VGPR_16_Lo128RegClass.contains(SrcReg))) {
        BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B16_t16_e32), DestReg)
            .addReg(SrcReg);
      } else {
        BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B16_t16_e64), DestReg)
            .addImm(0) // src0_modifiers
            .addReg(SrcReg)
            .addImm(0); // op_sel
      }
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
          .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
                   .addImm(0) // src0_modifiers
                   .addReg(NewSrcReg)
                   .addImm(0) // clamp
                   .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                  : AMDGPU::SDWA::SdwaSel::WORD_1)
                   .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
                   .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                  : AMDGPU::SDWA::SdwaSel::WORD_1)
                   .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
    if (ST.hasMovB64()) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_e32), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }
    if (ST.hasPkMovB32()) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
          .addImm(SISrcMods::OP_SEL_1)
          .addReg(SrcReg)
          .addImm(SISrcMods::OP_SEL_1)
          .addReg(SrcReg)
          .addImm(0) // op_sel_lo
          .addImm(0) // op_sel_hi
          .addImm(0) // neg_lo
          .addImm(0) // neg_hi
          .addImm(0) // clamp
          .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
      return;
    }
  }

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(SrcRC)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
    const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
    expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, CanKillSuperReg, RC,
                   Forward);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isAGPRClass(RC)) {
    if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
      Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
    else if (RI.hasVGPRs(SrcRC) ||
             (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC)))
      Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
    else
      Opcode = AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
             (RI.isProperlyAlignedRC(*RC) &&
              (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
    // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
    if (ST.hasMovB64()) {
      Opcode = AMDGPU::V_MOV_B64_e32;
      EltSize = 8;
    } else if (ST.hasPkMovB32()) {
      Opcode = AMDGPU::V_PK_MOV_B32;
      EltSize = 8;
    }
  }

  // For the cases where we need an intermediate instruction/temporary
  // register (destination is an AGPR), we need a scavenger.
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan
  // the whole block for every handled copy.
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS = std::make_unique<RegScavenger>();

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);

  // If there is an overlap, we can't kill the super-register on the last
  // instruction, since it will also kill the components made live by this
  // def.
  const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
  const bool CanKillSuperReg = KillSrc && !Overlap;

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");

    bool IsFirstSubreg = Idx == 0;
    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = IsFirstSubreg ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, DestSubReg, SrcSubReg, UseKill,
                         *RS, Overlap, ImpDefSuper, ImpUseSuper);
    } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
      MachineInstrBuilder MIB =
          BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestSubReg)
              .addImm(SISrcMods::OP_SEL_1)
              .addReg(SrcSubReg)
              .addImm(SISrcMods::OP_SEL_1)
              .addReg(SrcSubReg)
              .addImm(0) // op_sel_lo
              .addImm(0) // op_sel_hi
              .addImm(0) // neg_lo
              .addImm(0) // neg_hi
              .addImm(0) // clamp
              .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
      if (IsFirstSubreg)
        MIB.addReg(DestReg, RegState::Implicit | RegState::Define);
    } else {
      MachineInstrBuilder Builder =
          BuildMI(MBB, MI, DL, get(Opcode), DestSubReg).addReg(SrcSubReg);
      if (IsFirstSubreg)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC = RI.getWaveMaskRegClass();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(1).addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(0).addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(TrueReg)
          .addImm(0)
          .addReg(FalseReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(LMC.OrSaveExecOpc), SReg2).addImm(0);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(1).addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(LMC.OrSaveExecOpc), SReg2).addImm(0);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(0).addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
      .addImm(Value)
      .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
      .addImm(Value)
      .addReg(SrcReg);

  return Reg;
}

bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
                                          const Register Reg,
                                          int64_t &ImmVal) const {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOVK_I32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = Src0.getImm();
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  case AMDGPU::S_BREV_B32:
  case AMDGPU::V_BFREV_B32_e32:
  case AMDGPU::V_BFREV_B32_e64: {
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = static_cast<int64_t>(reverseBits<int32_t>(Src0.getImm()));
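      // (Illustrative: S_BREV_B32 with source immediate 1 defines
      // 0x80000000, i.e. bit 0 reversed into bit 31.)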
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  case AMDGPU::S_NOT_B32:
  case AMDGPU::V_NOT_B32_e32:
  case AMDGPU::V_NOT_B32_e64: {
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = static_cast<int64_t>(~static_cast<int32_t>(Src0.getImm()));
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  default:
    return false;
  }
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (RI.isAGPRClass(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 16) {
    // Assume hi bits are unneeded. Only _e64 true16 instructions are legal
    // before RA.
    return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
  }
  if (RI.getRegSizeInBits(*DstRC) == 32)
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
    return AMDGPU::S_MOV_B64;
  if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC))
    return AMDGPU::V_MOV_B64_PSEUDO;
  return AMDGPU::COPY;
}

const MCInstrDesc &
SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
                                     bool IsIndirectSrc) const {
  if (IsIndirectSrc) {
    if (VecSize <= 32) // 4 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
    if (VecSize <= 64) // 8 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
    if (VecSize <= 96) // 12 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
    if (VecSize <= 128) // 16 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
    if (VecSize <= 160) // 20 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
    if (VecSize <= 256) // 32 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
    if (VecSize <= 288) // 36 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9);
    if (VecSize <= 320) // 40 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10);
    if (VecSize <= 352) // 44 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11);
    if (VecSize <= 384) // 48 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12);
    if (VecSize <= 512) // 64 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
    if (VecSize <= 1024) // 128 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);

    llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos");
  }

  if (VecSize <= 32) // 4 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
  if (VecSize <= 64) // 8 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
  if (VecSize <= 96) // 12 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
  if (VecSize <= 128) // 16 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
  if (VecSize <= 160) // 20 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
  if (VecSize <= 256) // 32 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
  if (VecSize <= 288) // 36 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9);
  if (VecSize <= 320) // 40 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10);
  if (VecSize <= 352) // 44 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11);
  if (VecSize <= 384) // 48 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12);
  if (VecSize <= 512) // 64 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
  if (VecSize <= 1024) // 128 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);

  llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos");
}

static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 288) // 36 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9;
  if (VecSize <= 320) // 40 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10;
  if (VecSize <= 352) // 44 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11;
  if (VecSize <= 384) // 48 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 288) // 36 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9;
  if (VecSize <= 320) // 40 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10;
  if (VecSize <= 352) // 44 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11;
  if (VecSize <= 384) // 48 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &
SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
                                             bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_S224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_S288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_S320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_S352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_S384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 2:
    return AMDGPU::SI_SPILL_V16_SAVE;
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_V224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_V288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_V320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_V352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_V384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAVSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_AV32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_AV64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_AV96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_AV128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_AV160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_AV192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_AV224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_AV256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_AV288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_AV320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_AV352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_AV384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_AV512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_AV1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getWWMRegSpillSaveOpcode(unsigned Size,
                                         bool IsVectorSuperClass) {
  // Currently, only 32-bit WWM register spills are needed.
  if (Size != 4)
    llvm_unreachable("unknown wwm register spill size");

  if (IsVectorSuperClass)
    return AMDGPU::SI_SPILL_WWM_AV32_SAVE;

  return AMDGPU::SI_SPILL_WWM_V32_SAVE;
}

unsigned SIInstrInfo::getVectorRegSpillSaveOpcode(
    Register Reg, const TargetRegisterClass *RC, unsigned Size,
    const SIMachineFunctionInfo &MFI) const {
  bool IsVectorSuperClass = RI.isVectorSuperClass(RC);

  // Choose the right opcode if spilling a WWM register.
  if (MFI.checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG))
    return getWWMRegSpillSaveOpcode(Size, IsVectorSuperClass);

  // TODO: Check if AGPRs are available.
  if (ST.hasMAIInsts())
    return getAVSpillSaveOpcode(Size);

  return getVGPRSpillSaveOpcode(Size);
}
1679
1682 bool isKill, int FrameIndex, const TargetRegisterClass *RC,
1683 const TargetRegisterInfo *TRI, Register VReg,
1684 MachineInstr::MIFlag Flags) const {
1685 MachineFunction *MF = MBB.getParent();
1687 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1688 const DebugLoc &DL = MBB.findDebugLoc(MI);
1689
1690 MachinePointerInfo PtrInfo
1691 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1693 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
1694 FrameInfo.getObjectAlign(FrameIndex));
1695 unsigned SpillSize = TRI->getSpillSize(*RC);
1696
1698 if (RI.isSGPRClass(RC)) {
1699 MFI->setHasSpilledSGPRs();
1700 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
1701 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
1702 SrcReg != AMDGPU::EXEC && "exec should not be spilled");
1703
1704 // We are only allowed to create one new instruction when spilling
1705 // registers, so we need to use pseudo instruction for spilling SGPRs.
1706 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
1707
1708 // The SGPR spill/restore instructions only work on number sgprs, so we need
1709 // to make sure we are using the correct register class.
1710 if (SrcReg.isVirtual() && SpillSize == 4) {
1711 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1712 }
1713
1714 BuildMI(MBB, MI, DL, OpDesc)
1715 .addReg(SrcReg, getKillRegState(isKill)) // data
1716 .addFrameIndex(FrameIndex) // addr
1717 .addMemOperand(MMO)
1718 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1719
1720 if (RI.spillSGPRToVGPR())
1721 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1722 return;
1723 }
1724
1725 unsigned Opcode =
1726 getVectorRegSpillSaveOpcode(VReg ? VReg : SrcReg, RC, SpillSize, *MFI);
1727 MFI->setHasSpilledVGPRs();
1728
1729 BuildMI(MBB, MI, DL, get(Opcode))
1730 .addReg(SrcReg, getKillRegState(isKill)) // data
1731 .addFrameIndex(FrameIndex) // addr
1732 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1733 .addImm(0) // offset
1734 .addMemOperand(MMO);
1735}
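// Usage sketch (hypothetical caller; creating the frame index is the caller's
// job): a pass that must preserve a register across a region could create a
// spill slot and bracket the region with a store/load pair:
//   int FI = MF.getFrameInfo().CreateSpillStackObject(
//       TRI->getSpillSize(*RC), TRI->getSpillAlign(*RC));
//   TII->storeRegToStackSlot(MBB, MI, Reg, /*isKill=*/true, FI, RC, TRI,
//                            Register());
//   ... code that clobbers Reg ...
//   TII->loadRegFromStackSlot(MBB, MI, Reg, FI, RC, TRI, Register());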
1736
1737static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1738 switch (Size) {
1739 case 4:
1740 return AMDGPU::SI_SPILL_S32_RESTORE;
1741 case 8:
1742 return AMDGPU::SI_SPILL_S64_RESTORE;
1743 case 12:
1744 return AMDGPU::SI_SPILL_S96_RESTORE;
1745 case 16:
1746 return AMDGPU::SI_SPILL_S128_RESTORE;
1747 case 20:
1748 return AMDGPU::SI_SPILL_S160_RESTORE;
1749 case 24:
1750 return AMDGPU::SI_SPILL_S192_RESTORE;
1751 case 28:
1752 return AMDGPU::SI_SPILL_S224_RESTORE;
1753 case 32:
1754 return AMDGPU::SI_SPILL_S256_RESTORE;
1755 case 36:
1756 return AMDGPU::SI_SPILL_S288_RESTORE;
1757 case 40:
1758 return AMDGPU::SI_SPILL_S320_RESTORE;
1759 case 44:
1760 return AMDGPU::SI_SPILL_S352_RESTORE;
1761 case 48:
1762 return AMDGPU::SI_SPILL_S384_RESTORE;
1763 case 64:
1764 return AMDGPU::SI_SPILL_S512_RESTORE;
1765 case 128:
1766 return AMDGPU::SI_SPILL_S1024_RESTORE;
1767 default:
1768 llvm_unreachable("unknown register size");
1769 }
1770}
1771
1772static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1773 switch (Size) {
1774 case 2:
1775 return AMDGPU::SI_SPILL_V16_RESTORE;
1776 case 4:
1777 return AMDGPU::SI_SPILL_V32_RESTORE;
1778 case 8:
1779 return AMDGPU::SI_SPILL_V64_RESTORE;
1780 case 12:
1781 return AMDGPU::SI_SPILL_V96_RESTORE;
1782 case 16:
1783 return AMDGPU::SI_SPILL_V128_RESTORE;
1784 case 20:
1785 return AMDGPU::SI_SPILL_V160_RESTORE;
1786 case 24:
1787 return AMDGPU::SI_SPILL_V192_RESTORE;
1788 case 28:
1789 return AMDGPU::SI_SPILL_V224_RESTORE;
1790 case 32:
1791 return AMDGPU::SI_SPILL_V256_RESTORE;
1792 case 36:
1793 return AMDGPU::SI_SPILL_V288_RESTORE;
1794 case 40:
1795 return AMDGPU::SI_SPILL_V320_RESTORE;
1796 case 44:
1797 return AMDGPU::SI_SPILL_V352_RESTORE;
1798 case 48:
1799 return AMDGPU::SI_SPILL_V384_RESTORE;
1800 case 64:
1801 return AMDGPU::SI_SPILL_V512_RESTORE;
1802 case 128:
1803 return AMDGPU::SI_SPILL_V1024_RESTORE;
1804 default:
1805 llvm_unreachable("unknown register size");
1806 }
1807}
1808
1809static unsigned getAVSpillRestoreOpcode(unsigned Size) {
1810 switch (Size) {
1811 case 4:
1812 return AMDGPU::SI_SPILL_AV32_RESTORE;
1813 case 8:
1814 return AMDGPU::SI_SPILL_AV64_RESTORE;
1815 case 12:
1816 return AMDGPU::SI_SPILL_AV96_RESTORE;
1817 case 16:
1818 return AMDGPU::SI_SPILL_AV128_RESTORE;
1819 case 20:
1820 return AMDGPU::SI_SPILL_AV160_RESTORE;
1821 case 24:
1822 return AMDGPU::SI_SPILL_AV192_RESTORE;
1823 case 28:
1824 return AMDGPU::SI_SPILL_AV224_RESTORE;
1825 case 32:
1826 return AMDGPU::SI_SPILL_AV256_RESTORE;
1827 case 36:
1828 return AMDGPU::SI_SPILL_AV288_RESTORE;
1829 case 40:
1830 return AMDGPU::SI_SPILL_AV320_RESTORE;
1831 case 44:
1832 return AMDGPU::SI_SPILL_AV352_RESTORE;
1833 case 48:
1834 return AMDGPU::SI_SPILL_AV384_RESTORE;
1835 case 64:
1836 return AMDGPU::SI_SPILL_AV512_RESTORE;
1837 case 128:
1838 return AMDGPU::SI_SPILL_AV1024_RESTORE;
1839 default:
1840 llvm_unreachable("unknown register size");
1841 }
1842}
1843
1844static unsigned getWWMRegSpillRestoreOpcode(unsigned Size,
1845 bool IsVectorSuperClass) {
1846 // Currently, only 32-bit WWM register spills are needed.
1847 if (Size != 4)
1848 llvm_unreachable("unknown wwm register spill size");
1849
1850 if (IsVectorSuperClass) // TODO: Always use this if there are AGPRs
1851 return AMDGPU::SI_SPILL_WWM_AV32_RESTORE;
1852
1853 return AMDGPU::SI_SPILL_WWM_V32_RESTORE;
1854}
1855
1856 unsigned SIInstrInfo::getVectorRegSpillRestoreOpcode(
1857 Register Reg, const TargetRegisterClass *RC, unsigned Size,
1858 const SIMachineFunctionInfo &MFI) const {
1859 bool IsVectorSuperClass = RI.isVectorSuperClass(RC);
1860
1861 // Choose the right opcode if restoring a WWM register.
1862 if (MFI.checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG))
1863 return getWWMRegSpillRestoreOpcode(Size, IsVectorSuperClass);
1864
1865 // TODO: Check if AGPRs are available
1866 if (ST.hasMAIInsts())
1867 return getAVSpillRestoreOpcode(Size);
1868
1869 assert(!RI.isAGPRClass(RC));
1870 return getVGPRSpillRestoreOpcode(Size);
1871}
1872
1873 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1874 MachineBasicBlock::iterator MI,
1875 Register DestReg, int FrameIndex,
1876 const TargetRegisterClass *RC,
1877 const TargetRegisterInfo *TRI,
1878 Register VReg,
1879 MachineInstr::MIFlag Flags) const {
1880 MachineFunction *MF = MBB.getParent();
1881 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1882 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1883 const DebugLoc &DL = MBB.findDebugLoc(MI);
1884 unsigned SpillSize = TRI->getSpillSize(*RC);
1885
1886 MachinePointerInfo PtrInfo
1887 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1888
1889 MachineMemOperand *MMO = MF->getMachineMemOperand(
1890 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
1891 FrameInfo.getObjectAlign(FrameIndex));
1892
1893 if (RI.isSGPRClass(RC)) {
1894 MFI->setHasSpilledSGPRs();
1895 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
1896 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
1897 DestReg != AMDGPU::EXEC && "exec should not be spilled");
1898
1899 // FIXME: Maybe this should not include a memoperand because it will be
1900 // lowered to non-memory instructions.
1901 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
1902 if (DestReg.isVirtual() && SpillSize == 4) {
1903 MachineRegisterInfo &MRI = MF->getRegInfo();
1904 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1905 }
1906
1907 if (RI.spillSGPRToVGPR())
1908 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1909 BuildMI(MBB, MI, DL, OpDesc, DestReg)
1910 .addFrameIndex(FrameIndex) // addr
1911 .addMemOperand(MMO)
1912 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1913
1914 return;
1915 }
1916
1917 unsigned Opcode = getVectorRegSpillRestoreOpcode(VReg ? VReg : DestReg, RC,
1918 SpillSize, *MFI);
1919 BuildMI(MBB, MI, DL, get(Opcode), DestReg)
1920 .addFrameIndex(FrameIndex) // vaddr
1921 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1922 .addImm(0) // offset
1923 .addMemOperand(MMO);
1924}
1925
1930
1931 void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
1932 MachineBasicBlock::iterator MI,
1933 unsigned Quantity) const {
1934 DebugLoc DL = MBB.findDebugLoc(MI);
1935 unsigned MaxSNopCount = 1u << ST.getSNopBits();
1936 while (Quantity > 0) {
1937 unsigned Arg = std::min(Quantity, MaxSNopCount);
1938 Quantity -= Arg;
1939 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
1940 }
1941}
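// Worked example: with 3 s_nop bits, MaxSNopCount is 8, so
// insertNoops(MBB, MI, 10) emits
//   s_nop 7   ; covers 8 wait states
//   s_nop 1   ; covers the remaining 2
// since the S_NOP immediate encodes "wait states - 1".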
1942
1943 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
1944 auto *MF = MBB.getParent();
1945 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1946
1947 assert(Info->isEntryFunction());
1948
1949 if (MBB.succ_empty()) {
1950 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1951 if (HasNoTerminator) {
1952 if (Info->returnsVoid()) {
1953 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1954 } else {
1955 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1956 }
1957 }
1958 }
1959}
1960
1961 MachineBasicBlock *SIInstrInfo::insertSimulatedTrap(MachineRegisterInfo &MRI,
1962 MachineBasicBlock &MBB,
1963 MachineInstr &MI,
1964 const DebugLoc &DL) const {
1965 MachineFunction *MF = MBB.getParent();
1966 constexpr unsigned DoorbellIDMask = 0x3ff;
1967 constexpr unsigned ECQueueWaveAbort = 0x400;
1968
1969 MachineBasicBlock *TrapBB = &MBB;
1970 MachineBasicBlock *ContBB = &MBB;
1971 MachineBasicBlock *HaltLoopBB = MF->CreateMachineBasicBlock();
1972
1973 if (!MBB.succ_empty() || std::next(MI.getIterator()) != MBB.end()) {
1974 ContBB = MBB.splitAt(MI, /*UpdateLiveIns=*/false);
1975 TrapBB = MF->CreateMachineBasicBlock();
1976 BuildMI(MBB, MI, DL, get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(TrapBB);
1977 MF->push_back(TrapBB);
1978 MBB.addSuccessor(TrapBB);
1979 }
1980
1981 // Start with an `s_trap 2`; if we're in PRIV=1 and need the workaround, this
1982 // will be a no-op.
1983 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_TRAP))
1984 .addImm(static_cast<unsigned>(GCNSubtarget::TrapID::LLVMAMDHSATrap));
1985 Register DoorbellReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
1986 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG_RTN_B32),
1987 DoorbellReg)
1988 .addImm(AMDGPU::SendMsg::ID_RTN_GET_DOORBELL);
1989 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)
1990 .addUse(AMDGPU::M0);
1991 Register DoorbellRegMasked =
1992 MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
1993 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)
1994 .addUse(DoorbellReg)
1995 .addImm(DoorbellIDMask);
1996 Register SetWaveAbortBit =
1997 MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
1998 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
1999 .addUse(DoorbellRegMasked)
2000 .addImm(ECQueueWaveAbort);
2001 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2002 .addUse(SetWaveAbortBit);
2003 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG))
2004 .addImm(AMDGPU::SendMsg::ID_INTERRUPT);
2005 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2006 .addUse(AMDGPU::TTMP2);
2007 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_BRANCH)).addMBB(HaltLoopBB);
2008 TrapBB->addSuccessor(HaltLoopBB);
2009
2010 BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_SETHALT)).addImm(5);
2011 BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_BRANCH))
2012 .addMBB(HaltLoopBB);
2013 MF->push_back(HaltLoopBB);
2014 HaltLoopBB->addSuccessor(HaltLoopBB);
2015
2016 return ContBB;
2017}
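// Resulting control flow (sketch): when the trap is reachable mid-block, the
// block is split and the trap body is routed around the continuation:
//   MBB:        ... s_cbranch_execnz TrapBB
//   ContBB:     <rest of the original block>
//   TrapBB:     s_trap 2; fetch doorbell; send abort message; s_branch HaltLoopBB
//   HaltLoopBB: s_sethalt 5; s_branch HaltLoopBB   ; waves spin here forever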
2018
2019 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
2020 switch (MI.getOpcode()) {
2021 default:
2022 if (MI.isMetaInstruction())
2023 return 0;
2024 return 1; // FIXME: Do wait states equal cycles?
2025
2026 case AMDGPU::S_NOP:
2027 return MI.getOperand(0).getImm() + 1;
2028 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
2029 // hazard, even if one exists, won't really be visible. Should we handle it?
2030 }
2031}
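// E.g. "S_NOP 3" reports 4 wait states, while a meta instruction such as
// KILL or DBG_VALUE reports 0.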
2032
2033 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
2034 MachineBasicBlock &MBB = *MI.getParent();
2035 DebugLoc DL = MBB.findDebugLoc(MI);
2036 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
2037 switch (MI.getOpcode()) {
2038 default: return TargetInstrInfo::expandPostRAPseudo(MI);
2039 case AMDGPU::S_MOV_B64_term:
2040 // This is only a terminator to get the correct spill code placement during
2041 // register allocation.
2042 MI.setDesc(get(AMDGPU::S_MOV_B64));
2043 break;
2044
2045 case AMDGPU::S_MOV_B32_term:
2046 // This is only a terminator to get the correct spill code placement during
2047 // register allocation.
2048 MI.setDesc(get(AMDGPU::S_MOV_B32));
2049 break;
2050
2051 case AMDGPU::S_XOR_B64_term:
2052 // This is only a terminator to get the correct spill code placement during
2053 // register allocation.
2054 MI.setDesc(get(AMDGPU::S_XOR_B64));
2055 break;
2056
2057 case AMDGPU::S_XOR_B32_term:
2058 // This is only a terminator to get the correct spill code placement during
2059 // register allocation.
2060 MI.setDesc(get(AMDGPU::S_XOR_B32));
2061 break;
2062 case AMDGPU::S_OR_B64_term:
2063 // This is only a terminator to get the correct spill code placement during
2064 // register allocation.
2065 MI.setDesc(get(AMDGPU::S_OR_B64));
2066 break;
2067 case AMDGPU::S_OR_B32_term:
2068 // This is only a terminator to get the correct spill code placement during
2069 // register allocation.
2070 MI.setDesc(get(AMDGPU::S_OR_B32));
2071 break;
2072
2073 case AMDGPU::S_ANDN2_B64_term:
2074 // This is only a terminator to get the correct spill code placement during
2075 // register allocation.
2076 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
2077 break;
2078
2079 case AMDGPU::S_ANDN2_B32_term:
2080 // This is only a terminator to get the correct spill code placement during
2081 // register allocation.
2082 MI.setDesc(get(AMDGPU::S_ANDN2_B32));
2083 break;
2084
2085 case AMDGPU::S_AND_B64_term:
2086 // This is only a terminator to get the correct spill code placement during
2087 // register allocation.
2088 MI.setDesc(get(AMDGPU::S_AND_B64));
2089 break;
2090
2091 case AMDGPU::S_AND_B32_term:
2092 // This is only a terminator to get the correct spill code placement during
2093 // register allocation.
2094 MI.setDesc(get(AMDGPU::S_AND_B32));
2095 break;
2096
2097 case AMDGPU::S_AND_SAVEEXEC_B64_term:
2098 // This is only a terminator to get the correct spill code placement during
2099 // register allocation.
2100 MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B64));
2101 break;
2102
2103 case AMDGPU::S_AND_SAVEEXEC_B32_term:
2104 // This is only a terminator to get the correct spill code placement during
2105 // register allocation.
2106 MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B32));
2107 break;
2108
2109 case AMDGPU::SI_SPILL_S32_TO_VGPR:
2110 MI.setDesc(get(AMDGPU::V_WRITELANE_B32));
2111 break;
2112
2113 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
2114 MI.setDesc(get(AMDGPU::V_READLANE_B32));
2115 MI.getMF()->getRegInfo().constrainRegClass(MI.getOperand(0).getReg(),
2116 &AMDGPU::SReg_32_XM0RegClass);
2117 break;
2118 case AMDGPU::AV_MOV_B32_IMM_PSEUDO: {
2119 Register Dst = MI.getOperand(0).getReg();
2120 bool IsAGPR = SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst));
2121 MI.setDesc(
2122 get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
2123 break;
2124 }
2125 case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
2126 Register Dst = MI.getOperand(0).getReg();
2127 if (SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst))) {
2128 int64_t Imm = MI.getOperand(1).getImm();
2129
2130 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2131 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2132 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstLo)
2133 .addImm(SignExtend64<32>(Imm))
2134 .addReg(Dst, RegState::Implicit | RegState::Define);
2135 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstHi)
2136 .addImm(SignExtend64<32>(Imm >> 32))
2137 .addReg(Dst, RegState::Implicit | RegState::Define);
2138 MI.eraseFromParent();
2139 break;
2140 }
2141
2142 [[fallthrough]];
2143 }
2144 case AMDGPU::V_MOV_B64_PSEUDO: {
2145 Register Dst = MI.getOperand(0).getReg();
2146 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2147 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2148
2149 const MachineOperand &SrcOp = MI.getOperand(1);
2150 // FIXME: Will this work for 64-bit floating point immediates?
2151 assert(!SrcOp.isFPImm());
2152 if (ST.hasMovB64()) {
2153 MI.setDesc(get(AMDGPU::V_MOV_B64_e32));
2154 if (SrcOp.isReg() || isInlineConstant(MI, 1) ||
2155 isUInt<32>(SrcOp.getImm()) || ST.has64BitLiterals())
2156 break;
2157 }
2158 if (SrcOp.isImm()) {
2159 APInt Imm(64, SrcOp.getImm());
2160 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
2161 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
2162 if (ST.hasPkMovB32() && Lo == Hi && isInlineConstant(Lo)) {
2163 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
2164 .addImm(SISrcMods::OP_SEL_1)
2165 .addImm(Lo.getSExtValue())
2166 .addImm(SISrcMods::OP_SEL_1)
2167 .addImm(Lo.getSExtValue())
2168 .addImm(0) // op_sel_lo
2169 .addImm(0) // op_sel_hi
2170 .addImm(0) // neg_lo
2171 .addImm(0) // neg_hi
2172 .addImm(0); // clamp
2173 } else {
2174 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
2175 .addImm(Lo.getSExtValue())
2176 .addReg(Dst, RegState::Implicit | RegState::Define);
2177 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
2178 .addImm(Hi.getSExtValue())
2179 .addReg(Dst, RegState::Implicit | RegState::Define);
2180 }
2181 } else {
2182 assert(SrcOp.isReg());
2183 if (ST.hasPkMovB32() &&
2184 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) {
2185 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
2186 .addImm(SISrcMods::OP_SEL_1) // src0_mod
2187 .addReg(SrcOp.getReg())
2188 .addImm(SISrcMods::OP_SEL_1) // src1_mod
2189 .addReg(SrcOp.getReg())
2190 .addImm(0) // op_sel_lo
2191 .addImm(0) // op_sel_hi
2192 .addImm(0) // neg_lo
2193 .addImm(0) // neg_hi
2194 .addImm(0); // clamp
2195 } else {
2196 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
2197 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
2198 .addReg(Dst, RegState::Implicit | RegState::Define);
2199 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
2200 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
2201 .addReg(Dst, RegState::Implicit | RegState::Define);
2202 }
2203 }
2204 MI.eraseFromParent();
2205 break;
2206 }
2207 case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
2208 expandMovDPP64(MI);
2209 break;
2210 }
2211 case AMDGPU::S_MOV_B64_IMM_PSEUDO: {
2212 const MachineOperand &SrcOp = MI.getOperand(1);
2213 assert(!SrcOp.isFPImm());
2214
2215 if (ST.has64BitLiterals()) {
2216 MI.setDesc(get(AMDGPU::S_MOV_B64));
2217 break;
2218 }
2219
2220 APInt Imm(64, SrcOp.getImm());
2221 if (Imm.isIntN(32) || isInlineConstant(Imm)) {
2222 MI.setDesc(get(AMDGPU::S_MOV_B64));
2223 break;
2224 }
2225
2226 Register Dst = MI.getOperand(0).getReg();
2227 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2228 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2229
2230 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
2231 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
2232 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo)
2233 .addImm(Lo.getSExtValue())
2234 .addReg(Dst, RegState::Implicit | RegState::Define);
2235 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi)
2236 .addImm(Hi.getSExtValue())
2237 .addReg(Dst, RegState::Implicit | RegState::Define);
2238 MI.eraseFromParent();
2239 break;
2240 }
2241 case AMDGPU::V_SET_INACTIVE_B32: {
2242 // Lower V_SET_INACTIVE_B32 to V_CNDMASK_B32.
2243 Register DstReg = MI.getOperand(0).getReg();
2244 BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2245 .add(MI.getOperand(3))
2246 .add(MI.getOperand(4))
2247 .add(MI.getOperand(1))
2248 .add(MI.getOperand(2))
2249 .add(MI.getOperand(5));
2250 MI.eraseFromParent();
2251 break;
2252 }
2253 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
2254 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
2255 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
2256 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
2257 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
2258 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
2259 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9:
2260 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10:
2261 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11:
2262 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12:
2263 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
2264 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
2265 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
2266 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
2267 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
2268 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
2269 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
2270 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
2271 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9:
2272 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10:
2273 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11:
2274 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12:
2275 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
2276 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
2277 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
2278 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
2279 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
2280 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
2281 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
2282 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
2283
2284 unsigned Opc;
2285 if (RI.hasVGPRs(EltRC)) {
2286 Opc = AMDGPU::V_MOVRELD_B32_e32;
2287 } else {
2288 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
2289 : AMDGPU::S_MOVRELD_B32;
2290 }
2291
2292 const MCInstrDesc &OpDesc = get(Opc);
2293 Register VecReg = MI.getOperand(0).getReg();
2294 bool IsUndef = MI.getOperand(1).isUndef();
2295 unsigned SubReg = MI.getOperand(3).getImm();
2296 assert(VecReg == MI.getOperand(1).getReg());
2297
2298 MachineInstrBuilder MIB =
2299 BuildMI(MBB, MI, DL, OpDesc)
2300 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2301 .add(MI.getOperand(2))
2302 .addReg(VecReg, RegState::ImplicitDefine)
2303 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2304
2305 const int ImpDefIdx =
2306 OpDesc.getNumOperands() + OpDesc.implicit_uses().size();
2307 const int ImpUseIdx = ImpDefIdx + 1;
2308 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2309 MI.eraseFromParent();
2310 break;
2311 }
2312 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
2313 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
2314 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
2315 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
2316 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
2317 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
2318 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9:
2319 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10:
2320 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11:
2321 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12:
2322 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
2323 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
2324 assert(ST.useVGPRIndexMode());
2325 Register VecReg = MI.getOperand(0).getReg();
2326 bool IsUndef = MI.getOperand(1).isUndef();
2327 MachineOperand &Idx = MI.getOperand(3);
2328 Register SubReg = MI.getOperand(4).getImm();
2329
2330 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2331 .add(Idx)
2332 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2333 SetOn->getOperand(3).setIsUndef();
2334
2335 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);
2336 MachineInstrBuilder MIB =
2337 BuildMI(MBB, MI, DL, OpDesc)
2338 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2339 .add(MI.getOperand(2))
2340 .addReg(VecReg, RegState::ImplicitDefine)
2341 .addReg(VecReg,
2342 RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2343
2344 const int ImpDefIdx =
2345 OpDesc.getNumOperands() + OpDesc.implicit_uses().size();
2346 const int ImpUseIdx = ImpDefIdx + 1;
2347 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2348
2349 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2350
2351 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2352
2353 MI.eraseFromParent();
2354 break;
2355 }
2356 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
2357 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
2358 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
2359 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
2360 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
2361 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
2362 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9:
2363 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10:
2364 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11:
2365 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12:
2366 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
2367 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
2368 assert(ST.useVGPRIndexMode());
2369 Register Dst = MI.getOperand(0).getReg();
2370 Register VecReg = MI.getOperand(1).getReg();
2371 bool IsUndef = MI.getOperand(1).isUndef();
2372 Register Idx = MI.getOperand(2).getReg();
2373 Register SubReg = MI.getOperand(3).getImm();
2374
2375 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2376 .addReg(Idx)
2377 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2378 SetOn->getOperand(3).setIsUndef();
2379
2380 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read))
2381 .addDef(Dst)
2382 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2383 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2384
2385 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2386
2387 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2388
2389 MI.eraseFromParent();
2390 break;
2391 }
2392 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
2393 MachineFunction &MF = *MBB.getParent();
2394 Register Reg = MI.getOperand(0).getReg();
2395 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
2396 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
2397 MachineOperand OpLo = MI.getOperand(1);
2398 MachineOperand OpHi = MI.getOperand(2);
2399
2400 // Create a bundle so these instructions won't be re-ordered by the
2401 // post-RA scheduler.
2402 MIBundleBuilder Bundler(MBB, MI);
2403 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2404
2405 // What we want here is an offset from the value returned by s_getpc (which
2406 // is the address of the s_add_u32 instruction) to the global variable, but
2407 // since the encoding of $symbol starts 4 bytes after the start of the
2408 // s_add_u32 instruction, we end up with an offset that is 4 bytes too
2409 // small. This requires us to add 4 to the global variable offset in order
2410 // to compute the correct address. Similarly for the s_addc_u32 instruction,
2411 // the encoding of $symbol starts 12 bytes after the start of the s_add_u32
2412 // instruction.
2413
2414 int64_t Adjust = 0;
2415 if (ST.hasGetPCZeroExtension()) {
2416 // Fix up hardware that does not sign-extend the 48-bit PC value by
2417 // inserting: s_sext_i32_i16 reghi, reghi
2418 Bundler.append(
2419 BuildMI(MF, DL, get(AMDGPU::S_SEXT_I32_I16), RegHi).addReg(RegHi));
2420 Adjust += 4;
2421 }
2422
2423 if (OpLo.isGlobal())
2424 OpLo.setOffset(OpLo.getOffset() + Adjust + 4);
2425 Bundler.append(
2426 BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo).addReg(RegLo).add(OpLo));
2427
2428 if (OpHi.isGlobal())
2429 OpHi.setOffset(OpHi.getOffset() + Adjust + 12);
2430 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
2431 .addReg(RegHi)
2432 .add(OpHi));
2433
2434 finalizeBundle(MBB, Bundler.begin());
2435
2436 MI.eraseFromParent();
2437 break;
2438 }
2439 case AMDGPU::SI_PC_ADD_REL_OFFSET64: {
2440 MachineFunction &MF = *MBB.getParent();
2441 Register Reg = MI.getOperand(0).getReg();
2442 MachineOperand Op = MI.getOperand(1);
2443
2444 // Create a bundle so these instructions won't be re-ordered by the
2445 // post-RA scheduler.
2446 MIBundleBuilder Bundler(MBB, MI);
2447 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2448 if (Op.isGlobal())
2449 Op.setOffset(Op.getOffset() + 4);
2450 Bundler.append(
2451 BuildMI(MF, DL, get(AMDGPU::S_ADD_U64), Reg).addReg(Reg).add(Op));
2452
2453 finalizeBundle(MBB, Bundler.begin());
2454
2455 MI.eraseFromParent();
2456 break;
2457 }
2458 case AMDGPU::ENTER_STRICT_WWM: {
2459 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2460 // Whole Wave Mode is entered.
2461 MI.setDesc(get(LMC.OrSaveExecOpc));
2462 break;
2463 }
2464 case AMDGPU::ENTER_STRICT_WQM: {
2465 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2466 // STRICT_WQM is entered.
2467 BuildMI(MBB, MI, DL, get(LMC.MovOpc), MI.getOperand(0).getReg())
2468 .addReg(LMC.ExecReg);
2469 BuildMI(MBB, MI, DL, get(LMC.WQMOpc), LMC.ExecReg).addReg(LMC.ExecReg);
2470
2471 MI.eraseFromParent();
2472 break;
2473 }
2474 case AMDGPU::EXIT_STRICT_WWM:
2475 case AMDGPU::EXIT_STRICT_WQM: {
2476 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2477 // WWM/STRICT_WQM is exited.
2478 MI.setDesc(get(LMC.MovOpc));
2479 break;
2480 }
2481 case AMDGPU::SI_RETURN: {
2482 const MachineFunction *MF = MBB.getParent();
2483 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2484 const SIRegisterInfo *TRI = ST.getRegisterInfo();
2485 // Hiding the return address use with SI_RETURN may lead to extra kills in
2486 // the function and missing live-ins. We are fine in practice because callee
2487 // saved register handling ensures the register value is restored before
2488 // RET, but we need the undef flag here to appease the MachineVerifier
2489 // liveness checks.
2490 MachineInstrBuilder MIB =
2491 BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
2492 .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
2493
2494 MIB.copyImplicitOps(MI);
2495 MI.eraseFromParent();
2496 break;
2497 }
2498
2499 case AMDGPU::S_MUL_U64_U32_PSEUDO:
2500 case AMDGPU::S_MUL_I64_I32_PSEUDO:
2501 MI.setDesc(get(AMDGPU::S_MUL_U64));
2502 break;
2503
2504 case AMDGPU::S_GETPC_B64_pseudo:
2505 MI.setDesc(get(AMDGPU::S_GETPC_B64));
2506 if (ST.hasGetPCZeroExtension()) {
2507 Register Dst = MI.getOperand(0).getReg();
2508 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2509 // Fix up hardware that does not sign-extend the 48-bit PC value by
2510 // inserting: s_sext_i32_i16 dsthi, dsthi
2511 BuildMI(MBB, std::next(MI.getIterator()), DL, get(AMDGPU::S_SEXT_I32_I16),
2512 DstHi)
2513 .addReg(DstHi);
2514 }
2515 break;
2516
2517 case AMDGPU::V_MAX_BF16_PSEUDO_e64:
2518 assert(ST.hasBF16PackedInsts());
2519 MI.setDesc(get(AMDGPU::V_PK_MAX_NUM_BF16));
2520 MI.addOperand(MachineOperand::CreateImm(0)); // op_sel
2521 MI.addOperand(MachineOperand::CreateImm(0)); // neg_lo
2522 MI.addOperand(MachineOperand::CreateImm(0)); // neg_hi
2523 auto Op0 = getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
2524 Op0->setImm(Op0->getImm() | SISrcMods::OP_SEL_1);
2525 auto Op1 = getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
2526 Op1->setImm(Op1->getImm() | SISrcMods::OP_SEL_1);
2527 break;
2528 }
2529
2530 return true;
2531}
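// Example (illustrative): post-RA, on a subtarget without v_mov_b64,
//   $vgpr0_vgpr1 = V_MOV_B64_PSEUDO 4607182418800017408  ; 0x3FF0000000000000
// is split into per-half 32-bit moves:
//   $vgpr0 = V_MOV_B32_e32 0
//   $vgpr1 = V_MOV_B32_e32 1072693248                    ; 0x3FF00000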
2532
2533 void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
2534 MachineBasicBlock::iterator I, Register DestReg,
2535 unsigned SubIdx, const MachineInstr &Orig,
2536 const TargetRegisterInfo &RI) const {
2537
2538 // Try shrinking the instruction to remat only the part needed for the
2539 // current context.
2540 // TODO: Handle more cases.
2541 unsigned Opcode = Orig.getOpcode();
2542 switch (Opcode) {
2543 case AMDGPU::S_LOAD_DWORDX16_IMM:
2544 case AMDGPU::S_LOAD_DWORDX8_IMM: {
2545 if (SubIdx != 0)
2546 break;
2547
2548 if (I == MBB.end())
2549 break;
2550
2551 if (I->isBundled())
2552 break;
2553
2554 // Look for a single use of the register that is also a subreg.
2555 Register RegToFind = Orig.getOperand(0).getReg();
2556 MachineOperand *UseMO = nullptr;
2557 for (auto &CandMO : I->operands()) {
2558 if (!CandMO.isReg() || CandMO.getReg() != RegToFind || CandMO.isDef())
2559 continue;
2560 if (UseMO) {
2561 UseMO = nullptr;
2562 break;
2563 }
2564 UseMO = &CandMO;
2565 }
2566 if (!UseMO || UseMO->getSubReg() == AMDGPU::NoSubRegister)
2567 break;
2568
2569 unsigned Offset = RI.getSubRegIdxOffset(UseMO->getSubReg());
2570 unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg());
2571
2572 MachineFunction *MF = MBB.getParent();
2573 MachineRegisterInfo &MRI = MF->getRegInfo();
2574 assert(MRI.use_nodbg_empty(DestReg) && "DestReg should have no users yet.");
2575
2576 unsigned NewOpcode = -1;
2577 if (SubregSize == 256)
2578 NewOpcode = AMDGPU::S_LOAD_DWORDX8_IMM;
2579 else if (SubregSize == 128)
2580 NewOpcode = AMDGPU::S_LOAD_DWORDX4_IMM;
2581 else
2582 break;
2583
2584 const MCInstrDesc &TID = get(NewOpcode);
2585 const TargetRegisterClass *NewRC =
2586 RI.getAllocatableClass(getRegClass(TID, 0, &RI));
2587 MRI.setRegClass(DestReg, NewRC);
2588
2589 UseMO->setReg(DestReg);
2590 UseMO->setSubReg(AMDGPU::NoSubRegister);
2591
2592 // Use a smaller load with the desired size, possibly with updated offset.
2593 MachineInstr *MI = MF->CloneMachineInstr(&Orig);
2594 MI->setDesc(TID);
2595 MI->getOperand(0).setReg(DestReg);
2596 MI->getOperand(0).setSubReg(AMDGPU::NoSubRegister);
2597 if (Offset) {
2598 MachineOperand *OffsetMO = getNamedOperand(*MI, AMDGPU::OpName::offset);
2599 int64_t FinalOffset = OffsetMO->getImm() + Offset / 8;
2600 OffsetMO->setImm(FinalOffset);
2601 }
2602 SmallVector<MachineMemOperand *> NewMMOs;
2603 for (const MachineMemOperand *MemOp : Orig.memoperands())
2604 NewMMOs.push_back(MF->getMachineMemOperand(MemOp, MemOp->getPointerInfo(),
2605 SubregSize / 8));
2606 MI->setMemRefs(*MF, NewMMOs);
2607
2608 MBB.insert(I, MI);
2609 return;
2610 }
2611
2612 default:
2613 break;
2614 }
2615
2616 TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, RI);
2617}
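// Example (illustrative): if the single user of a rematerialized
//   %d:sgpr_512 = S_LOAD_DWORDX16_IMM %base, 0, 0
// only reads %d.sub4_sub5_sub6_sub7 (bit offset 128, 128 bits wide), the
// clone is shrunk to
//   %d:sgpr_128 = S_LOAD_DWORDX4_IMM %base, 16, 0
// where the byte offset grew by 128 / 8 == 16 and the memory operand was
// narrowed to match.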
2618
2619std::pair<MachineInstr*, MachineInstr*>
2620 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
2621 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
2622
2623 if (ST.hasMovB64() && ST.hasFeature(AMDGPU::FeatureDPALU_DPP) &&
2624 AMDGPU::isLegalDPALU_DPPControl(
2625 ST, getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) {
2626 MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
2627 return std::pair(&MI, nullptr);
2628 }
2629
2630 MachineBasicBlock &MBB = *MI.getParent();
2631 DebugLoc DL = MBB.findDebugLoc(MI);
2632 MachineFunction *MF = MBB.getParent();
2633 MachineRegisterInfo &MRI = MF->getRegInfo();
2634 Register Dst = MI.getOperand(0).getReg();
2635 unsigned Part = 0;
2636 MachineInstr *Split[2];
2637
2638 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
2639 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
2640 if (Dst.isPhysical()) {
2641 MovDPP.addDef(RI.getSubReg(Dst, Sub));
2642 } else {
2643 assert(MRI.isSSA());
2644 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2645 MovDPP.addDef(Tmp);
2646 }
2647
2648 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
2649 const MachineOperand &SrcOp = MI.getOperand(I);
2650 assert(!SrcOp.isFPImm());
2651 if (SrcOp.isImm()) {
2652 APInt Imm(64, SrcOp.getImm());
2653 Imm.ashrInPlace(Part * 32);
2654 MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
2655 } else {
2656 assert(SrcOp.isReg());
2657 Register Src = SrcOp.getReg();
2658 if (Src.isPhysical())
2659 MovDPP.addReg(RI.getSubReg(Src, Sub));
2660 else
2661 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
2662 }
2663 }
2664
2665 for (const MachineOperand &MO : llvm::drop_begin(MI.explicit_operands(), 3))
2666 MovDPP.addImm(MO.getImm());
2667
2668 Split[Part] = MovDPP;
2669 ++Part;
2670 }
2671
2672 if (Dst.isVirtual())
2673 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
2674 .addReg(Split[0]->getOperand(0).getReg())
2675 .addImm(AMDGPU::sub0)
2676 .addReg(Split[1]->getOperand(0).getReg())
2677 .addImm(AMDGPU::sub1);
2678
2679 MI.eraseFromParent();
2680 return std::pair(Split[0], Split[1]);
2681}
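// Example (illustrative): without a legal 64-bit DPP mov, the pseudo
//   %d:vreg_64 = V_MOV_B64_DPP_PSEUDO %old, %src, <dpp controls>
// becomes one 32-bit DPP mov per half plus a reassembly:
//   %lo:vgpr_32 = V_MOV_B32_dpp %old.sub0, %src.sub0, <dpp controls>
//   %hi:vgpr_32 = V_MOV_B32_dpp %old.sub1, %src.sub1, <dpp controls>
//   %d = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1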
2682
2683std::optional<DestSourcePair>
2684 SIInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
2685 if (MI.getOpcode() == AMDGPU::WWM_COPY)
2686 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
2687
2688 return std::nullopt;
2689}
2690
2691 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0,
2692 AMDGPU::OpName Src0OpName,
2693 MachineOperand &Src1,
2694 AMDGPU::OpName Src1OpName) const {
2695 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
2696 if (!Src0Mods)
2697 return false;
2698
2699 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
2700 assert(Src1Mods &&
2701 "All commutable instructions have both src0 and src1 modifiers");
2702
2703 int Src0ModsVal = Src0Mods->getImm();
2704 int Src1ModsVal = Src1Mods->getImm();
2705
2706 Src1Mods->setImm(Src0ModsVal);
2707 Src0Mods->setImm(Src1ModsVal);
2708 return true;
2709}
2710
2711 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
2712 MachineOperand &RegOp,
2713 MachineOperand &NonRegOp) {
2714 Register Reg = RegOp.getReg();
2715 unsigned SubReg = RegOp.getSubReg();
2716 bool IsKill = RegOp.isKill();
2717 bool IsDead = RegOp.isDead();
2718 bool IsUndef = RegOp.isUndef();
2719 bool IsDebug = RegOp.isDebug();
2720
2721 if (NonRegOp.isImm())
2722 RegOp.ChangeToImmediate(NonRegOp.getImm());
2723 else if (NonRegOp.isFI())
2724 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
2725 else if (NonRegOp.isGlobal()) {
2726 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
2727 NonRegOp.getTargetFlags());
2728 } else
2729 return nullptr;
2730
2731 // Make sure we don't reinterpret a subreg index in the target flags.
2732 RegOp.setTargetFlags(NonRegOp.getTargetFlags());
2733
2734 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
2735 NonRegOp.setSubReg(SubReg);
2736
2737 return &MI;
2738}
2739
2740 static MachineInstr *swapImmOperands(MachineInstr &MI,
2741 MachineOperand &NonRegOp1,
2742 MachineOperand &NonRegOp2) {
2743 unsigned TargetFlags = NonRegOp1.getTargetFlags();
2744 int64_t NonRegVal = NonRegOp1.getImm();
2745
2746 NonRegOp1.setImm(NonRegOp2.getImm());
2747 NonRegOp2.setImm(NonRegVal);
2748 NonRegOp1.setTargetFlags(NonRegOp2.getTargetFlags());
2749 NonRegOp2.setTargetFlags(TargetFlags);
2750 return &MI;
2751}
2752
2753bool SIInstrInfo::isLegalToSwap(const MachineInstr &MI, unsigned OpIdx0,
2754 unsigned OpIdx1) const {
2755 const MCInstrDesc &InstDesc = MI.getDesc();
2756 const MCOperandInfo &OpInfo0 = InstDesc.operands()[OpIdx0];
2757 const MCOperandInfo &OpInfo1 = InstDesc.operands()[OpIdx1];
2758
2759 unsigned Opc = MI.getOpcode();
2760 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2761
2762 const MachineOperand &MO0 = MI.getOperand(OpIdx0);
2763 const MachineOperand &MO1 = MI.getOperand(OpIdx1);
2764
2765 // The swap must not breach the constant bus or literal limits. It may move a
2766 // literal to a position other than src0, which is not allowed pre-gfx10.
2767 // However, most test cases need literals in src0 for VOP.
2768 // FIXME: After gfx9, a literal can be placed somewhere other than src0.
2769 if (isVALU(MI)) {
2770 if ((int)OpIdx0 == Src0Idx && !MO0.isReg() &&
2771 !isInlineConstant(MO0, OpInfo1))
2772 return false;
2773 if ((int)OpIdx1 == Src0Idx && !MO1.isReg() &&
2774 !isInlineConstant(MO1, OpInfo0))
2775 return false;
2776 }
2777
2778 if ((int)OpIdx1 != Src0Idx && MO0.isReg()) {
2779 if (OpInfo1.RegClass == -1)
2780 return OpInfo1.OperandType == MCOI::OPERAND_UNKNOWN;
2781 return isLegalRegOperand(MI, OpIdx1, MO0) &&
2782 (!MO1.isReg() || isLegalRegOperand(MI, OpIdx0, MO1));
2783 }
2784 if ((int)OpIdx0 != Src0Idx && MO1.isReg()) {
2785 if (OpInfo0.RegClass == -1)
2786 return OpInfo0.OperandType == MCOI::OPERAND_UNKNOWN;
2787 return (!MO0.isReg() || isLegalRegOperand(MI, OpIdx1, MO0)) &&
2788 isLegalRegOperand(MI, OpIdx0, MO1);
2789 }
2790
2791 // No need to check 64-bit literals, since swapping does not bring a new
2792 // 64-bit literal into the current instruction to fold to 32 bits.
2793
2794 return isImmOperandLegal(MI, OpIdx1, MO0);
2795}
2796
2797 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2798 unsigned Src0Idx,
2799 unsigned Src1Idx) const {
2800 assert(!NewMI && "this should never be used");
2801
2802 unsigned Opc = MI.getOpcode();
2803 int CommutedOpcode = commuteOpcode(Opc);
2804 if (CommutedOpcode == -1)
2805 return nullptr;
2806
2807 if (Src0Idx > Src1Idx)
2808 std::swap(Src0Idx, Src1Idx);
2809
2810 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
2811 static_cast<int>(Src0Idx) &&
2812 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
2813 static_cast<int>(Src1Idx) &&
2814 "inconsistency with findCommutedOpIndices");
2815
2816 if (!isLegalToSwap(MI, Src0Idx, Src1Idx))
2817 return nullptr;
2818
2819 MachineInstr *CommutedMI = nullptr;
2820 MachineOperand &Src0 = MI.getOperand(Src0Idx);
2821 MachineOperand &Src1 = MI.getOperand(Src1Idx);
2822 if (Src0.isReg() && Src1.isReg()) {
2823 // Be sure to copy the source modifiers to the right place.
2824 CommutedMI =
2825 TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
2826 } else if (Src0.isReg() && !Src1.isReg()) {
2827 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
2828 } else if (!Src0.isReg() && Src1.isReg()) {
2829 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
2830 } else if (Src0.isImm() && Src1.isImm()) {
2831 CommutedMI = swapImmOperands(MI, Src0, Src1);
2832 } else {
2833 // FIXME: Found two non registers to commute. This does happen.
2834 return nullptr;
2835 }
2836
2837 if (CommutedMI) {
2838 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
2839 Src1, AMDGPU::OpName::src1_modifiers);
2840
2841 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_sel, Src1,
2842 AMDGPU::OpName::src1_sel);
2843
2844 CommutedMI->setDesc(get(CommutedOpcode));
2845 }
2846
2847 return CommutedMI;
2848}
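// Example (illustrative): commuting "%d = V_SUB_F32_e32 %a, %b" rewrites it
// to the commuted opcode with the operands exchanged,
//   %d = V_SUBREV_F32_e32 %b, %a
// while an immediate operand goes through swapRegAndNonRegOperand so source
// modifiers and target flags stay attached to the right slots.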
2849
2850// This needs to be implemented because the source modifiers may be inserted
2851// between the true commutable operands, and the base
2852// TargetInstrInfo::commuteInstruction uses it.
2853 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2854 unsigned &SrcOpIdx0,
2855 unsigned &SrcOpIdx1) const {
2856 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
2857}
2858
2859 bool SIInstrInfo::findCommutedOpIndices(const MCInstrDesc &Desc,
2860 unsigned &SrcOpIdx0,
2861 unsigned &SrcOpIdx1) const {
2862 if (!Desc.isCommutable())
2863 return false;
2864
2865 unsigned Opc = Desc.getOpcode();
2866 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2867 if (Src0Idx == -1)
2868 return false;
2869
2870 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2871 if (Src1Idx == -1)
2872 return false;
2873
2874 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
2875}
2876
2877 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
2878 int64_t BrOffset) const {
2879 // BranchRelaxation should never have to check s_setpc_b64 or s_add_pc_i64
2880 // because its dest block is unanalyzable.
2881 assert(isSOPP(BranchOp) || isSOPK(BranchOp));
2882
2883 // Convert to dwords.
2884 BrOffset /= 4;
2885
2886 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
2887 // from the next instruction.
2888 BrOffset -= 1;
2889
2890 return isIntN(BranchOffsetBits, BrOffset);
2891}
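// Worked example: with the default 16-bit limit, a forward branch of 0x20000
// bytes yields 0x20000/4 - 1 == 32767 dwords, the largest value isIntN(16)
// accepts; four more bytes would force branch relaxation.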
2892
2893 MachineBasicBlock *
2894 SIInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
2895 return MI.getOperand(0).getMBB();
2896}
2897
2898 bool SIInstrInfo::hasDivergentBranch(const MachineBasicBlock *MBB) const {
2899 for (const MachineInstr &MI : MBB->terminators()) {
2900 if (MI.getOpcode() == AMDGPU::SI_IF || MI.getOpcode() == AMDGPU::SI_ELSE ||
2901 MI.getOpcode() == AMDGPU::SI_LOOP)
2902 return true;
2903 }
2904 return false;
2905}
2906
2907 void SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
2908 MachineBasicBlock &DestBB,
2909 MachineBasicBlock &RestoreBB,
2910 const DebugLoc &DL, int64_t BrOffset,
2911 RegScavenger *RS) const {
2912 assert(MBB.empty() &&
2913 "new block should be inserted for expanding unconditional branch");
2914 assert(MBB.pred_size() == 1);
2915 assert(RestoreBB.empty() &&
2916 "restore block should be inserted for restoring clobbered registers");
2917
2918 MachineFunction *MF = MBB.getParent();
2919 MachineRegisterInfo &MRI = MF->getRegInfo();
2920 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
2921 auto I = MBB.end();
2922 auto &MCCtx = MF->getContext();
2923
2924 if (ST.hasAddPC64Inst()) {
2925 MCSymbol *Offset =
2926 MCCtx.createTempSymbol("offset", /*AlwaysAddSuffix=*/true);
2927 auto AddPC = BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_PC_I64))
2928 .addSym(Offset, MO_FAR_BRANCH_OFFSET);
2929 MCSymbol *PostAddPCLabel =
2930 MCCtx.createTempSymbol("post_addpc", /*AlwaysAddSuffix=*/true);
2931 AddPC->setPostInstrSymbol(*MF, PostAddPCLabel);
2932 auto *OffsetExpr = MCBinaryExpr::createSub(
2933 MCSymbolRefExpr::create(DestBB.getSymbol(), MCCtx),
2934 MCSymbolRefExpr::create(PostAddPCLabel, MCCtx), MCCtx);
2935 Offset->setVariableValue(OffsetExpr);
2936 return;
2937 }
2938
2939 assert(RS && "RegScavenger required for long branching");
2940
2941 // FIXME: Virtual register workaround for RegScavenger not working with empty
2942 // blocks.
2943 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2944
2945 // Note: as this is used after the hazard recognizer, we need to apply some
2946 // hazard workarounds directly.
2947 const bool FlushSGPRWrites = (ST.isWave64() && ST.hasVALUMaskWriteHazard()) ||
2948 ST.hasVALUReadSGPRHazard();
2949 auto ApplyHazardWorkarounds = [this, &MBB, &I, &DL, FlushSGPRWrites]() {
2950 if (FlushSGPRWrites)
2951 BuildMI(MBB, I, DL, get(AMDGPU::S_WAITCNT_DEPCTR))
2952 .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));
2953 };
2954
2955 // We need to compute the offset relative to the instruction immediately after
2956 // s_getpc_b64. Insert the pc arithmetic code before the last terminator.
2957 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
2958 ApplyHazardWorkarounds();
2959
2960 MCSymbol *PostGetPCLabel =
2961 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true);
2962 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel);
2963
2964 MCSymbol *OffsetLo =
2965 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true);
2966 MCSymbol *OffsetHi =
2967 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true);
2968 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2969 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2970 .addReg(PCReg, 0, AMDGPU::sub0)
2971 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET);
2972 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2973 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2974 .addReg(PCReg, 0, AMDGPU::sub1)
2975 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET);
2976 ApplyHazardWorkarounds();
2977
2978 // Insert the indirect branch after the other terminator.
2979 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2980 .addReg(PCReg);
2981
2982 // If a spill is needed for the pc register pair, we need to insert a spill
2983 // restore block right before the destination block, and insert a short branch
2984 // into the old destination block's fallthrough predecessor.
2985 // e.g.:
2986 //
2987 // s_cbranch_scc0 skip_long_branch:
2988 //
2989 // long_branch_bb:
2990 // spill s[8:9]
2991 // s_getpc_b64 s[8:9]
2992 // s_add_u32 s8, s8, restore_bb
2993 // s_addc_u32 s9, s9, 0
2994 // s_setpc_b64 s[8:9]
2995 //
2996 // skip_long_branch:
2997 // foo;
2998 //
2999 // .....
3000 //
3001 // dest_bb_fallthrough_predecessor:
3002 // bar;
3003 // s_branch dest_bb
3004 //
3005 // restore_bb:
3006 // restore s[8:9]
3007 // fallthrough dest_bb
3008 //
3009 // dest_bb:
3010 // buzz;
3011
3012 Register LongBranchReservedReg = MFI->getLongBranchReservedReg();
3013 Register Scav;
3014
3015 // If we've previously reserved a register for long branches,
3016 // avoid running the scavenger and just use those registers.
3017 if (LongBranchReservedReg) {
3018 RS->enterBasicBlock(MBB);
3019 Scav = LongBranchReservedReg;
3020 } else {
3021 RS->enterBasicBlockEnd(MBB);
3022 Scav = RS->scavengeRegisterBackwards(
3023 AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC),
3024 /* RestoreAfter */ false, 0, /* AllowSpill */ false);
3025 }
3026 if (Scav) {
3027 RS->setRegUsed(Scav);
3028 MRI.replaceRegWith(PCReg, Scav);
3029 MRI.clearVirtRegs();
3030 } else {
3031 // Since spilling an SGPR needs a VGPR, we reuse the temporary VGPR's slot for
3032 // the SGPR spill.
3033 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3034 const SIRegisterInfo *TRI = ST.getRegisterInfo();
3035 TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
3036 MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1);
3037 MRI.clearVirtRegs();
3038 }
3039
3040 MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol();
3041 // Now the distance can be computed.
3042 auto *Offset = MCBinaryExpr::createSub(
3043 MCSymbolRefExpr::create(DestLabel, MCCtx),
3044 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx);
3045 // Add offset assignments.
3046 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx);
3047 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx));
3048 auto *ShAmt = MCConstantExpr::create(32, MCCtx);
3049 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx));
3050}
3051
3052unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
3053 switch (Cond) {
3054 case SIInstrInfo::SCC_TRUE:
3055 return AMDGPU::S_CBRANCH_SCC1;
3056 case SIInstrInfo::SCC_FALSE:
3057 return AMDGPU::S_CBRANCH_SCC0;
3058 case SIInstrInfo::VCCNZ:
3059 return AMDGPU::S_CBRANCH_VCCNZ;
3060 case SIInstrInfo::VCCZ:
3061 return AMDGPU::S_CBRANCH_VCCZ;
3062 case SIInstrInfo::EXECNZ:
3063 return AMDGPU::S_CBRANCH_EXECNZ;
3064 case SIInstrInfo::EXECZ:
3065 return AMDGPU::S_CBRANCH_EXECZ;
3066 default:
3067 llvm_unreachable("invalid branch predicate");
3068 }
3069}
3070
3071SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
3072 switch (Opcode) {
3073 case AMDGPU::S_CBRANCH_SCC0:
3074 return SCC_FALSE;
3075 case AMDGPU::S_CBRANCH_SCC1:
3076 return SCC_TRUE;
3077 case AMDGPU::S_CBRANCH_VCCNZ:
3078 return VCCNZ;
3079 case AMDGPU::S_CBRANCH_VCCZ:
3080 return VCCZ;
3081 case AMDGPU::S_CBRANCH_EXECNZ:
3082 return EXECNZ;
3083 case AMDGPU::S_CBRANCH_EXECZ:
3084 return EXECZ;
3085 default:
3086 return INVALID_BR;
3087 }
3088}
3089
3090 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
3091 MachineBasicBlock::iterator I,
3092 MachineBasicBlock *&TBB,
3093 MachineBasicBlock *&FBB,
3094 SmallVectorImpl<MachineOperand> &Cond,
3095 bool AllowModify) const {
3096 if (I->getOpcode() == AMDGPU::S_BRANCH) {
3097 // Unconditional Branch
3098 TBB = I->getOperand(0).getMBB();
3099 return false;
3100 }
3101
3102 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
3103 if (Pred == INVALID_BR)
3104 return true;
3105
3106 MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
3107 Cond.push_back(MachineOperand::CreateImm(Pred));
3108 Cond.push_back(I->getOperand(1)); // Save the branch register.
3109
3110 ++I;
3111
3112 if (I == MBB.end()) {
3113 // Conditional branch followed by fall-through.
3114 TBB = CondBB;
3115 return false;
3116 }
3117
3118 if (I->getOpcode() == AMDGPU::S_BRANCH) {
3119 TBB = CondBB;
3120 FBB = I->getOperand(0).getMBB();
3121 return false;
3122 }
3123
3124 return true;
3125}
3126
3127 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
3128 MachineBasicBlock *&FBB,
3129 SmallVectorImpl<MachineOperand> &Cond,
3130 bool AllowModify) const {
3131 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
3132 auto E = MBB.end();
3133 if (I == E)
3134 return false;
3135
3136 // Skip over instructions that are artificial terminators for special
3137 // exec management.
3138 while (I != E && !I->isBranch() && !I->isReturn()) {
3139 switch (I->getOpcode()) {
3140 case AMDGPU::S_MOV_B64_term:
3141 case AMDGPU::S_XOR_B64_term:
3142 case AMDGPU::S_OR_B64_term:
3143 case AMDGPU::S_ANDN2_B64_term:
3144 case AMDGPU::S_AND_B64_term:
3145 case AMDGPU::S_AND_SAVEEXEC_B64_term:
3146 case AMDGPU::S_MOV_B32_term:
3147 case AMDGPU::S_XOR_B32_term:
3148 case AMDGPU::S_OR_B32_term:
3149 case AMDGPU::S_ANDN2_B32_term:
3150 case AMDGPU::S_AND_B32_term:
3151 case AMDGPU::S_AND_SAVEEXEC_B32_term:
3152 break;
3153 case AMDGPU::SI_IF:
3154 case AMDGPU::SI_ELSE:
3155 case AMDGPU::SI_KILL_I1_TERMINATOR:
3156 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
3157 // FIXME: It's messy that these need to be considered here at all.
3158 return true;
3159 default:
3160 llvm_unreachable("unexpected non-branch terminator inst");
3161 }
3162
3163 ++I;
3164 }
3165
3166 if (I == E)
3167 return false;
3168
3169 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
3170}
3171
3172 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
3173 int *BytesRemoved) const {
3174 unsigned Count = 0;
3175 unsigned RemovedSize = 0;
3176 for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) {
3177 // Skip over artificial terminators when removing instructions.
3178 if (MI.isBranch() || MI.isReturn()) {
3179 RemovedSize += getInstSizeInBytes(MI);
3180 MI.eraseFromParent();
3181 ++Count;
3182 }
3183 }
3184
3185 if (BytesRemoved)
3186 *BytesRemoved = RemovedSize;
3187
3188 return Count;
3189}
3190
3191// Copy the flags onto the implicit condition register operand.
3192 static void preserveCondRegFlags(MachineOperand &CondReg,
3193 const MachineOperand &OrigCond) {
3194 CondReg.setIsUndef(OrigCond.isUndef());
3195 CondReg.setIsKill(OrigCond.isKill());
3196}
3197
3200 MachineBasicBlock *FBB,
3201 ArrayRef<MachineOperand> Cond,
3202 const DebugLoc &DL,
3203 int *BytesAdded) const {
3204 if (!FBB && Cond.empty()) {
3205 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
3206 .addMBB(TBB);
3207 if (BytesAdded)
3208 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
3209 return 1;
3210 }
3211
3212 assert(TBB && Cond[0].isImm());
3213
3214 unsigned Opcode
3215 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
3216
3217 if (!FBB) {
3218 MachineInstr *CondBr =
3219 BuildMI(&MBB, DL, get(Opcode))
3220 .addMBB(TBB);
3221
3222 // Copy the flags onto the implicit condition register operand.
3223 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
3224 fixImplicitOperands(*CondBr);
3225
3226 if (BytesAdded)
3227 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
3228 return 1;
3229 }
3230
3231 assert(TBB && FBB);
3232
3233 MachineInstr *CondBr =
3234 BuildMI(&MBB, DL, get(Opcode))
3235 .addMBB(TBB);
3236 fixImplicitOperands(*CondBr);
3237 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
3238 .addMBB(FBB);
3239
3240 MachineOperand &CondReg = CondBr->getOperand(1);
3241 CondReg.setIsUndef(Cond[1].isUndef());
3242 CondReg.setIsKill(Cond[1].isKill());
3243
3244 if (BytesAdded)
3245 *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;
3246
3247 return 2;
3248}
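// Example (illustrative): insertBranch(MBB, TBB, FBB, {imm SCC_TRUE, $scc})
// emits the two-instruction form
//   S_CBRANCH_SCC1 %TBB, implicit $scc
//   S_BRANCH %FBB
// and reports 8 bytes added (16 when the offset-3f hardware bug is present).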
3249
3250 bool SIInstrInfo::reverseBranchCondition(
3251 SmallVectorImpl<MachineOperand> &Cond) const {
3252 if (Cond.size() != 2) {
3253 return true;
3254 }
3255
3256 if (Cond[0].isImm()) {
3257 Cond[0].setImm(-Cond[0].getImm());
3258 return false;
3259 }
3260
3261 return true;
3262}
3263
3264 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
3265 ArrayRef<MachineOperand> Cond,
3266 Register DstReg, Register TrueReg,
3267 Register FalseReg, int &CondCycles,
3268 int &TrueCycles, int &FalseCycles) const {
3269 switch (Cond[0].getImm()) {
3270 case VCCNZ:
3271 case VCCZ: {
3272 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3273 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
3274 if (MRI.getRegClass(FalseReg) != RC)
3275 return false;
3276
3277 int NumInsts = AMDGPU::getRegBitWidth(*RC) / 32;
3278 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
3279
3280 // Limit to equal cost for branch vs. N v_cndmask_b32s.
3281 return RI.hasVGPRs(RC) && NumInsts <= 6;
3282 }
3283 case SCC_TRUE:
3284 case SCC_FALSE: {
3285 // FIXME: We could insert for VGPRs if we could replace the original compare
3286 // with a vector one.
3287 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3288 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
3289 if (MRI.getRegClass(FalseReg) != RC)
3290 return false;
3291
3292 int NumInsts = AMDGPU::getRegBitWidth(*RC) / 32;
3293
3294 // An even number of 32-bit halves can be selected with s_cselect_b64.
3295 if (NumInsts % 2 == 0)
3296 NumInsts /= 2;
3297
3298 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
3299 return RI.isSGPRClass(RC);
3300 }
3301 default:
3302 return false;
3303 }
3304}
3305
3306 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
3307 MachineBasicBlock::iterator I, const DebugLoc &DL,
3308 Register DstReg, ArrayRef<MachineOperand> Cond,
3309 Register TrueReg, Register FalseReg) const {
3310 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
3311 if (Pred == VCCZ || Pred == SCC_FALSE) {
3312 Pred = static_cast<BranchPredicate>(-Pred);
3313 std::swap(TrueReg, FalseReg);
3314 }
3315
3316 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3317 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
3318 unsigned DstSize = RI.getRegSizeInBits(*DstRC);
3319
3320 if (DstSize == 32) {
3321 MachineInstr *Select;
3322 if (Pred == SCC_TRUE) {
3323 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
3324 .addReg(TrueReg)
3325 .addReg(FalseReg);
3326 } else {
3327 // Instruction's operands are backwards from what is expected.
3328 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
3329 .addReg(FalseReg)
3330 .addReg(TrueReg);
3331 }
3332
3333 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
3334 return;
3335 }
3336
3337 if (DstSize == 64 && Pred == SCC_TRUE) {
3338 MachineInstr *Select =
3339 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
3340 .addReg(TrueReg)
3341 .addReg(FalseReg);
3342
3343 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
3344 return;
3345 }
3346
3347 static const int16_t Sub0_15[] = {
3348 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
3349 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
3350 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
3351 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
3352 };
3353
3354 static const int16_t Sub0_15_64[] = {
3355 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
3356 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
3357 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
3358 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
3359 };
3360
3361 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
3362 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
3363 const int16_t *SubIndices = Sub0_15;
3364 int NElts = DstSize / 32;
3365
3366 // 64-bit select is only available for SALU.
3367 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
3368 if (Pred == SCC_TRUE) {
3369 if (NElts % 2) {
3370 SelOp = AMDGPU::S_CSELECT_B32;
3371 EltRC = &AMDGPU::SGPR_32RegClass;
3372 } else {
3373 SelOp = AMDGPU::S_CSELECT_B64;
3374 EltRC = &AMDGPU::SGPR_64RegClass;
3375 SubIndices = Sub0_15_64;
3376 NElts /= 2;
3377 }
3378 }
3379
3381 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
3382
3383 I = MIB->getIterator();
3384
3385 SmallVector<Register, 8> Regs;
3386 for (int Idx = 0; Idx != NElts; ++Idx) {
3387 Register DstElt = MRI.createVirtualRegister(EltRC);
3388 Regs.push_back(DstElt);
3389
3390 unsigned SubIdx = SubIndices[Idx];
3391
3392 MachineInstr *Select;
3393 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
3394 Select =
3395 BuildMI(MBB, I, DL, get(SelOp), DstElt)
3396 .addReg(FalseReg, 0, SubIdx)
3397 .addReg(TrueReg, 0, SubIdx);
3398 } else {
3399 Select =
3400 BuildMI(MBB, I, DL, get(SelOp), DstElt)
3401 .addReg(TrueReg, 0, SubIdx)
3402 .addReg(FalseReg, 0, SubIdx);
3403 }
3404
3405 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
3407
3408 MIB.addReg(DstElt)
3409 .addImm(SubIdx);
3410 }
3411}
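// Example (illustrative): a divergent (VCCNZ) select of two 128-bit VGPR
// tuples expands to one v_cndmask per 32-bit element plus a reassembly:
//   %e0:vgpr_32 = V_CNDMASK_B32_e32 %false.sub0, %true.sub0, implicit $vcc
//   ...
//   %e3:vgpr_32 = V_CNDMASK_B32_e32 %false.sub3, %true.sub3, implicit $vcc
//   %dst = REG_SEQUENCE %e0, %subreg.sub0, ..., %e3, %subreg.sub3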
3412
3414 switch (MI.getOpcode()) {
3415 case AMDGPU::V_MOV_B16_t16_e32:
3416 case AMDGPU::V_MOV_B16_t16_e64:
3417 case AMDGPU::V_MOV_B32_e32:
3418 case AMDGPU::V_MOV_B32_e64:
3419 case AMDGPU::V_MOV_B64_PSEUDO:
3420 case AMDGPU::V_MOV_B64_e32:
3421 case AMDGPU::V_MOV_B64_e64:
3422 case AMDGPU::S_MOV_B32:
3423 case AMDGPU::S_MOV_B64:
3424 case AMDGPU::S_MOV_B64_IMM_PSEUDO:
3425 case AMDGPU::COPY:
3426 case AMDGPU::WWM_COPY:
3427 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
3428 case AMDGPU::V_ACCVGPR_READ_B32_e64:
3429 case AMDGPU::V_ACCVGPR_MOV_B32:
3430 case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
3431 case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
3432 return true;
3433 default:
3434 return false;
3435 }
3436}
3437
3438static constexpr AMDGPU::OpName ModifierOpNames[] = {
3439 AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
3440 AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
3441 AMDGPU::OpName::omod, AMDGPU::OpName::op_sel};
3442
3444 unsigned Opc = MI.getOpcode();
3445 for (AMDGPU::OpName Name : reverse(ModifierOpNames)) {
3446 int Idx = AMDGPU::getNamedOperandIdx(Opc, Name);
3447 if (Idx >= 0)
3448 MI.removeOperand(Idx);
3449 }
3450}
3451
3452std::optional<int64_t> SIInstrInfo::extractSubregFromImm(int64_t Imm,
3453 unsigned SubRegIndex) {
3454 switch (SubRegIndex) {
3455 case AMDGPU::NoSubRegister:
3456 return Imm;
3457 case AMDGPU::sub0:
3458 return SignExtend64<32>(Imm);
3459 case AMDGPU::sub1:
3460 return SignExtend64<32>(Imm >> 32);
3461 case AMDGPU::lo16:
3462 return SignExtend64<16>(Imm);
3463 case AMDGPU::hi16:
3464 return SignExtend64<16>(Imm >> 16);
3465 case AMDGPU::sub1_lo16:
3466 return SignExtend64<16>(Imm >> 32);
3467 case AMDGPU::sub1_hi16:
3468 return SignExtend64<16>(Imm >> 48);
3469 default:
3470 return std::nullopt;
3471 }
3472
3473 llvm_unreachable("covered subregister switch");
3474}
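// Worked example: for Imm == 0x123456789ABCDEF0,
//   sub0 -> SignExtend64<32>(0x9ABCDEF0) == 0xFFFFFFFF9ABCDEF0
//   sub1 -> 0x0000000012345678
//   lo16 -> SignExtend64<16>(0xDEF0)     == 0xFFFFFFFFFFFFDEF0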
3475
3476static unsigned getNewFMAAKInst(const GCNSubtarget &ST, unsigned Opc) {
3477 switch (Opc) {
3478 case AMDGPU::V_MAC_F16_e32:
3479 case AMDGPU::V_MAC_F16_e64:
3480 case AMDGPU::V_MAD_F16_e64:
3481 return AMDGPU::V_MADAK_F16;
3482 case AMDGPU::V_MAC_F32_e32:
3483 case AMDGPU::V_MAC_F32_e64:
3484 case AMDGPU::V_MAD_F32_e64:
3485 return AMDGPU::V_MADAK_F32;
3486 case AMDGPU::V_FMAC_F32_e32:
3487 case AMDGPU::V_FMAC_F32_e64:
3488 case AMDGPU::V_FMA_F32_e64:
3489 return AMDGPU::V_FMAAK_F32;
3490 case AMDGPU::V_FMAC_F16_e32:
3491 case AMDGPU::V_FMAC_F16_e64:
3492 case AMDGPU::V_FMAC_F16_t16_e64:
3493 case AMDGPU::V_FMAC_F16_fake16_e64:
3494 case AMDGPU::V_FMA_F16_e64:
3495 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
3496 ? AMDGPU::V_FMAAK_F16_t16
3497 : AMDGPU::V_FMAAK_F16_fake16
3498 : AMDGPU::V_FMAAK_F16;
3499 case AMDGPU::V_FMAC_F64_e32:
3500 case AMDGPU::V_FMAC_F64_e64:
3501 case AMDGPU::V_FMA_F64_e64:
3502 return AMDGPU::V_FMAAK_F64;
3503 default:
3504 llvm_unreachable("invalid instruction");
3505 }
3506}
3507
3508static unsigned getNewFMAMKInst(const GCNSubtarget &ST, unsigned Opc) {
3509 switch (Opc) {
3510 case AMDGPU::V_MAC_F16_e32:
3511 case AMDGPU::V_MAC_F16_e64:
3512 case AMDGPU::V_MAD_F16_e64:
3513 return AMDGPU::V_MADMK_F16;
3514 case AMDGPU::V_MAC_F32_e32:
3515 case AMDGPU::V_MAC_F32_e64:
3516 case AMDGPU::V_MAD_F32_e64:
3517 return AMDGPU::V_MADMK_F32;
3518 case AMDGPU::V_FMAC_F32_e32:
3519 case AMDGPU::V_FMAC_F32_e64:
3520 case AMDGPU::V_FMA_F32_e64:
3521 return AMDGPU::V_FMAMK_F32;
3522 case AMDGPU::V_FMAC_F16_e32:
3523 case AMDGPU::V_FMAC_F16_e64:
3524 case AMDGPU::V_FMAC_F16_t16_e64:
3525 case AMDGPU::V_FMAC_F16_fake16_e64:
3526 case AMDGPU::V_FMA_F16_e64:
3527 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
3528 ? AMDGPU::V_FMAMK_F16_t16
3529 : AMDGPU::V_FMAMK_F16_fake16
3530 : AMDGPU::V_FMAMK_F16;
3531 case AMDGPU::V_FMAC_F64_e32:
3532 case AMDGPU::V_FMAC_F64_e64:
3533 case AMDGPU::V_FMA_F64_e64:
3534 return AMDGPU::V_FMAMK_F64;
3535 default:
3536 llvm_unreachable("invalid instruction");
3537 }
3538}
3539
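// Try to fold the constant materialized by DefMI into UseMI: a COPY of
// the constant becomes a mov-immediate, and a MAD/FMA using it becomes
// the MADAK/MADMK or FMAAK/FMAMK literal form.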
3540bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
3541 Register Reg, MachineRegisterInfo *MRI) const {
3542 int64_t Imm;
3543 if (!getConstValDefinedInReg(DefMI, Reg, Imm))
3544 return false;
3545
3546 const bool HasMultipleUses = !MRI->hasOneNonDBGUse(Reg);
3547
3548 assert(!DefMI.getOperand(0).getSubReg() && "Expected SSA form");
3549
3550 unsigned Opc = UseMI.getOpcode();
3551 if (Opc == AMDGPU::COPY) {
3552 assert(!UseMI.getOperand(0).getSubReg() && "Expected SSA form");
3553
3554 Register DstReg = UseMI.getOperand(0).getReg();
3555 Register UseSubReg = UseMI.getOperand(1).getSubReg();
3556
3557 const TargetRegisterClass *DstRC = RI.getRegClassForReg(*MRI, DstReg);
3558
3559 if (HasMultipleUses) {
3560 // TODO: This should fold in more cases with multiple uses, but we need to
3561 // more carefully consider what those uses are.
3562 unsigned ImmDefSize = RI.getRegSizeInBits(*MRI->getRegClass(Reg));
3563
3564 // Avoid breaking up a 64-bit inline immediate into a subregister extract.
3565 if (UseSubReg != AMDGPU::NoSubRegister && ImmDefSize == 64)
3566 return false;
3567
3568 // Most of the time folding a 32-bit inline constant is free (though this
3569 // might not be true if we can't later fold it into a real user).
3570 //
3571 // FIXME: This isInlineConstant check is imprecise if
3572 // getConstValDefinedInReg handled the tricky non-mov cases.
3573 if (ImmDefSize == 32 &&
3574 !isInlineConstant(Imm, AMDGPU::OPERAND_REG_IMM_INT32))
3575 return false;
3576 }
3577
3578 bool Is16Bit = UseSubReg != AMDGPU::NoSubRegister &&
3579 RI.getSubRegIdxSize(UseSubReg) == 16;
3580
3581 if (Is16Bit) {
3582 if (RI.hasVGPRs(DstRC))
3583 return false; // Do not clobber vgpr_hi16
3584
3585 if (DstReg.isVirtual() && UseSubReg != AMDGPU::lo16)
3586 return false;
3587 }
3588
3589 MachineFunction *MF = UseMI.getMF();
3590
3591 unsigned NewOpc = AMDGPU::INSTRUCTION_LIST_END;
3592 MCRegister MovDstPhysReg =
3593 DstReg.isPhysical() ? DstReg.asMCReg() : MCRegister();
3594
3595 std::optional<int64_t> SubRegImm = extractSubregFromImm(Imm, UseSubReg);
3596
3597 // TODO: Try to fold with AMDGPU::V_MOV_B16_t16_e64
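// Try each candidate mov opcode until one is found whose destination
// register class is compatible with DstReg and whose immediate operand
// can legally encode the extracted value.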
3598 for (unsigned MovOp :
3599 {AMDGPU::S_MOV_B32, AMDGPU::V_MOV_B32_e32, AMDGPU::S_MOV_B64,
3600 AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {
3601 const MCInstrDesc &MovDesc = get(MovOp);
3602
3603 const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0, &RI);
3604 if (Is16Bit) {
3605 // We just need to find a correctly sized register class, so the
3606 // subregister index compatibility doesn't matter since we're statically
3607 // extracting the immediate value.
3608 MovDstRC = RI.getMatchingSuperRegClass(MovDstRC, DstRC, AMDGPU::lo16);
3609 if (!MovDstRC)
3610 continue;
3611
3612 if (MovDstPhysReg) {
3613 // FIXME: We probably should not do this. If there is a live value in
3614 // the high half of the register, it will be corrupted.
3615 MovDstPhysReg =
3616 RI.getMatchingSuperReg(MovDstPhysReg, AMDGPU::lo16, MovDstRC);
3617 if (!MovDstPhysReg)
3618 continue;
3619 }
3620 }
3621
3622 // Result class isn't the right size; try the next instruction.
3623 if (MovDstPhysReg) {
3624 if (!MovDstRC->contains(MovDstPhysReg))
3625 return false;
3626 } else if (!MRI->constrainRegClass(DstReg, MovDstRC)) {
3627 // TODO: This will be overly conservative in the case of 16-bit virtual
3628 // SGPRs. We could hack up the virtual register uses to use a compatible
3629 // 32-bit class.
3630 continue;
3631 }
3632
3633 const MCOperandInfo &OpInfo = MovDesc.operands()[1];
3634
3635 // Ensure the interpreted immediate value is a valid operand in the new
3636 // mov.
3637 //
3638 // FIXME: isImmOperandLegal should have a form that doesn't require an
3639 // existing MachineInstr or MachineOperand.
3640 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType) &&
3641 !isInlineConstant(*SubRegImm, OpInfo.OperandType))
3642 break;
3643
3644 NewOpc = MovOp;
3645 break;
3646 }
3647
3648 if (NewOpc == AMDGPU::INSTRUCTION_LIST_END)
3649 return false;
3650
3651 if (Is16Bit) {
3652 UseMI.getOperand(0).setSubReg(AMDGPU::NoSubRegister);
3653 if (MovDstPhysReg)
3654 UseMI.getOperand(0).setReg(MovDstPhysReg);
3655 assert(UseMI.getOperand(1).getReg().isVirtual());
3656 }
3657
3658 const MCInstrDesc &NewMCID = get(NewOpc);
3659 UseMI.setDesc(NewMCID);
3660 UseMI.getOperand(1).ChangeToImmediate(*SubRegImm);
3661 UseMI.addImplicitDefUseOperands(*MF);
3662 return true;
3663 }
3664
3665 if (HasMultipleUses)
3666 return false;
3667
3668 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
3669 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3670 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
3671 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64 ||
3672 Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3673 Opc == AMDGPU::V_FMAC_F16_fake16_e64 || Opc == AMDGPU::V_FMA_F64_e64 ||
3674 Opc == AMDGPU::V_FMAC_F64_e64) {
3675 // Don't fold if we are using source or output modifiers. The new VOP2
3676 // instructions don't have them.
3677 if (hasAnyModifiersSet(UseMI))
3678 return false;
3679
3680 // If this is a free constant, there's no reason to do this.
3681 // TODO: We could fold this here instead of letting SIFoldOperands do it
3682 // later.
3683 int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);
3684
3685 // Any src operand can be used for the legality check.
3686 if (isInlineConstant(UseMI, Src0Idx, Imm))
3687 return false;
3688
3689 MachineOperand *Src0 = &UseMI.getOperand(Src0Idx);
3690
3691 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
3692 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
3693
3694 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
3695 if ((Src0->isReg() && Src0->getReg() == Reg) ||
3696 (Src1->isReg() && Src1->getReg() == Reg)) {
3697 MachineOperand *RegSrc =
3698 Src1->isReg() && Src1->getReg() == Reg ? Src0 : Src1;
3699 if (!RegSrc->isReg())
3700 return false;
3701 if (RI.isSGPRClass(MRI->getRegClass(RegSrc->getReg())) &&
3702 ST.getConstantBusLimit(Opc) < 2)
3703 return false;
3704
3705 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
3706 return false;
3707
3708 // If src2 is also a literal constant then we have to choose which one to
3709 // fold. In general it is better to choose madak so that the other literal
3710 // can be materialized in an sgpr instead of a vgpr:
3711 // s_mov_b32 s0, literal
3712 // v_madak_f32 v0, s0, v0, literal
3713 // Instead of:
3714 // v_mov_b32 v1, literal
3715 // v_madmk_f32 v0, v0, literal, v1
3716 MachineInstr *Def = MRI->getUniqueVRegDef(Src2->getReg());
3717 if (Def && Def->isMoveImmediate() &&
3718 !isInlineConstant(Def->getOperand(1)))
3719 return false;
3720
3721 unsigned NewOpc = getNewFMAMKInst(ST, Opc);
3722 if (pseudoToMCOpcode(NewOpc) == -1)
3723 return false;
3724
3725 // V_FMAMK_F16_t16 takes VGPR_16_Lo128 operands while V_FMAMK_F16_fake16
3726 // takes VGPR_32_Lo128 operands, so the rewrite would also require
3727 // restricting their register classes. For now just bail out.
3728 if (NewOpc == AMDGPU::V_FMAMK_F16_t16 ||
3729 NewOpc == AMDGPU::V_FMAMK_F16_fake16)
3730 return false;
3731
3732 const std::optional<int64_t> SubRegImm = extractSubregFromImm(
3733 Imm, RegSrc == Src1 ? Src0->getSubReg() : Src1->getSubReg());
3734
3735 // FIXME: This would be a lot easier if we could return a new instruction
3736 // instead of having to modify in place.
3737
3738 Register SrcReg = RegSrc->getReg();
3739 unsigned SrcSubReg = RegSrc->getSubReg();
3740 Src0->setReg(SrcReg);
3741 Src0->setSubReg(SrcSubReg);
3742 Src0->setIsKill(RegSrc->isKill());
3743
3744 if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3745 Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3746 Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
3747 Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
3748 UseMI.untieRegOperand(
3749 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3750
3751 Src1->ChangeToImmediate(*SubRegImm);
3752
3753 removeModOperands(UseMI);
3754 UseMI.setDesc(get(NewOpc));
3755
3756 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3757 if (DeleteDef)
3758 DefMI.eraseFromParent();
3759
3760 return true;
3761 }
3762
3763 // Added part is the constant: Use v_madak_{f16, f32}.
3764 if (Src2->isReg() && Src2->getReg() == Reg) {
3765 if (ST.getConstantBusLimit(Opc) < 2) {
3766 // Not allowed to use constant bus for another operand.
3767 // We can however allow an inline immediate as src0.
3768 bool Src0Inlined = false;
3769 if (Src0->isReg()) {
3770 // Try to inline the constant if possible.
3771 // If the def is a move-immediate and this is its only use,
3772 // we save a VGPR here.
3773 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
3774 if (Def && Def->isMoveImmediate() &&
3775 isInlineConstant(Def->getOperand(1)) &&
3776 MRI->hasOneUse(Src0->getReg())) {
3777 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3778 Src0Inlined = true;
3779 } else if (ST.getConstantBusLimit(Opc) <= 1 &&
3780 RI.isSGPRReg(*MRI, Src0->getReg())) {
3781 return false;
3782 }
3783 // VGPR is okay as Src0 - fallthrough
3784 }
3785
3786 if (Src1->isReg() && !Src0Inlined) {
3787 // We have one slot for an inlinable constant so far - try to fill it.
3788 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
3789 if (Def && Def->isMoveImmediate() &&
3790 isInlineConstant(Def->getOperand(1)) &&
3791 MRI->hasOneUse(Src1->getReg()) && commuteInstruction(UseMI))
3792 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3793 else if (RI.isSGPRReg(*MRI, Src1->getReg()))
3794 return false;
3795 // VGPR is okay as Src1 - fallthrough
3796 }
3797 }
3798
3799 unsigned NewOpc = getNewFMAAKInst(ST, Opc);
3800 if (pseudoToMCOpcode(NewOpc) == -1)
3801 return false;
3802
3803 // V_FMAAK_F16_t16 takes VGPR_16_Lo128 operands while V_FMAAK_F16_fake16
3804 // takes VGPR_32_Lo128 operands, so the rewrite would also require
3805 // restricting their register classes. For now just bail out.
3806 if (NewOpc == AMDGPU::V_FMAAK_F16_t16 ||
3807 NewOpc == AMDGPU::V_FMAAK_F16_fake16)
3808 return false;
3809
3810 // FIXME: This would be a lot easier if we could return a new instruction
3811 // instead of having to modify in place.
3812
3813 if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3814 Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3815 Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
3816 Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
3817 UseMI.untieRegOperand(
3818 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3819
3820 const std::optional<int64_t> SubRegImm =
3821 extractSubregFromImm(Imm, Src2->getSubReg());
3822
3823 // ChangeToImmediate adds Src2 back to the instruction.
3824 Src2->ChangeToImmediate(*SubRegImm);
3825
3826 // The modifier operands come before src2; remove them first.
3827 removeModOperands(UseMI);
3828 UseMI.setDesc(get(NewOpc));
3829 // It might happen that UseMI was commuted and we now have an SGPR as
3830 // src1. If so, the inline constant plus an SGPR would be illegal, so
3831 // legalize the operands.
3832 legalizeOperands(UseMI);
3833
3834 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3835 if (DeleteDef)
3836 DefMI.eraseFromParent();
3837
3838 return true;
3839 }
3840 }
3841
3842 return false;
3843}
3844
3845static bool
3846memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
3847 ArrayRef<const MachineOperand *> BaseOps2) {
3848 if (BaseOps1.size() != BaseOps2.size())
3849 return false;
3850 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
3851 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
3852 return false;
3853 }
3854 return true;
3855}
3856
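// Accesses [OffsetA, OffsetA + WidthA) and [OffsetB, OffsetB + WidthB)
// are disjoint iff the lower access ends at or before the higher access
// begins, e.g. 4-byte accesses at offsets 0 and 4 do not overlap.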
3857static bool offsetsDoNotOverlap(LocationSize WidthA, int OffsetA,
3858 LocationSize WidthB, int OffsetB) {
3859 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
3860 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
3861 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3862 return LowWidth.hasValue() &&
3863 LowOffset + (int)LowWidth.getValue() <= HighOffset;
3864}
3865
3866bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
3867 const MachineInstr &MIb) const {
3868 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
3869 int64_t Offset0, Offset1;
3870 LocationSize Dummy0 = LocationSize::precise(0);
3871 LocationSize Dummy1 = LocationSize::precise(0);
3872 bool Offset0IsScalable, Offset1IsScalable;
3873 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
3874 Dummy0, &RI) ||
3875 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
3876 Dummy1, &RI))
3877 return false;
3878
3879 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
3880 return false;
3881
3882 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
3883 // FIXME: Handle ds_read2 / ds_write2.
3884 return false;
3885 }
3886 LocationSize Width0 = MIa.memoperands().front()->getSize();
3887 LocationSize Width1 = MIb.memoperands().front()->getSize();
3888 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
3889}
3890
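// Conservatively determine whether two memory accesses can be proven
// disjoint purely from their instruction kinds (DS, buffer, scalar,
// flat) and, when the kinds match, from their base operands and offsets.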
3891bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
3892 const MachineInstr &MIb) const {
3893 assert(MIa.mayLoadOrStore() &&
3894 "MIa must load from or modify a memory location");
3895 assert(MIb.mayLoadOrStore() &&
3896 "MIb must load from or modify a memory location");
3897
3898 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
3899 return false;
3900
3901 // XXX - Can we relax this between address spaces?
3902 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
3903 return false;
3904
3905 if (isLDSDMA(MIa) || isLDSDMA(MIb))
3906 return false;
3907
3908 // TODO: Should we check the address space from the MachineMemOperand? That
3909 // would allow us to distinguish objects we know don't alias based on the
3910 // underlying address space, even if it was lowered to a different one,
3911 // e.g. private accesses lowered to use MUBUF instructions on a scratch
3912 // buffer.
3913 if (isDS(MIa)) {
3914 if (isDS(MIb))
3915 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3916
3917 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
3918 }
3919
3920 if (isMUBUF(MIa) || isMTBUF(MIa)) {
3921 if (isMUBUF(MIb) || isMTBUF(MIb))
3922 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3923
3924 if (isFLAT(MIb))
3925 return isFLATScratch(MIb);
3926
3927 return !isSMRD(MIb);
3928 }
3929
3930 if (isSMRD(MIa)) {
3931 if (isSMRD(MIb))
3932 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3933
3934 if (isFLAT(MIb))
3935 return isFLATScratch(MIb);
3936
3937 return !isMUBUF(MIb) && !isMTBUF(MIb);
3938 }
3939
3940 if (isFLAT(MIa)) {
3941 if (isFLAT(MIb)) {
3942 if ((isFLATScratch(MIa) && isFLATGlobal(MIb)) ||
3943 (isFLATGlobal(MIa) && isFLATScratch(MIb)))
3944 return true;
3945
3946 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3947 }
3948
3949 return false;
3950 }
3951
3952 return false;
3953}
3954
3955static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI,
3956 int64_t &Imm, MachineInstr **DefMI = nullptr) {
3957 if (Reg.isPhysical())
3958 return false;
3959 auto *Def = MRI.getUniqueVRegDef(Reg);
3960 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) {
3961 Imm = Def->getOperand(1).getImm();
3962 if (DefMI)
3963 *DefMI = Def;
3964 return true;
3965 }
3966 return false;
3967}
3968
3969static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm,
3970 MachineInstr **DefMI = nullptr) {
3971 if (!MO->isReg())
3972 return false;
3973 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
3974 const MachineRegisterInfo &MRI = MF->getRegInfo();
3975 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI);
3976}
3977
3978static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI,
3979 MachineInstr &NewMI) {
3980 if (LV) {
3981 unsigned NumOps = MI.getNumOperands();
3982 for (unsigned I = 1; I < NumOps; ++I) {
3983 MachineOperand &Op = MI.getOperand(I);
3984 if (Op.isReg() && Op.isKill())
3985 LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
3986 }
3987 }
3988}
3989
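// Map a two-address MAC/FMAC opcode to the corresponding three-address
// MAD/FMA opcode.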
3990static unsigned getNewFMAInst(const GCNSubtarget &ST, unsigned Opc) {
3991 switch (Opc) {
3992 case AMDGPU::V_MAC_F16_e32:
3993 case AMDGPU::V_MAC_F16_e64:
3994 return AMDGPU::V_MAD_F16_e64;
3995 case AMDGPU::V_MAC_F32_e32:
3996 case AMDGPU::V_MAC_F32_e64:
3997 return AMDGPU::V_MAD_F32_e64;
3998 case AMDGPU::V_MAC_LEGACY_F32_e32:
3999 case AMDGPU::V_MAC_LEGACY_F32_e64:
4000 return AMDGPU::V_MAD_LEGACY_F32_e64;
4001 case AMDGPU::V_FMAC_LEGACY_F32_e32:
4002 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4003 return AMDGPU::V_FMA_LEGACY_F32_e64;
4004 case AMDGPU::V_FMAC_F16_e32:
4005 case AMDGPU::V_FMAC_F16_e64:
4006 case AMDGPU::V_FMAC_F16_t16_e64:
4007 case AMDGPU::V_FMAC_F16_fake16_e64:
4008 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
4009 ? AMDGPU::V_FMA_F16_gfx9_t16_e64
4010 : AMDGPU::V_FMA_F16_gfx9_fake16_e64
4011 : AMDGPU::V_FMA_F16_gfx9_e64;
4012 case AMDGPU::V_FMAC_F32_e32:
4013 case AMDGPU::V_FMAC_F32_e64:
4014 return AMDGPU::V_FMA_F32_e64;
4015 case AMDGPU::V_FMAC_F64_e32:
4016 case AMDGPU::V_FMAC_F64_e64:
4017 return AMDGPU::V_FMA_F64_e64;
4018 default:
4019 llvm_unreachable("invalid instruction");
4020 }
4021}
4022
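// Convert a two-address MAC/FMAC (whose dst is tied to src2) into the
// three-address MAD/FMA form, preferring the FMAAK/FMAMK literal forms
// when a foldable immediate operand is available.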
4023MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
4024 LiveVariables *LV,
4025 LiveIntervals *LIS) const {
4026 MachineBasicBlock &MBB = *MI.getParent();
4027 unsigned Opc = MI.getOpcode();
4028
4029 // Handle MFMA.
4030 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc);
4031 if (NewMFMAOpc != -1) {
4032 MachineInstrBuilder MIB =
4033 BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc));
4034 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
4035 MIB.add(MI.getOperand(I));
4036 updateLiveVariables(LV, MI, *MIB);
4037 if (LIS) {
4038 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4039 // SlotIndex of defs needs to be updated when converting to early-clobber
4040 MachineOperand &Def = MIB->getOperand(0);
4041 if (Def.isEarlyClobber() && Def.isReg() &&
4042 LIS->hasInterval(Def.getReg())) {
4043 SlotIndex OldIndex = LIS->getInstructionIndex(*MIB).getRegSlot(false);
4044 SlotIndex NewIndex = LIS->getInstructionIndex(*MIB).getRegSlot(true);
4045 auto &LI = LIS->getInterval(Def.getReg());
4046 auto UpdateDefIndex = [&](LiveRange &LR) {
4047 auto *S = LR.find(OldIndex);
4048 if (S != LR.end() && S->start == OldIndex) {
4049 assert(S->valno && S->valno->def == OldIndex);
4050 S->start = NewIndex;
4051 S->valno->def = NewIndex;
4052 }
4053 };
4054 UpdateDefIndex(LI);
4055 for (auto &SR : LI.subranges())
4056 UpdateDefIndex(SR);
4057 }
4058 }
4059 return MIB;
4060 }
4061
4062 if (SIInstrInfo::isWMMA(MI)) {
4063 unsigned NewOpc = AMDGPU::mapWMMA2AddrTo3AddrOpcode(MI.getOpcode());
4064 MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
4065 .setMIFlags(MI.getFlags());
4066 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
4067 MIB->addOperand(MI.getOperand(I));
4068
4069 updateLiveVariables(LV, MI, *MIB);
4070 if (LIS)
4071 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4072
4073 return MIB;
4074 }
4075
4076 assert(Opc != AMDGPU::V_FMAC_F16_t16_e32 &&
4077 Opc != AMDGPU::V_FMAC_F16_fake16_e32 &&
4078 "V_FMAC_F16_t16/fake16_e32 is not supported and not expected to be "
4079 "present pre-RA");
4080
4081 // Handle MAC/FMAC.
4082 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
4083 bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 ||
4084 Opc == AMDGPU::V_MAC_LEGACY_F32_e64 ||
4085 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
4086 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64;
4087 bool Src0Literal = false;
4088
4089 switch (Opc) {
4090 default:
4091 return nullptr;
4092 case AMDGPU::V_MAC_F16_e64:
4093 case AMDGPU::V_FMAC_F16_e64:
4094 case AMDGPU::V_FMAC_F16_t16_e64:
4095 case AMDGPU::V_FMAC_F16_fake16_e64:
4096 case AMDGPU::V_MAC_F32_e64:
4097 case AMDGPU::V_MAC_LEGACY_F32_e64:
4098 case AMDGPU::V_FMAC_F32_e64:
4099 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4100 case AMDGPU::V_FMAC_F64_e64:
4101 break;
4102 case AMDGPU::V_MAC_F16_e32:
4103 case AMDGPU::V_FMAC_F16_e32:
4104 case AMDGPU::V_MAC_F32_e32:
4105 case AMDGPU::V_MAC_LEGACY_F32_e32:
4106 case AMDGPU::V_FMAC_F32_e32:
4107 case AMDGPU::V_FMAC_LEGACY_F32_e32:
4108 case AMDGPU::V_FMAC_F64_e32: {
4109 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
4110 AMDGPU::OpName::src0);
4111 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
4112 if (!Src0->isReg() && !Src0->isImm())
4113 return nullptr;
4114
4115 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
4116 Src0Literal = true;
4117
4118 break;
4119 }
4120 }
4121
4122 MachineInstrBuilder MIB;
4123 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4124 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
4125 const MachineOperand *Src0Mods =
4126 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
4127 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
4128 const MachineOperand *Src1Mods =
4129 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
4130 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
4131 const MachineOperand *Src2Mods =
4132 getNamedOperand(MI, AMDGPU::OpName::src2_modifiers);
4133 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
4134 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
4135 const MachineOperand *OpSel = getNamedOperand(MI, AMDGPU::OpName::op_sel);
4136
4137 if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsLegacy &&
4138 (!IsF64 || ST.hasFmaakFmamkF64Insts()) &&
4139 // If we have an SGPR input, we will violate the constant bus restriction.
4140 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
4141 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {
4142 MachineInstr *DefMI;
4143 const auto killDef = [&]() -> void {
4144 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4145 // The only user is the instruction which will be killed.
4146 Register DefReg = DefMI->getOperand(0).getReg();
4147
4148 if (MRI.hasOneNonDBGUse(DefReg)) {
4149 // We cannot just remove the DefMI here; the calling pass will crash.
4150 DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF));
4151 DefMI->getOperand(0).setIsDead(true);
4152 for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I)
4153 DefMI->removeOperand(I);
4154 if (LV)
4155 LV->getVarInfo(DefReg).AliveBlocks.clear();
4156 }
4157
4158 if (LIS) {
4159 LiveInterval &DefLI = LIS->getInterval(DefReg);
4160
4161 // We cannot delete the original instruction here, so hack out the use
4162 // in the original instruction with a dummy register so we can use
4163 // shrinkToUses to deal with any multi-use edge cases. Other targets do
4164 // not have the complexity of deleting a use to consider here.
4165 Register DummyReg = MRI.cloneVirtualRegister(DefReg);
4166 for (MachineOperand &MIOp : MI.uses()) {
4167 if (MIOp.isReg() && MIOp.getReg() == DefReg) {
4168 MIOp.setIsUndef(true);
4169 MIOp.setReg(DummyReg);
4170 }
4171 }
4172
4173 LIS->shrinkToUses(&DefLI);
4174 }
4175 };
4176
4177 int64_t Imm;
4178 if (!Src0Literal && getFoldableImm(Src2, Imm, &DefMI)) {
4179 unsigned NewOpc = getNewFMAAKInst(ST, Opc);
4180 if (pseudoToMCOpcode(NewOpc) != -1) {
4181 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
4182 .add(*Dst)
4183 .add(*Src0)
4184 .add(*Src1)
4185 .addImm(Imm)
4186 .setMIFlags(MI.getFlags());
4187 updateLiveVariables(LV, MI, *MIB);
4188 if (LIS)
4189 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4190 killDef();
4191 return MIB;
4192 }
4193 }
4194 unsigned NewOpc = getNewFMAMKInst(ST, Opc);
4195 if (!Src0Literal && getFoldableImm(Src1, Imm, &DefMI)) {
4196 if (pseudoToMCOpcode(NewOpc) != -1) {
4197 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
4198 .add(*Dst)
4199 .add(*Src0)
4200 .addImm(Imm)
4201 .add(*Src2)
4202 .setMIFlags(MI.getFlags());
4203 updateLiveVariables(LV, MI, *MIB);
4204
4205 if (LIS)
4206 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4207 killDef();
4208 return MIB;
4209 }
4210 }
4211 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) {
4212 if (Src0Literal) {
4213 Imm = Src0->getImm();
4214 DefMI = nullptr;
4215 }
4216 if (pseudoToMCOpcode(NewOpc) != -1 &&
4217 isOperandLegal(
4218 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
4219 Src1)) {
4220 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
4221 .add(*Dst)
4222 .add(*Src1)
4223 .addImm(Imm)
4224 .add(*Src2)
4225 .setMIFlags(MI.getFlags());
4226 updateLiveVariables(LV, MI, *MIB);
4227
4228 if (LIS)
4229 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4230 if (DefMI)
4231 killDef();
4232 return MIB;
4233 }
4234 }
4235 }
4236
4237 // VOP2 mac/fmac with a literal operand cannot be converted to VOP3 mad/fma
4238 // if VOP3 does not allow a literal operand.
4239 if (Src0Literal && !ST.hasVOP3Literal())
4240 return nullptr;
4241
4242 unsigned NewOpc = getNewFMAInst(ST, Opc);
4243
4244 if (pseudoToMCOpcode(NewOpc) == -1)
4245 return nullptr;
4246
4247 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
4248 .add(*Dst)
4249 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
4250 .add(*Src0)
4251 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
4252 .add(*Src1)
4253 .addImm(Src2Mods ? Src2Mods->getImm() : 0)
4254 .add(*Src2)
4255 .addImm(Clamp ? Clamp->getImm() : 0)
4256 .addImm(Omod ? Omod->getImm() : 0)
4257 .setMIFlags(MI.getFlags());
4258 if (AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel))
4259 MIB.addImm(OpSel ? OpSel->getImm() : 0);
4260 updateLiveVariables(LV, MI, *MIB);
4261 if (LIS)
4262 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
4263 return MIB;
4264}
4265
4266// It's not generally safe to move VALU instructions across these since it will
4267// start using the register as a base index rather than directly.
4268// XXX - Why isn't hasSideEffects sufficient for these?
4269static bool changesVGPRIndexingMode(const MachineInstr &MI) {
4270 switch (MI.getOpcode()) {
4271 case AMDGPU::S_SET_GPR_IDX_ON:
4272 case AMDGPU::S_SET_GPR_IDX_MODE:
4273 case AMDGPU::S_SET_GPR_IDX_OFF:
4274 return true;
4275 default:
4276 return false;
4277 }
4278}
4279
4280bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
4281 const MachineBasicBlock *MBB,
4282 const MachineFunction &MF) const {
4283 // Skipping the check for SP writes in the base implementation. The reason it
4284 // was added was apparently due to compile time concerns.
4285 //
4286 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
4287 // but is probably avoidable.
4288
4289 // Copied from base implementation.
4290 // Terminators and labels can't be scheduled around.
4291 if (MI.isTerminator() || MI.isPosition())
4292 return true;
4293
4294 // INLINEASM_BR can jump to another block
4295 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
4296 return true;
4297
4298 if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)
4299 return true;
4300
4301 // Target-independent instructions do not have an implicit-use of EXEC, even
4302 // when they operate on VGPRs. Treating EXEC modifications as scheduling
4303 // boundaries prevents incorrect movements of such instructions.
4304 return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
4305 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
4306 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
4307 MI.getOpcode() == AMDGPU::S_SETPRIO ||
4308 MI.getOpcode() == AMDGPU::S_SETPRIO_INC_WG ||
4309 changesVGPRIndexingMode(MI);
4310}
4311
4312bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
4313 return Opcode == AMDGPU::DS_ORDERED_COUNT ||
4314 Opcode == AMDGPU::DS_ADD_GS_REG_RTN ||
4315 Opcode == AMDGPU::DS_SUB_GS_REG_RTN || isGWS(Opcode);
4316}
4317
4318bool SIInstrInfo::mayAccessScratchThroughFlat(const MachineInstr &MI) const {
4319 if (!isFLAT(MI) || isFLATGlobal(MI))
4320 return false;
4321
4322 // If scratch is not initialized, we can never access it.
4323 if (MI.getMF()->getFunction().hasFnAttribute("amdgpu-no-flat-scratch-init"))
4324 return false;
4325
4326 // SCRATCH instructions always access scratch.
4327 if (isFLATScratch(MI))
4328 return true;
4329
4330 // If there are no memory operands then conservatively assume the flat
4331 // operation may access scratch.
4332 if (MI.memoperands_empty())
4333 return true;
4334
4335 // See if any memory operand specifies an address space that involves scratch.
4336 return any_of(MI.memoperands(), [](const MachineMemOperand *Memop) {
4337 unsigned AS = Memop->getAddrSpace();
4338 if (AS == AMDGPUAS::FLAT_ADDRESS) {
4339 const MDNode *MD = Memop->getAAInfo().NoAliasAddrSpace;
4340 return !MD || !AMDGPU::hasValueInRangeLikeMetadata(
4341 *MD, AMDGPUAS::PRIVATE_ADDRESS);
4342 }
4343 return AS == AMDGPUAS::PRIVATE_ADDRESS;
4344 });
4345}
4346
4347bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
4348 // Skip the full operand and register alias search modifiesRegister
4349 // does. There's only a handful of instructions that touch this, it's only an
4350 // implicit def, and doesn't alias any other registers.
4351 return is_contained(MI.getDesc().implicit_defs(), AMDGPU::MODE);
4352}
4353
4354bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
4355 unsigned Opcode = MI.getOpcode();
4356
4357 if (MI.mayStore() && isSMRD(MI))
4358 return true; // scalar store or atomic
4359
4360 // This will terminate the function when other lanes may need to continue.
4361 if (MI.isReturn())
4362 return true;
4363
4364 // These instructions cause shader I/O that may cause hardware lockups
4365 // when executed with an empty EXEC mask.
4366 //
4367 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
4368 // EXEC = 0, but checking for that case here seems not worth it
4369 // given the typical code patterns.
4370 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
4371 isEXP(Opcode) || Opcode == AMDGPU::DS_ORDERED_COUNT ||
4372 Opcode == AMDGPU::S_TRAP || Opcode == AMDGPU::S_WAIT_EVENT)
4373 return true;
4374
4375 if (MI.isCall() || MI.isInlineAsm())
4376 return true; // conservative assumption
4377
4378 // Assume that barrier interactions are only intended with active lanes.
4379 if (isBarrier(Opcode))
4380 return true;
4381
4382 // A mode change is a scalar operation that influences vector instructions.
4383 if (modifiesModeRegister(MI))
4384 return true;
4385
4386 // These are like SALU instructions in terms of effects, so it's questionable
4387 // whether we should return true for those.
4388 //
4389 // However, executing them with EXEC = 0 causes them to operate on undefined
4390 // data, which we avoid by returning true here.
4391 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
4392 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32 ||
4393 Opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR ||
4394 Opcode == AMDGPU::SI_SPILL_S32_TO_VGPR)
4395 return true;
4396
4397 return false;
4398}
4399
4400bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
4401 const MachineInstr &MI) const {
4402 if (MI.isMetaInstruction())
4403 return false;
4404
4405 // This won't read exec if this is an SGPR->SGPR copy.
4406 if (MI.isCopyLike()) {
4407 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
4408 return true;
4409
4410 // Make sure this isn't copying exec as a normal operand
4411 return MI.readsRegister(AMDGPU::EXEC, &RI);
4412 }
4413
4414 // Make a conservative assumption about the callee.
4415 if (MI.isCall())
4416 return true;
4417
4418 // Be conservative with any unhandled generic opcodes.
4419 if (!isTargetSpecificOpcode(MI.getOpcode()))
4420 return true;
4421
4422 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
4423}
4424
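// Note: an inline constant is encoded directly in the instruction and is
// free; for AMDGPU this is roughly the integers -16..64 plus a small set
// of floating-point values (0.0, +-0.5, +-1.0, +-2.0, +-4.0, and 1/(2*pi)
// on subtargets with hasInv2PiInlineImm()).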
4425bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
4426 switch (Imm.getBitWidth()) {
4427 case 1: // This likely will be a condition code mask.
4428 return true;
4429
4430 case 32:
4431 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
4432 ST.hasInv2PiInlineImm());
4433 case 64:
4434 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
4435 ST.hasInv2PiInlineImm());
4436 case 16:
4437 return ST.has16BitInsts() &&
4438 AMDGPU::isInlinableLiteralI16(Imm.getSExtValue(),
4439 ST.hasInv2PiInlineImm());
4440 default:
4441 llvm_unreachable("invalid bitwidth");
4442 }
4443}
4444
4445bool SIInstrInfo::isInlineConstant(const APFloat &Imm) const {
4446 APInt IntImm = Imm.bitcastToAPInt();
4447 int64_t IntImmVal = IntImm.getSExtValue();
4448 bool HasInv2Pi = ST.hasInv2PiInlineImm();
4449 switch (APFloat::SemanticsToEnum(Imm.getSemantics())) {
4450 default:
4451 llvm_unreachable("invalid fltSemantics");
4452 case APFloat::S_IEEEsingle:
4453 case APFloat::S_IEEEdouble:
4454 return isInlineConstant(IntImm);
4455 case APFloat::S_BFloat:
4456 return ST.has16BitInsts() &&
4457 AMDGPU::isInlinableLiteralBF16(IntImmVal, HasInv2Pi);
4458 case APFloat::S_IEEEhalf:
4459 return ST.has16BitInsts() &&
4460 AMDGPU::isInlinableLiteralFP16(IntImmVal, HasInv2Pi);
4461 }
4462}
4463
4464bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
4465 // MachineOperand provides no way to tell the true operand size, since it only
4466 // records a 64-bit value. We need to know the size to determine if a 32-bit
4467 // floating point immediate bit pattern is legal for an integer immediate. It
4468 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
4469 switch (OperandType) {
4479 int32_t Trunc = static_cast<int32_t>(Imm);
4480 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
4481 }
4482 case AMDGPU::OPERAND_REG_IMM_INT64:
4483 case AMDGPU::OPERAND_REG_IMM_FP64:
4484 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
4485 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
4486 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
4487 return AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm());
4488 case AMDGPU::OPERAND_REG_IMM_INT16:
4489 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
4490 // We would expect inline immediates to not be concerned with an integer/fp
4491 // distinction. However, in the case of 16-bit integer operations, the
4492 // "floating point" values appear to not work. It seems read the low 16-bits
4493 // of 32-bit immediates, which happens to always work for the integer
4494 // values.
4495 //
4496 // See llvm bugzilla 46302.
4497 //
4498 // TODO: Theoretically we could use op-sel to use the high bits of the
4499 // 32-bit FP values.
4511 return false;
4512 case AMDGPU::OPERAND_REG_IMM_FP16:
4513 case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
4514 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
4515 // A few special case instructions have 16-bit operands on subtargets
4516 // where 16-bit instructions are not legal.
4517 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
4518 // constants in these cases
4519 int16_t Trunc = static_cast<int16_t>(Imm);
4520 return ST.has16BitInsts() &&
4521 AMDGPU::isInlinableLiteralFP16(Trunc, ST.hasInv2PiInlineImm());
4522 }
4523
4524 return false;
4525 }
4526 case AMDGPU::OPERAND_REG_IMM_BF16:
4527 case AMDGPU::OPERAND_REG_INLINE_C_BF16: {
4528 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
4529 int16_t Trunc = static_cast<int16_t>(Imm);
4530 return ST.has16BitInsts() &&
4531 AMDGPU::isInlinableLiteralBF16(Trunc, ST.hasInv2PiInlineImm());
4532 }
4533 return false;
4534 }
4538 return false;
4540 return isLegalAV64PseudoImm(Imm);
4543 // Always embedded in the instruction for free.
4544 return true;
4554 // Just ignore anything else.
4555 return true;
4556 default:
4557 llvm_unreachable("invalid operand type");
4558 }
4559}
4560
4561static bool compareMachineOp(const MachineOperand &Op0,
4562 const MachineOperand &Op1) {
4563 if (Op0.getType() != Op1.getType())
4564 return false;
4565
4566 switch (Op0.getType()) {
4568 return Op0.getReg() == Op1.getReg();
4570 return Op0.getImm() == Op1.getImm();
4571 default:
4572 llvm_unreachable("Didn't expect to be comparing these operand types");
4573 }
4574}
4575
4576bool SIInstrInfo::isLiteralOperandLegal(const MCInstrDesc &InstDesc,
4577 const MCOperandInfo &OpInfo) const {
4578 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
4579 return true;
4580
4581 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
4582 return false;
4583
4584 if (!isVOP3(InstDesc) || !AMDGPU::isSISrcOperand(OpInfo))
4585 return true;
4586
4587 return ST.hasVOP3Literal();
4588}
4589
4590bool SIInstrInfo::isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo,
4591 int64_t ImmVal) const {
4592 const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
4593 if (isInlineConstant(ImmVal, OpInfo.OperandType)) {
4594 if (isMAI(InstDesc) && ST.hasMFMAInlineLiteralBug() &&
4595 OpNo == (unsigned)AMDGPU::getNamedOperandIdx(InstDesc.getOpcode(),
4596 AMDGPU::OpName::src2))
4597 return false;
4598 return RI.opCanUseInlineConstant(OpInfo.OperandType);
4599 }
4600
4601 return isLiteralOperandLegal(InstDesc, OpInfo);
4602}
4603
4604bool SIInstrInfo::isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo,
4605 const MachineOperand &MO) const {
4606 if (MO.isImm())
4607 return isImmOperandLegal(InstDesc, OpNo, MO.getImm());
4608
4609 assert((MO.isTargetIndex() || MO.isFI() || MO.isGlobal()) &&
4610 "unexpected imm-like operand kind");
4611 const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
4612 return isLiteralOperandLegal(InstDesc, OpInfo);
4613}
4614
4615bool SIInstrInfo::isLegalAV64PseudoImm(uint64_t Imm) const {
4616 // 2 32-bit inline constants packed into one.
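// For example, 0x400000003F800000 (2.0f in the high half, 1.0f in the
// low half) would be legal, while an arbitrary 64-bit literal would not.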
4617 return AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) &&
4618 AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm());
4619}
4620
4621bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
4622 // GFX90A does not have V_MUL_LEGACY_F32_e32.
4623 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
4624 return false;
4625
4626 int Op32 = AMDGPU::getVOPe32(Opcode);
4627 if (Op32 == -1)
4628 return false;
4629
4630 return pseudoToMCOpcode(Op32) != -1;
4631}
4632
4633bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
4634 // The src0_modifiers operand is present on all instructions
4635 // that have modifiers.
4636
4637 return AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0_modifiers);
4638}
4639
4640bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
4641 AMDGPU::OpName OpName) const {
4642 const MachineOperand *Mods = getNamedOperand(MI, OpName);
4643 return Mods && Mods->getImm();
4644}
4645
4646bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
4647 return any_of(ModifierOpNames,
4648 [&](AMDGPU::OpName Name) { return hasModifiersSet(MI, Name); });
4649}
4650
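// A VOP3-encoded instruction can be shrunk to the 32-bit VOP2/VOPC
// encoding only if it uses none of the VOP3-only features: no source or
// output modifiers, VGPRs for src1/src2, and a 32-bit encoding must
// actually exist for the opcode.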
4651bool SIInstrInfo::canShrink(const MachineInstr &MI,
4652 const MachineRegisterInfo &MRI) const {
4653 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
4654 // Can't shrink instruction with three operands.
4655 if (Src2) {
4656 switch (MI.getOpcode()) {
4657 default: return false;
4658
4659 case AMDGPU::V_ADDC_U32_e64:
4660 case AMDGPU::V_SUBB_U32_e64:
4661 case AMDGPU::V_SUBBREV_U32_e64: {
4662 const MachineOperand *Src1
4663 = getNamedOperand(MI, AMDGPU::OpName::src1);
4664 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
4665 return false;
4666 // Additional verification is needed for sdst/src2.
4667 return true;
4668 }
4669 case AMDGPU::V_MAC_F16_e64:
4670 case AMDGPU::V_MAC_F32_e64:
4671 case AMDGPU::V_MAC_LEGACY_F32_e64:
4672 case AMDGPU::V_FMAC_F16_e64:
4673 case AMDGPU::V_FMAC_F16_t16_e64:
4674 case AMDGPU::V_FMAC_F16_fake16_e64:
4675 case AMDGPU::V_FMAC_F32_e64:
4676 case AMDGPU::V_FMAC_F64_e64:
4677 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4678 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
4679 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
4680 return false;
4681 break;
4682
4683 case AMDGPU::V_CNDMASK_B32_e64:
4684 break;
4685 }
4686 }
4687
4688 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
4689 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
4690 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
4691 return false;
4692
4693 // We don't need to check src0; all input types are legal there, so just
4694 // make sure src0 isn't using any modifiers.
4695 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
4696 return false;
4697
4698 // Can it be shrunk to a valid 32 bit opcode?
4699 if (!hasVALU32BitEncoding(MI.getOpcode()))
4700 return false;
4701
4702 // Check output modifiers
4703 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
4704 !hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
4705 !hasModifiersSet(MI, AMDGPU::OpName::byte_sel) &&
4706 // TODO: Can we avoid checking bound_ctrl/fi here?
4707 // They are only used by the permlane*_swap special case.
4708 !hasModifiersSet(MI, AMDGPU::OpName::bound_ctrl) &&
4709 !hasModifiersSet(MI, AMDGPU::OpName::fi);
4710}
4711
4712// Set VCC operand with all flags from \p Orig, except for setting it as
4713// implicit.
4714static void copyFlagsToImplicitVCC(MachineInstr &MI,
4715 const MachineOperand &Orig) {
4716
4717 for (MachineOperand &Use : MI.implicit_operands()) {
4718 if (Use.isUse() &&
4719 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
4720 Use.setIsUndef(Orig.isUndef());
4721 Use.setIsKill(Orig.isKill());
4722 return;
4723 }
4724 }
4725}
4726
4727MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
4728 unsigned Op32) const {
4729 MachineBasicBlock *MBB = MI.getParent();
4730
4731 const MCInstrDesc &Op32Desc = get(Op32);
4732 MachineInstrBuilder Inst32 =
4733 BuildMI(*MBB, MI, MI.getDebugLoc(), Op32Desc)
4734 .setMIFlags(MI.getFlags());
4735
4736 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
4737 // For VOPC instructions, this is replaced by an implicit def of vcc.
4738
4739 // We assume the defs of the shrunk opcode are in the same order, and the
4740 // shrunk opcode loses the last def (SGPR def, in the VOP3->VOPC case).
4741 for (int I = 0, E = Op32Desc.getNumDefs(); I != E; ++I)
4742 Inst32.add(MI.getOperand(I));
4743
4744 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
4745
4746 int Idx = MI.getNumExplicitDefs();
4747 for (const MachineOperand &Use : MI.explicit_uses()) {
4748 int OpTy = MI.getDesc().operands()[Idx++].OperandType;
4749 if (OpTy == AMDGPU::OPERAND_INPUT_MODS || OpTy == MCOI::OPERAND_IMMEDIATE)
4750 continue;
4751
4752 if (&Use == Src2) {
4753 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2) == -1) {
4754 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
4755 // replaced with an implicit read of vcc or vcc_lo. The implicit read
4756 // of vcc was already added during the initial BuildMI, but we
4757 // 1) may need to change vcc to vcc_lo to preserve the original register
4758 // 2) have to preserve the original flags.
4759 copyFlagsToImplicitVCC(*Inst32, *Src2);
4760 continue;
4761 }
4762 }
4763
4764 Inst32.add(Use);
4765 }
4766
4767 // FIXME: Losing implicit operands
4768 fixImplicitOperands(*Inst32);
4769 return Inst32;
4770}
4771
4772bool SIInstrInfo::physRegUsesConstantBus(const MachineOperand &RegOp) const {
4773 // Null is free
4774 Register Reg = RegOp.getReg();
4775 if (Reg == AMDGPU::SGPR_NULL || Reg == AMDGPU::SGPR_NULL64)
4776 return false;
4777
4778 // SGPRs use the constant bus
4779
4780 // FIXME: implicit registers that are not part of the MCInstrDesc's implicit
4781 // physical register operands should also count, except for exec.
4782 if (RegOp.isImplicit())
4783 return Reg == AMDGPU::VCC || Reg == AMDGPU::VCC_LO || Reg == AMDGPU::M0;
4784
4785 // SGPRs use the constant bus
4786 return AMDGPU::SReg_32RegClass.contains(Reg) ||
4787 AMDGPU::SReg_64RegClass.contains(Reg);
4788}
4789
4791 const MachineRegisterInfo &MRI) const {
4792 Register Reg = RegOp.getReg();
4793 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
4794 : physRegUsesConstantBus(RegOp);
4795}
4796
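// An operand occupies the limited constant bus if it is an SGPR or a
// literal constant; inline constants are encoded in the instruction
// itself and do not count.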
4797bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
4798 const MachineOperand &MO,
4799 const MCOperandInfo &OpInfo) const {
4800 // Literal constants use the constant bus.
4801 if (!MO.isReg())
4802 return !isInlineConstant(MO, OpInfo);
4803
4804 Register Reg = MO.getReg();
4805 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
4806 : physRegUsesConstantBus(MO);
4807}
4808
4809static Register findImplicitSGPRRead(const MachineInstr &MI) {
4810 for (const MachineOperand &MO : MI.implicit_operands()) {
4811 // We only care about reads.
4812 if (MO.isDef())
4813 continue;
4814
4815 switch (MO.getReg()) {
4816 case AMDGPU::VCC:
4817 case AMDGPU::VCC_LO:
4818 case AMDGPU::VCC_HI:
4819 case AMDGPU::M0:
4820 case AMDGPU::FLAT_SCR:
4821 return MO.getReg();
4822
4823 default:
4824 break;
4825 }
4826 }
4827
4828 return Register();
4829}
4830
4831static bool shouldReadExec(const MachineInstr &MI) {
4832 if (SIInstrInfo::isVALU(MI)) {
4833 switch (MI.getOpcode()) {
4834 case AMDGPU::V_READLANE_B32:
4835 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
4836 case AMDGPU::V_WRITELANE_B32:
4837 case AMDGPU::SI_SPILL_S32_TO_VGPR:
4838 return false;
4839 }
4840
4841 return true;
4842 }
4843
4844 if (MI.isPreISelOpcode() ||
4845 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
4846 SIInstrInfo::isSALU(MI) ||
4847 SIInstrInfo::isSMRD(MI))
4848 return false;
4849
4850 return true;
4851}
4852
4853static bool isRegOrFI(const MachineOperand &MO) {
4854 return MO.isReg() || MO.isFI();
4855}
4856
4857static bool isSubRegOf(const SIRegisterInfo &TRI,
4858 const MachineOperand &SuperVec,
4859 const MachineOperand &SubReg) {
4860 if (SubReg.getReg().isPhysical())
4861 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
4862
4863 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
4864 SubReg.getReg() == SuperVec.getReg();
4865}
4866
4867// Verify that a generic COPY does not illegally copy a vector register to an
4868// SGPR.
4868bool SIInstrInfo::verifyCopy(const MachineInstr &MI,
4869 const MachineRegisterInfo &MRI,
4870 StringRef &ErrInfo) const {
4871 Register DstReg = MI.getOperand(0).getReg();
4872 Register SrcReg = MI.getOperand(1).getReg();
4873 // This is a check for copy from vector register to SGPR
4874 if (RI.isVectorRegister(MRI, SrcReg) && RI.isSGPRReg(MRI, DstReg)) {
4875 ErrInfo = "illegal copy from vector register to SGPR";
4876 return false;
4877 }
4878 return true;
4879}
4880
4881bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
4882 StringRef &ErrInfo) const {
4883 uint16_t Opcode = MI.getOpcode();
4884 const MachineFunction *MF = MI.getParent()->getParent();
4885 const MachineRegisterInfo &MRI = MF->getRegInfo();
4886
4887 // FIXME: At this point the COPY verification is done only for non-SSA forms.
4888 // Find a better property to recognize the point where instruction selection
4889 // is just done.
4890 // We can only enforce this check after SIFixSGPRCopies pass so that the
4891 // illegal copies are legalized and thereafter we don't expect a pass
4892 // inserting similar copies.
4893 if (!MRI.isSSA() && MI.isCopy())
4894 return verifyCopy(MI, MRI, ErrInfo);
4895
4896 if (SIInstrInfo::isGenericOpcode(Opcode))
4897 return true;
4898
4899 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
4900 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
4901 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
4902 int Src3Idx = -1;
4903 if (Src0Idx == -1) {
4904 // VOPD V_DUAL_* instructions use different operand names.
4905 Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0X);
4906 Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1X);
4907 Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0Y);
4908 Src3Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1Y);
4909 }
4910
4911 // Make sure the number of operands is correct.
4912 const MCInstrDesc &Desc = get(Opcode);
4913 if (!Desc.isVariadic() &&
4914 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
4915 ErrInfo = "Instruction has wrong number of operands.";
4916 return false;
4917 }
4918
4919 if (MI.isInlineAsm()) {
4920 // Verify register classes for inlineasm constraints.
4921 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
4922 I != E; ++I) {
4923 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
4924 if (!RC)
4925 continue;
4926
4927 const MachineOperand &Op = MI.getOperand(I);
4928 if (!Op.isReg())
4929 continue;
4930
4931 Register Reg = Op.getReg();
4932 if (!Reg.isVirtual() && !RC->contains(Reg)) {
4933 ErrInfo = "inlineasm operand has incorrect register class.";
4934 return false;
4935 }
4936 }
4937
4938 return true;
4939 }
4940
4941 if (isImage(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
4942 ErrInfo = "missing memory operand from image instruction.";
4943 return false;
4944 }
4945
4946 // Make sure the register classes are correct.
4947 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
4948 const MachineOperand &MO = MI.getOperand(i);
4949 if (MO.isFPImm()) {
4950 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
4951 "all fp values to integers.";
4952 return false;
4953 }
4954
4955 int RegClass = Desc.operands()[i].RegClass;
4956
4957 const MCOperandInfo &OpInfo = Desc.operands()[i];
4958 switch (OpInfo.OperandType) {
4959 case MCOI::OPERAND_REGISTER:
4960 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
4961 ErrInfo = "Illegal immediate value for operand.";
4962 return false;
4963 }
4964 break;
4977 break;
4979 break;
4980 break;
4994 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
4995 ErrInfo = "Illegal immediate value for operand.";
4996 return false;
4997 }
4998 break;
4999 }
5001 if (!MI.getOperand(i).isImm() || !isInlineConstant(MI, i)) {
5002 ErrInfo = "Expected inline constant for operand.";
5003 return false;
5004 }
5005 break;
5009 break;
5014 // Check if this operand is an immediate.
5015 // FrameIndex operands will be replaced by immediates, so they are
5016 // allowed.
5017 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
5018 ErrInfo = "Expected immediate, but got non-immediate";
5019 return false;
5020 }
5021 break;
5025 break;
5026 default:
5027 if (OpInfo.isGenericType())
5028 continue;
5029 break;
5030 }
5031
5032 if (!MO.isReg())
5033 continue;
5034 Register Reg = MO.getReg();
5035 if (!Reg)
5036 continue;
5037
5038 // FIXME: Ideally we would have separate instruction definitions with the
5039 // aligned register constraint.
5040 // FIXME: We do not verify inline asm operands, but custom inline asm
5041 // verification is broken anyway
5042 if (ST.needsAlignedVGPRs() && Opcode != AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
5043 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
5044 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) {
5045 if (const TargetRegisterClass *SubRC =
5046 RI.getSubRegisterClass(RC, MO.getSubReg())) {
5047 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
5048 if (RC)
5049 RC = SubRC;
5050 }
5051 }
5052
5053 // Check that this is the aligned version of the class.
5054 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
5055 ErrInfo = "Subtarget requires even aligned vector registers";
5056 return false;
5057 }
5058 }
5059
5060 if (RegClass != -1) {
5061 if (Reg.isVirtual())
5062 continue;
5063
5064 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
5065 if (!RC->contains(Reg)) {
5066 ErrInfo = "Operand has incorrect register class.";
5067 return false;
5068 }
5069 }
5070 }
5071
5072 // Verify SDWA
5073 if (isSDWA(MI)) {
5074 if (!ST.hasSDWA()) {
5075 ErrInfo = "SDWA is not supported on this target";
5076 return false;
5077 }
5078
5079 for (auto Op : {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel,
5080 AMDGPU::OpName::dst_sel}) {
5081 const MachineOperand *MO = getNamedOperand(MI, Op);
5082 if (!MO)
5083 continue;
5084 int64_t Imm = MO->getImm();
5085 if (Imm < 0 || Imm > AMDGPU::SDWA::SdwaSel::DWORD) {
5086 ErrInfo = "Invalid SDWA selection";
5087 return false;
5088 }
5089 }
5090
5091 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
5092
5093 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {
5094 if (OpIdx == -1)
5095 continue;
5096 const MachineOperand &MO = MI.getOperand(OpIdx);
5097
5098 if (!ST.hasSDWAScalar()) {
5099 // Only VGPRS on VI
5100 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
5101 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
5102 return false;
5103 }
5104 } else {
5105 // No immediates on GFX9
5106 if (!MO.isReg()) {
5107 ErrInfo =
5108 "Only reg allowed as operands in SDWA instructions on GFX9+";
5109 return false;
5110 }
5111 }
5112 }
5113
5114 if (!ST.hasSDWAOmod()) {
5115 // No omod allowed on VI
5116 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
5117 if (OMod != nullptr &&
5118 (!OMod->isImm() || OMod->getImm() != 0)) {
5119 ErrInfo = "OMod not allowed in SDWA instructions on VI";
5120 return false;
5121 }
5122 }
5123
5124 if (Opcode == AMDGPU::V_CVT_F32_FP8_sdwa ||
5125 Opcode == AMDGPU::V_CVT_F32_BF8_sdwa ||
5126 Opcode == AMDGPU::V_CVT_PK_F32_FP8_sdwa ||
5127 Opcode == AMDGPU::V_CVT_PK_F32_BF8_sdwa) {
5128 const MachineOperand *Src0ModsMO =
5129 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
5130 unsigned Mods = Src0ModsMO->getImm();
5131 if (Mods & SISrcMods::ABS || Mods & SISrcMods::NEG ||
5132 Mods & SISrcMods::SEXT) {
5133 ErrInfo = "sext, abs and neg are not allowed on this instruction";
5134 return false;
5135 }
5136 }
5137
5138 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
5139 if (isVOPC(BasicOpcode)) {
5140 if (!ST.hasSDWASdst() && DstIdx != -1) {
5141 // Only vcc allowed as dst on VI for VOPC
5142 const MachineOperand &Dst = MI.getOperand(DstIdx);
5143 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
5144 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
5145 return false;
5146 }
5147 } else if (!ST.hasSDWAOutModsVOPC()) {
5148 // No clamp allowed on GFX9 for VOPC
5149 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
5150 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
5151 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
5152 return false;
5153 }
5154
5155 // No omod allowed on GFX9 for VOPC
5156 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
5157 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
5158 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
5159 return false;
5160 }
5161 }
5162 }
5163
5164 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
5165 if (DstUnused && DstUnused->isImm() &&
5166 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
5167 const MachineOperand &Dst = MI.getOperand(DstIdx);
5168 if (!Dst.isReg() || !Dst.isTied()) {
5169 ErrInfo = "Dst register should have tied register";
5170 return false;
5171 }
5172
5173 const MachineOperand &TiedMO =
5174 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
5175 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
5176 ErrInfo =
5177 "Dst register should be tied to implicit use of preserved register";
5178 return false;
5179 }
5180 if (TiedMO.getReg().isPhysical() && Dst.getReg() != TiedMO.getReg()) {
5181 ErrInfo = "Dst register should use same physical register as preserved";
5182 return false;
5183 }
5184 }
5185 }
5186
5187 // Verify MIMG / VIMAGE / VSAMPLE
5188 if (isImage(Opcode) && !MI.mayStore()) {
5189 // Ensure that the return type used is large enough for all the options
5190 // being used; TFE/LWE require an extra result register.
5191 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
5192 if (DMask) {
5193 uint64_t DMaskImm = DMask->getImm();
5194 uint32_t RegCount = isGather4(Opcode) ? 4 : llvm::popcount(DMaskImm);
5195 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
5196 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
5197 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
5198
5199 // Adjust for packed 16 bit values
5200 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
5201 RegCount = divideCeil(RegCount, 2);
5202
5203 // Adjust if using LWE or TFE
5204 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
5205 RegCount += 1;
5206
5207 const uint32_t DstIdx =
5208 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
5209 const MachineOperand &Dst = MI.getOperand(DstIdx);
5210 if (Dst.isReg()) {
5211 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
5212 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
5213 if (RegCount > DstSize) {
5214 ErrInfo = "Image instruction returns too many registers for dst "
5215 "register class";
5216 return false;
5217 }
5218 }
5219 }
5220 }
5221
5222 // Verify VOP*. Ignore multiple sgpr operands on writelane.
5223 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) {
5224 unsigned ConstantBusCount = 0;
5225 bool UsesLiteral = false;
5226 const MachineOperand *LiteralVal = nullptr;
5227
5228 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
5229 if (ImmIdx != -1) {
5230 ++ConstantBusCount;
5231 UsesLiteral = true;
5232 LiteralVal = &MI.getOperand(ImmIdx);
5233 }
5234
5235 SmallVector<Register, 2> SGPRsUsed;
5236 Register SGPRUsed;
5237
5238 // Only look at the true operands. Only a real operand can use the constant
5239 // bus, and we don't want to check pseudo-operands like the source modifier
5240 // flags.
5241 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx, Src3Idx}) {
5242 if (OpIdx == -1)
5243 continue;
5244 const MachineOperand &MO = MI.getOperand(OpIdx);
5245 if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
5246 if (MO.isReg()) {
5247 SGPRUsed = MO.getReg();
5248 if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) {
5249 ++ConstantBusCount;
5250 SGPRsUsed.push_back(SGPRUsed);
5251 }
5252 } else if (!MO.isFI()) { // Treat FI like a register.
5253 if (!UsesLiteral) {
5254 ++ConstantBusCount;
5255 UsesLiteral = true;
5256 LiteralVal = &MO;
5257 } else if (!MO.isIdenticalTo(*LiteralVal)) {
5258 assert(isVOP2(MI) || isVOP3(MI));
5259 ErrInfo = "VOP2/VOP3 instruction uses more than one literal";
5260 return false;
5261 }
5262 }
5263 }
5264 }
5265
5266 SGPRUsed = findImplicitSGPRRead(MI);
5267 if (SGPRUsed) {
5268 // Implicit uses may safely overlap true operands
5269 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
5270 return !RI.regsOverlap(SGPRUsed, SGPR);
5271 })) {
5272 ++ConstantBusCount;
5273 SGPRsUsed.push_back(SGPRUsed);
5274 }
5275 }
5276
5277 // v_writelane_b32 is an exception to the constant bus restriction: vsrc0
5278 // can be an SGPR, constant, or m0, and the lane select an SGPR, m0, or an inline constant.
5279 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
5280 Opcode != AMDGPU::V_WRITELANE_B32) {
5281 ErrInfo = "VOP* instruction violates constant bus restriction";
5282 return false;
5283 }
5284
5285 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) {
5286 ErrInfo = "VOP3 instruction uses literal";
5287 return false;
5288 }
5289 }
5290
5291 // Special case for writelane: it can exceed the constant bus limit, but
5292 // still can't read more than one SGPR register.
5293 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
5294 unsigned SGPRCount = 0;
5295 Register SGPRUsed;
5296
5297 for (int OpIdx : {Src0Idx, Src1Idx}) {
5298 if (OpIdx == -1)
5299 break;
5300
5301 const MachineOperand &MO = MI.getOperand(OpIdx);
5302
5303 if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
5304 if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
5305 if (MO.getReg() != SGPRUsed)
5306 ++SGPRCount;
5307 SGPRUsed = MO.getReg();
5308 }
5309 }
5310 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
5311 ErrInfo = "WRITELANE instruction violates constant bus restriction";
5312 return false;
5313 }
5314 }
5315 }
5316
5317 // Verify misc. restrictions on specific instructions.
5318 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
5319 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
5320 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5321 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
5322 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
5323 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
5324 if (!compareMachineOp(Src0, Src1) &&
5325 !compareMachineOp(Src0, Src2)) {
5326 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
5327 return false;
5328 }
5329 }
5330 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
5331 SISrcMods::ABS) ||
5332 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
5333 SISrcMods::ABS) ||
5334 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
5335 SISrcMods::ABS)) {
5336 ErrInfo = "ABS not allowed in VOP3B instructions";
5337 return false;
5338 }
5339 }
5340
5341 if (isSOP2(MI) || isSOPC(MI)) {
5342 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5343 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
5344
5345 if (!isRegOrFI(Src0) && !isRegOrFI(Src1) &&
5346 !isInlineConstant(Src0, Desc.operands()[Src0Idx]) &&
5347 !isInlineConstant(Src1, Desc.operands()[Src1Idx]) &&
5348 !Src0.isIdenticalTo(Src1)) {
5349 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
5350 return false;
5351 }
5352 }
5353
5354 if (isSOPK(MI)) {
5355 const auto *Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
5356 if (Desc.isBranch()) {
5357 if (!Op->isMBB()) {
5358 ErrInfo = "invalid branch target for SOPK instruction";
5359 return false;
5360 }
5361 } else {
5362 uint64_t Imm = Op->getImm();
5363 if (sopkIsZext(Opcode)) {
5364 if (!isUInt<16>(Imm)) {
5365 ErrInfo = "invalid immediate for SOPK instruction";
5366 return false;
5367 }
5368 } else {
5369 if (!isInt<16>(Imm)) {
5370 ErrInfo = "invalid immediate for SOPK instruction";
5371 return false;
5372 }
5373 }
5374 }
5375 }
5376
5377 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
5378 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
5379 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5380 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
5381 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5382 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
5383
5384 const unsigned StaticNumOps =
5385 Desc.getNumOperands() + Desc.implicit_uses().size();
5386 const unsigned NumImplicitOps = IsDst ? 2 : 1;
5387
5388 // Allow additional implicit operands. This allows a fixup done by the post
5389 // RA scheduler where the main implicit operand is killed and implicit-defs
5390 // are added for sub-registers that remain live after this instruction.
5391 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
5392 ErrInfo = "missing implicit register operands";
5393 return false;
5394 }
5395
5396 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
5397 if (IsDst) {
5398 if (!Dst->isUse()) {
5399 ErrInfo = "v_movreld_b32 vdst should be a use operand";
5400 return false;
5401 }
5402
5403 unsigned UseOpIdx;
5404 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
5405 UseOpIdx != StaticNumOps + 1) {
5406 ErrInfo = "movrel implicit operands should be tied";
5407 return false;
5408 }
5409 }
5410
5411 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5412 const MachineOperand &ImpUse
5413 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
5414 if (!ImpUse.isReg() || !ImpUse.isUse() ||
5415 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
5416 ErrInfo = "src0 should be subreg of implicit vector use";
5417 return false;
5418 }
5419 }
5420
5421 // Make sure we aren't losing exec uses in the td files. This mostly requires
5422 // being careful when using `let Uses` to add other use registers.
5423 if (shouldReadExec(MI)) {
5424 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
5425 ErrInfo = "VALU instruction does not implicitly read exec mask";
5426 return false;
5427 }
5428 }
5429
5430 if (isSMRD(MI)) {
5431 if (MI.mayStore() &&
5432 ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
5433 // The register offset form of scalar stores may only use m0 as the
5434 // soffset register.
5435 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soffset);
5436 if (Soff && Soff->getReg() != AMDGPU::M0) {
5437 ErrInfo = "scalar stores must use m0 as offset register";
5438 return false;
5439 }
5440 }
5441 }
5442
5443 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
5444 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5445 if (Offset->getImm() != 0) {
5446 ErrInfo = "subtarget does not support offsets in flat instructions";
5447 return false;
5448 }
5449 }
5450
5451 if (isDS(MI) && !ST.hasGDS()) {
5452 const MachineOperand *GDSOp = getNamedOperand(MI, AMDGPU::OpName::gds);
5453 if (GDSOp && GDSOp->getImm() != 0) {
5454 ErrInfo = "GDS is not supported on this subtarget";
5455 return false;
5456 }
5457 }
5458
5459 if (isImage(MI)) {
5460 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
5461 if (DimOp) {
5462 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
5463 AMDGPU::OpName::vaddr0);
5464 AMDGPU::OpName RSrcOpName =
5465 isMIMG(MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
5466 int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName);
5467 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
5468 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5469 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
5470 const AMDGPU::MIMGDimInfo *Dim =
5471 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());
5472
5473 if (!Dim) {
5474 ErrInfo = "dim is out of range";
5475 return false;
5476 }
5477
5478 bool IsA16 = false;
5479 if (ST.hasR128A16()) {
5480 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
5481 IsA16 = R128A16->getImm() != 0;
5482 } else if (ST.hasA16()) {
5483 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
5484 IsA16 = A16->getImm() != 0;
5485 }
5486
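// If more than one operand lies between vaddr0 and the resource descriptor,
// the addresses use the non-sequential address (NSA) encoding.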
5487 bool IsNSA = RsrcIdx - VAddr0Idx > 1;
5488
5489 unsigned AddrWords =
5490 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16());
5491
5492 unsigned VAddrWords;
5493 if (IsNSA) {
5494 VAddrWords = RsrcIdx - VAddr0Idx;
5495 if (ST.hasPartialNSAEncoding() &&
5496 AddrWords > ST.getNSAMaxSize(isVSAMPLE(MI))) {
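// In partial NSA form the final vaddr operand is a contiguous tuple holding
// all remaining addresses; count the extra dwords it provides.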
5497 unsigned LastVAddrIdx = RsrcIdx - 1;
5498 VAddrWords += getOpSize(MI, LastVAddrIdx) / 4 - 1;
5499 }
5500 } else {
5501 VAddrWords = getOpSize(MI, VAddr0Idx) / 4;
5502 if (AddrWords > 12)
5503 AddrWords = 16;
5504 }
5505
5506 if (VAddrWords != AddrWords) {
5507 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
5508 << " but got " << VAddrWords << "\n");
5509 ErrInfo = "bad vaddr size";
5510 return false;
5511 }
5512 }
5513 }
5514
5515 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
5516 if (DppCt) {
5517 using namespace AMDGPU::DPP;
5518
5519 unsigned DC = DppCt->getImm();
5520 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
5521 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
5522 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
5523 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
5524 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
5525 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
5526 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
5527 ErrInfo = "Invalid dpp_ctrl value";
5528 return false;
5529 }
5530 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
5531 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
5532 ErrInfo = "Invalid dpp_ctrl value: "
5533 "wavefront shifts are not supported on GFX10+";
5534 return false;
5535 }
5536 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
5537 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
5538 ErrInfo = "Invalid dpp_ctrl value: "
5539 "broadcasts are not supported on GFX10+";
5540 return false;
5541 }
5542 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
5543 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
5544 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
5545 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
5546 !ST.hasGFX90AInsts()) {
5547 ErrInfo = "Invalid dpp_ctrl value: "
5548 "row_newbroadcast/row_share is not supported before "
5549 "GFX90A/GFX10";
5550 return false;
5551 }
5552 if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
5553 ErrInfo = "Invalid dpp_ctrl value: "
5554 "row_share and row_xmask are not supported before GFX10";
5555 return false;
5556 }
5557 }
5558
5559 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
5560 !AMDGPU::isLegalDPALU_DPPControl(ST, DC) &&
5561 AMDGPU::isDPALU_DPP(Desc, ST)) {
5562 ErrInfo = "Invalid dpp_ctrl value: "
5563 "DP ALU dpp only support row_newbcast";
5564 return false;
5565 }
5566 }
5567
5568 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
5569 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
5570 AMDGPU::OpName DataName =
5571 isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata;
5572 const MachineOperand *Data = getNamedOperand(MI, DataName);
5573 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
5574 if (Data && !Data->isReg())
5575 Data = nullptr;
5576
5577 if (ST.hasGFX90AInsts()) {
5578 if (Dst && Data && !Dst->isTied() && !Data->isTied() &&
5579 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
5580 ErrInfo = "Invalid register class: "
5581 "vdata and vdst should be both VGPR or AGPR";
5582 return false;
5583 }
5584 if (Data && Data2 &&
5585 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) {
5586 ErrInfo = "Invalid register class: "
5587 "both data operands should be VGPR or AGPR";
5588 return false;
5589 }
5590 } else {
5591 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
5592 (Data && RI.isAGPR(MRI, Data->getReg())) ||
5593 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
5594 ErrInfo = "Invalid register class: "
5595 "agpr loads and stores not supported on this GPU";
5596 return false;
5597 }
5598 }
5599 }
5600
5601 if (ST.needsAlignedVGPRs()) {
5602 const auto isAlignedReg = [&MI, &MRI, this](AMDGPU::OpName OpName) -> bool {
5603 const MachineOperand *Op = getNamedOperand(MI, OpName);
5604 if (!Op)
5605 return true;
5606 Register Reg = Op->getReg();
5607 if (Reg.isPhysical())
5608 return !(RI.getHWRegIndex(Reg) & 1);
5609 const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
5610 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
5611 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1);
5612 };
5613
5614 if (Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_SEMA_BR ||
5615 Opcode == AMDGPU::DS_GWS_BARRIER) {
5616
5617 if (!isAlignedReg(AMDGPU::OpName::data0)) {
5618 ErrInfo = "Subtarget requires even aligned vector registers "
5619 "for DS_GWS instructions";
5620 return false;
5621 }
5622 }
5623
5624 if (isMIMG(MI)) {
5625 if (!isAlignedReg(AMDGPU::OpName::vaddr)) {
5626 ErrInfo = "Subtarget requires even aligned vector registers "
5627 "for vaddr operand of image instructions";
5628 return false;
5629 }
5630 }
5631 }
5632
5633 if (Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts()) {
5634 const MachineOperand *Src = getNamedOperand(MI, AMDGPU::OpName::src0);
5635 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) {
5636 ErrInfo = "Invalid register class: "
5637 "v_accvgpr_write with an SGPR is not supported on this GPU";
5638 return false;
5639 }
5640 }
5641
5642 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
5643 const MachineOperand &SrcOp = MI.getOperand(1);
5644 if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
5645 ErrInfo = "pseudo expects only physical SGPRs";
5646 return false;
5647 }
5648 }
5649
5650 if (const MachineOperand *CPol = getNamedOperand(MI, AMDGPU::OpName::cpol)) {
5651 if (CPol->getImm() & AMDGPU::CPol::SCAL) {
5652 if (!ST.hasScaleOffset()) {
5653 ErrInfo = "Subtarget does not support offset scaling";
5654 return false;
5655 }
5656 if (!AMDGPU::supportsScaleOffset(*this, MI.getOpcode())) {
5657 ErrInfo = "Instruction does not support offset scaling";
5658 return false;
5659 }
5660 }
5661 }
5662
5663 // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
5664 // information.
5665 if (AMDGPU::isPackedFP32Inst(Opcode) && AMDGPU::isGFX12Plus(ST)) {
5666 for (unsigned I = 0; I < 3; ++I) {
5667 if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I))
5668 return false;
5669 }
5670 }
5671
5672 return true;
5673}
5674
5675// It is more readable to list mapped opcodes on the same line.
5676// clang-format off
5677
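// Map a scalar (SALU) opcode to its VALU equivalent, e.g. S_AND_B32 ->
// V_AND_B32_e64, or INSTRUCTION_LIST_END if there is no direct equivalent.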
5678 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
5679 switch (MI.getOpcode()) {
5680 default: return AMDGPU::INSTRUCTION_LIST_END;
5681 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
5682 case AMDGPU::COPY: return AMDGPU::COPY;
5683 case AMDGPU::PHI: return AMDGPU::PHI;
5684 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
5685 case AMDGPU::WQM: return AMDGPU::WQM;
5686 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
5687 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
5688 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
5689 case AMDGPU::S_MOV_B32: {
5690 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
5691 return MI.getOperand(1).isReg() ||
5692 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
5693 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
5694 }
5695 case AMDGPU::S_ADD_I32:
5696 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
5697 case AMDGPU::S_ADDC_U32:
5698 return AMDGPU::V_ADDC_U32_e32;
5699 case AMDGPU::S_SUB_I32:
5700 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
5701 // FIXME: These are not consistently handled, and selected when the carry is
5702 // used.
5703 case AMDGPU::S_ADD_U32:
5704 return AMDGPU::V_ADD_CO_U32_e32;
5705 case AMDGPU::S_SUB_U32:
5706 return AMDGPU::V_SUB_CO_U32_e32;
5707 case AMDGPU::S_ADD_U64_PSEUDO:
5708 return AMDGPU::V_ADD_U64_PSEUDO;
5709 case AMDGPU::S_SUB_U64_PSEUDO:
5710 return AMDGPU::V_SUB_U64_PSEUDO;
5711 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
5712 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
5713 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
5714 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
5715 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
5716 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
5717 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
5718 case AMDGPU::S_XNOR_B32:
5719 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
5720 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
5721 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
5722 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
5723 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
5724 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
5725 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
5726 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
5727 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
5728 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
5729 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
5730 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
5731 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
5732 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
5733 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
5734 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
5735 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
5736 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
5737 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
5738 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
5739 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
5740 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
5741 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
5742 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
5743 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
5744 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
5745 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
5746 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
5747 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
5748 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
5749 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
5750 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
5751 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
5752 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
5753 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
5754 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
5755 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
5756 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
5757 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
5758 case AMDGPU::S_CVT_F32_I32: return AMDGPU::V_CVT_F32_I32_e64;
5759 case AMDGPU::S_CVT_F32_U32: return AMDGPU::V_CVT_F32_U32_e64;
5760 case AMDGPU::S_CVT_I32_F32: return AMDGPU::V_CVT_I32_F32_e64;
5761 case AMDGPU::S_CVT_U32_F32: return AMDGPU::V_CVT_U32_F32_e64;
5762 case AMDGPU::S_CVT_F32_F16:
5763 case AMDGPU::S_CVT_HI_F32_F16:
5764 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F32_F16_t16_e64
5765 : AMDGPU::V_CVT_F32_F16_fake16_e64;
5766 case AMDGPU::S_CVT_F16_F32:
5767 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F16_F32_t16_e64
5768 : AMDGPU::V_CVT_F16_F32_fake16_e64;
5769 case AMDGPU::S_CEIL_F32: return AMDGPU::V_CEIL_F32_e64;
5770 case AMDGPU::S_FLOOR_F32: return AMDGPU::V_FLOOR_F32_e64;
5771 case AMDGPU::S_TRUNC_F32: return AMDGPU::V_TRUNC_F32_e64;
5772 case AMDGPU::S_RNDNE_F32: return AMDGPU::V_RNDNE_F32_e64;
5773 case AMDGPU::S_CEIL_F16:
5774 return ST.useRealTrue16Insts() ? AMDGPU::V_CEIL_F16_t16_e64
5775 : AMDGPU::V_CEIL_F16_fake16_e64;
5776 case AMDGPU::S_FLOOR_F16:
5777 return ST.useRealTrue16Insts() ? AMDGPU::V_FLOOR_F16_t16_e64
5778 : AMDGPU::V_FLOOR_F16_fake16_e64;
5779 case AMDGPU::S_TRUNC_F16:
5780 return ST.useRealTrue16Insts() ? AMDGPU::V_TRUNC_F16_t16_e64
5781 : AMDGPU::V_TRUNC_F16_fake16_e64;
5782 case AMDGPU::S_RNDNE_F16:
5783 return ST.useRealTrue16Insts() ? AMDGPU::V_RNDNE_F16_t16_e64
5784 : AMDGPU::V_RNDNE_F16_fake16_e64;
5785 case AMDGPU::S_ADD_F32: return AMDGPU::V_ADD_F32_e64;
5786 case AMDGPU::S_SUB_F32: return AMDGPU::V_SUB_F32_e64;
5787 case AMDGPU::S_MIN_F32: return AMDGPU::V_MIN_F32_e64;
5788 case AMDGPU::S_MAX_F32: return AMDGPU::V_MAX_F32_e64;
5789 case AMDGPU::S_MINIMUM_F32: return AMDGPU::V_MINIMUM_F32_e64;
5790 case AMDGPU::S_MAXIMUM_F32: return AMDGPU::V_MAXIMUM_F32_e64;
5791 case AMDGPU::S_MUL_F32: return AMDGPU::V_MUL_F32_e64;
5792 case AMDGPU::S_ADD_F16:
5793 return ST.useRealTrue16Insts() ? AMDGPU::V_ADD_F16_t16_e64
5794 : AMDGPU::V_ADD_F16_fake16_e64;
5795 case AMDGPU::S_SUB_F16:
5796 return ST.useRealTrue16Insts() ? AMDGPU::V_SUB_F16_t16_e64
5797 : AMDGPU::V_SUB_F16_fake16_e64;
5798 case AMDGPU::S_MIN_F16:
5799 return ST.useRealTrue16Insts() ? AMDGPU::V_MIN_F16_t16_e64
5800 : AMDGPU::V_MIN_F16_fake16_e64;
5801 case AMDGPU::S_MAX_F16:
5802 return ST.useRealTrue16Insts() ? AMDGPU::V_MAX_F16_t16_e64
5803 : AMDGPU::V_MAX_F16_fake16_e64;
5804 case AMDGPU::S_MINIMUM_F16:
5805 return ST.useRealTrue16Insts() ? AMDGPU::V_MINIMUM_F16_t16_e64
5806 : AMDGPU::V_MINIMUM_F16_fake16_e64;
5807 case AMDGPU::S_MAXIMUM_F16:
5808 return ST.useRealTrue16Insts() ? AMDGPU::V_MAXIMUM_F16_t16_e64
5809 : AMDGPU::V_MAXIMUM_F16_fake16_e64;
5810 case AMDGPU::S_MUL_F16:
5811 return ST.useRealTrue16Insts() ? AMDGPU::V_MUL_F16_t16_e64
5812 : AMDGPU::V_MUL_F16_fake16_e64;
5813 case AMDGPU::S_CVT_PK_RTZ_F16_F32: return AMDGPU::V_CVT_PKRTZ_F16_F32_e64;
5814 case AMDGPU::S_FMAC_F32: return AMDGPU::V_FMAC_F32_e64;
5815 case AMDGPU::S_FMAC_F16:
5816 return ST.useRealTrue16Insts() ? AMDGPU::V_FMAC_F16_t16_e64
5817 : AMDGPU::V_FMAC_F16_fake16_e64;
5818 case AMDGPU::S_FMAMK_F32: return AMDGPU::V_FMAMK_F32;
5819 case AMDGPU::S_FMAAK_F32: return AMDGPU::V_FMAAK_F32;
5820 case AMDGPU::S_CMP_LT_F32: return AMDGPU::V_CMP_LT_F32_e64;
5821 case AMDGPU::S_CMP_EQ_F32: return AMDGPU::V_CMP_EQ_F32_e64;
5822 case AMDGPU::S_CMP_LE_F32: return AMDGPU::V_CMP_LE_F32_e64;
5823 case AMDGPU::S_CMP_GT_F32: return AMDGPU::V_CMP_GT_F32_e64;
5824 case AMDGPU::S_CMP_LG_F32: return AMDGPU::V_CMP_LG_F32_e64;
5825 case AMDGPU::S_CMP_GE_F32: return AMDGPU::V_CMP_GE_F32_e64;
5826 case AMDGPU::S_CMP_O_F32: return AMDGPU::V_CMP_O_F32_e64;
5827 case AMDGPU::S_CMP_U_F32: return AMDGPU::V_CMP_U_F32_e64;
5828 case AMDGPU::S_CMP_NGE_F32: return AMDGPU::V_CMP_NGE_F32_e64;
5829 case AMDGPU::S_CMP_NLG_F32: return AMDGPU::V_CMP_NLG_F32_e64;
5830 case AMDGPU::S_CMP_NGT_F32: return AMDGPU::V_CMP_NGT_F32_e64;
5831 case AMDGPU::S_CMP_NLE_F32: return AMDGPU::V_CMP_NLE_F32_e64;
5832 case AMDGPU::S_CMP_NEQ_F32: return AMDGPU::V_CMP_NEQ_F32_e64;
5833 case AMDGPU::S_CMP_NLT_F32: return AMDGPU::V_CMP_NLT_F32_e64;
5834 case AMDGPU::S_CMP_LT_F16:
5835 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LT_F16_t16_e64
5836 : AMDGPU::V_CMP_LT_F16_fake16_e64;
5837 case AMDGPU::S_CMP_EQ_F16:
5838 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_EQ_F16_t16_e64
5839 : AMDGPU::V_CMP_EQ_F16_fake16_e64;
5840 case AMDGPU::S_CMP_LE_F16:
5841 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LE_F16_t16_e64
5842 : AMDGPU::V_CMP_LE_F16_fake16_e64;
5843 case AMDGPU::S_CMP_GT_F16:
5844 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GT_F16_t16_e64
5845 : AMDGPU::V_CMP_GT_F16_fake16_e64;
5846 case AMDGPU::S_CMP_LG_F16:
5847 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LG_F16_t16_e64
5848 : AMDGPU::V_CMP_LG_F16_fake16_e64;
5849 case AMDGPU::S_CMP_GE_F16:
5850 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GE_F16_t16_e64
5851 : AMDGPU::V_CMP_GE_F16_fake16_e64;
5852 case AMDGPU::S_CMP_O_F16:
5853 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_O_F16_t16_e64
5854 : AMDGPU::V_CMP_O_F16_fake16_e64;
5855 case AMDGPU::S_CMP_U_F16:
5856 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_U_F16_t16_e64
5857 : AMDGPU::V_CMP_U_F16_fake16_e64;
5858 case AMDGPU::S_CMP_NGE_F16:
5859 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGE_F16_t16_e64
5860 : AMDGPU::V_CMP_NGE_F16_fake16_e64;
5861 case AMDGPU::S_CMP_NLG_F16:
5862 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLG_F16_t16_e64
5863 : AMDGPU::V_CMP_NLG_F16_fake16_e64;
5864 case AMDGPU::S_CMP_NGT_F16:
5865 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGT_F16_t16_e64
5866 : AMDGPU::V_CMP_NGT_F16_fake16_e64;
5867 case AMDGPU::S_CMP_NLE_F16:
5868 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLE_F16_t16_e64
5869 : AMDGPU::V_CMP_NLE_F16_fake16_e64;
5870 case AMDGPU::S_CMP_NEQ_F16:
5871 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NEQ_F16_t16_e64
5872 : AMDGPU::V_CMP_NEQ_F16_fake16_e64;
5873 case AMDGPU::S_CMP_NLT_F16:
5874 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLT_F16_t16_e64
5875 : AMDGPU::V_CMP_NLT_F16_fake16_e64;
5876 case AMDGPU::V_S_EXP_F32_e64: return AMDGPU::V_EXP_F32_e64;
5877 case AMDGPU::V_S_EXP_F16_e64:
5878 return ST.useRealTrue16Insts() ? AMDGPU::V_EXP_F16_t16_e64
5879 : AMDGPU::V_EXP_F16_fake16_e64;
5880 case AMDGPU::V_S_LOG_F32_e64: return AMDGPU::V_LOG_F32_e64;
5881 case AMDGPU::V_S_LOG_F16_e64:
5882 return ST.useRealTrue16Insts() ? AMDGPU::V_LOG_F16_t16_e64
5883 : AMDGPU::V_LOG_F16_fake16_e64;
5884 case AMDGPU::V_S_RCP_F32_e64: return AMDGPU::V_RCP_F32_e64;
5885 case AMDGPU::V_S_RCP_F16_e64:
5886 return ST.useRealTrue16Insts() ? AMDGPU::V_RCP_F16_t16_e64
5887 : AMDGPU::V_RCP_F16_fake16_e64;
5888 case AMDGPU::V_S_RSQ_F32_e64: return AMDGPU::V_RSQ_F32_e64;
5889 case AMDGPU::V_S_RSQ_F16_e64:
5890 return ST.useRealTrue16Insts() ? AMDGPU::V_RSQ_F16_t16_e64
5891 : AMDGPU::V_RSQ_F16_fake16_e64;
5892 case AMDGPU::V_S_SQRT_F32_e64: return AMDGPU::V_SQRT_F32_e64;
5893 case AMDGPU::V_S_SQRT_F16_e64:
5894 return ST.useRealTrue16Insts() ? AMDGPU::V_SQRT_F16_t16_e64
5895 : AMDGPU::V_SQRT_F16_fake16_e64;
5896 }
5898 "Unexpected scalar opcode without corresponding vector one!");
5899}
5900
5901// clang-format on
5902
5903 void SIInstrInfo::insertScratchExecCopy(MachineFunction &MF,
5904 MachineBasicBlock &MBB,
5905 MachineBasicBlock::iterator MBBI,
5906 const DebugLoc &DL, Register Reg,
5907 bool IsSCCLive,
5908 SlotIndexes *Indexes) const {
5909 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5910 const SIInstrInfo *TII = ST.getInstrInfo();
5911 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
5912 if (IsSCCLive) {
5913 // Insert two move instructions, one to save the original value of EXEC and
5914 // the other to turn on all bits in EXEC. This is required as we can't use
5915 // the single instruction S_OR_SAVEEXEC that clobbers SCC.
5916 auto StoreExecMI = BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), Reg)
5917 .addReg(LMC.ExecReg);
5918 auto FlipExecMI =
5919 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
5920 if (Indexes) {
5921 Indexes->insertMachineInstrInMaps(*StoreExecMI);
5922 Indexes->insertMachineInstrInMaps(*FlipExecMI);
5923 }
5924 } else {
5925 auto SaveExec =
5926 BuildMI(MBB, MBBI, DL, TII->get(LMC.OrSaveExecOpc), Reg).addImm(-1);
5927 SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.
5928 if (Indexes)
5929 Indexes->insertMachineInstrInMaps(*SaveExec);
5930 }
5931}
5932
5933 void SIInstrInfo::restoreExec(MachineFunction &MF, MachineBasicBlock &MBB,
5934 MachineBasicBlock::iterator MBBI,
5935 const DebugLoc &DL, Register Reg,
5936 SlotIndexes *Indexes) const {
5937 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
5938 auto ExecRestoreMI = BuildMI(MBB, MBBI, DL, get(LMC.MovOpc), LMC.ExecReg)
5939 .addReg(Reg, RegState::Kill);
5940 if (Indexes)
5941 Indexes->insertMachineInstrInMaps(*ExecRestoreMI);
5942}
5943
5947 "Not a whole wave func");
5948 MachineBasicBlock &MBB = *MF.begin();
5949 for (MachineInstr &MI : MBB)
5950 if (MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_SETUP ||
5951 MI.getOpcode() == AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
5952 return &MI;
5953
5954 llvm_unreachable("Couldn't find SI_SETUP_WHOLE_WAVE_FUNC instruction");
5955}
5956
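// Before gfx90a, loads and stores cannot use the combined AGPR/VGPR (AV)
// classes, so demote them to the plain VGPR class of the same width before
// applying the subtarget's register alignment requirement.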
5957static const TargetRegisterClass *
5958 adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI,
5959 const MCInstrDesc &TID, unsigned RCID) {
5960 if (!ST.hasGFX90AInsts() && (TID.mayLoad() || TID.mayStore())) {
5961 switch (RCID) {
5962 case AMDGPU::AV_32RegClassID:
5963 RCID = AMDGPU::VGPR_32RegClassID;
5964 break;
5965 case AMDGPU::AV_64RegClassID:
5966 RCID = AMDGPU::VReg_64RegClassID;
5967 break;
5968 case AMDGPU::AV_96RegClassID:
5969 RCID = AMDGPU::VReg_96RegClassID;
5970 break;
5971 case AMDGPU::AV_128RegClassID:
5972 RCID = AMDGPU::VReg_128RegClassID;
5973 break;
5974 case AMDGPU::AV_160RegClassID:
5975 RCID = AMDGPU::VReg_160RegClassID;
5976 break;
5977 case AMDGPU::AV_512RegClassID:
5978 RCID = AMDGPU::VReg_512RegClassID;
5979 break;
5980 default:
5981 break;
5982 }
5983 }
5984
5985 return RI.getProperlyAlignedRC(RI.getRegClass(RCID));
5986}
5987
5988const TargetRegisterClass *
5989SIInstrInfo::getRegClass(const MCInstrDesc &TID, unsigned OpNum,
5990 const TargetRegisterInfo *TRI) const {
5991 if (OpNum >= TID.getNumOperands())
5992 return nullptr;
5993 auto RegClass = TID.operands()[OpNum].RegClass;
5994 // Special pseudos have no alignment requirement.
5995 if (TID.getOpcode() == AMDGPU::AV_MOV_B64_IMM_PSEUDO || isSpill(TID))
5996 return RI.getRegClass(RegClass);
5997
5998 return adjustAllocatableRegClass(ST, RI, TID, RegClass);
5999}
6000
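// Return the register class of operand OpNo, preferring the class of the
// actual register for variadic instructions and for operands with no
// declared register class.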
6001 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
6002 unsigned OpNo) const {
6003 const MCInstrDesc &Desc = get(MI.getOpcode());
6004 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
6005 Desc.operands()[OpNo].RegClass == -1) {
6006 Register Reg = MI.getOperand(OpNo).getReg();
6007
6008 if (Reg.isVirtual()) {
6009 const MachineRegisterInfo &MRI =
6010 MI.getParent()->getParent()->getRegInfo();
6011 return MRI.getRegClass(Reg);
6012 }
6013 return RI.getPhysRegBaseClass(Reg);
6014 }
6015
6016 unsigned RCID = Desc.operands()[OpNo].RegClass;
6017 return adjustAllocatableRegClass(ST, RI, Desc, RCID);
6018}
6019
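// Legalize an operand by moving it into a freshly created virtual register,
// choosing a mov opcode by operand size: plain COPY for register operands,
// S_MOV/V_MOV for immediates.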
6020 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
6021 MachineBasicBlock::iterator I = MI;
6022 MachineBasicBlock *MBB = MI.getParent();
6023 MachineOperand &MO = MI.getOperand(OpIdx);
6024 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
6025 unsigned RCID = get(MI.getOpcode()).operands()[OpIdx].RegClass;
6026 const TargetRegisterClass *RC = RI.getRegClass(RCID);
6027 unsigned Size = RI.getRegSizeInBits(*RC);
6028 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
6029 : Size == 16 ? AMDGPU::V_MOV_B16_t16_e64
6030 : AMDGPU::V_MOV_B32_e32;
6031 if (MO.isReg())
6032 Opcode = AMDGPU::COPY;
6033 else if (RI.isSGPRClass(RC))
6034 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
6035
6036 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
6037 Register Reg = MRI.createVirtualRegister(VRC);
6038 DebugLoc DL = MBB->findDebugLoc(I);
6039 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
6040 MO.ChangeToRegister(Reg, false);
6041}
6042
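// Extract subregister SubIdx of SuperReg. Physical registers are resolved
// directly; virtual registers get an explicit COPY into a register of the
// subregister class.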
6043 Register SIInstrInfo::buildExtractSubReg(
6044 MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI,
6045 const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC,
6046 unsigned SubIdx, const TargetRegisterClass *SubRC) const {
6047 if (!SuperReg.getReg().isVirtual())
6048 return RI.getSubReg(SuperReg.getReg(), SubIdx);
6049
6050 MachineBasicBlock *MBB = MI->getParent();
6051 const DebugLoc &DL = MI->getDebugLoc();
6052 Register SubReg = MRI.createVirtualRegister(SubRC);
6053
6054 unsigned NewSubIdx = RI.composeSubRegIndices(SuperReg.getSubReg(), SubIdx);
6055 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
6056 .addReg(SuperReg.getReg(), 0, NewSubIdx);
6057 return SubReg;
6058}
6059
6060 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
6061 MachineBasicBlock::iterator MII, MachineRegisterInfo &MRI,
6062 const MachineOperand &Op, const TargetRegisterClass *SuperRC,
6063 unsigned SubIdx, const TargetRegisterClass *SubRC) const {
6064 if (Op.isImm()) {
6065 if (SubIdx == AMDGPU::sub0)
6066 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
6067 if (SubIdx == AMDGPU::sub1)
6068 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
6069
6070 llvm_unreachable("Unhandled register index for immediate");
6071 }
6072
6073 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
6074 SubIdx, SubRC);
6075 return MachineOperand::CreateReg(SubReg, false);
6076}
6077
6078// Change the order of operands from (0, 1, 2) to (0, 2, 1)
6079void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
6080 assert(Inst.getNumExplicitOperands() == 3);
6081 MachineOperand Op1 = Inst.getOperand(1);
6082 Inst.removeOperand(1);
6083 Inst.addOperand(Op1);
6084}
6085
6086 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
6087 const MCOperandInfo &OpInfo,
6088 const MachineOperand &MO) const {
6089 if (!MO.isReg())
6090 return false;
6091
6092 Register Reg = MO.getReg();
6093
6094 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
6095 if (Reg.isPhysical())
6096 return DRC->contains(Reg);
6097
6098 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
6099
6100 if (MO.getSubReg()) {
6101 const MachineFunction *MF = MO.getParent()->getParent()->getParent();
6102 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
6103 if (!SuperRC)
6104 return false;
6105 return RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()) != nullptr;
6106 }
6107
6108 return RI.getCommonSubClass(DRC, RC) != nullptr;
6109}
6110
6111 bool SIInstrInfo::isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx,
6112 const MachineOperand &MO) const {
6113 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6114 const MCOperandInfo OpInfo = MI.getDesc().operands()[OpIdx];
6115 unsigned Opc = MI.getOpcode();
6116
6117 // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
6118 // information.
6119 if (AMDGPU::isPackedFP32Inst(MI.getOpcode()) && AMDGPU::isGFX12Plus(ST) &&
6120 MO.isReg() && RI.isSGPRReg(MRI, MO.getReg())) {
6121 constexpr const AMDGPU::OpName OpNames[] = {
6122 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2};
6123
6124 for (auto [I, OpName] : enumerate(OpNames)) {
6125 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[I]);
6126 if (static_cast<unsigned>(SrcIdx) == OpIdx &&
6127 !isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I, &MO))
6128 return false;
6129 }
6130 }
6131
6132 if (!isLegalRegOperand(MRI, OpInfo, MO))
6133 return false;
6134
6135 // check Accumulate GPR operand
6136 bool IsAGPR = RI.isAGPR(MRI, MO.getReg());
6137 if (IsAGPR && !ST.hasMAIInsts())
6138 return false;
6139 if (IsAGPR && (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
6140 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc)))
6141 return false;
6142 // Atomics should have both vdst and vdata either vgpr or agpr.
6143 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
6144 const int DataIdx = AMDGPU::getNamedOperandIdx(
6145 Opc, isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
6146 if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
6147 MI.getOperand(DataIdx).isReg() &&
6148 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
6149 return false;
6150 if ((int)OpIdx == DataIdx) {
6151 if (VDstIdx != -1 &&
6152 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
6153 return false;
6154 // DS instructions with 2 src operands must also have matching AGPR/VGPR banks.
6155 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
6156 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
6157 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
6158 return false;
6159 }
6160
6161 // Check V_ACCVGPR_WRITE_B32_e64
6162 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() &&
6163 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
6164 RI.isSGPRReg(MRI, MO.getReg()))
6165 return false;
6166 return true;
6167}
6168
6169 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
6170 const MCOperandInfo &OpInfo,
6171 const MachineOperand &MO) const {
6172 if (MO.isReg())
6173 return isLegalRegOperand(MRI, OpInfo, MO);
6174
6175 // Handle non-register types that are treated like immediates.
6176 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
6177 return true;
6178}
6179
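// On GFX12+, an SGPR source of a packed FP32 instruction is only legal when
// its op_sel and op_sel_hi modifier bits are both clear.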
6180 bool SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand(
6181 const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN,
6182 const MachineOperand *MO) const {
6183 constexpr const unsigned NumOps = 3;
6184 constexpr const AMDGPU::OpName OpNames[NumOps * 2] = {
6185 AMDGPU::OpName::src0, AMDGPU::OpName::src1,
6186 AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers,
6187 AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers};
6188
6189 assert(SrcN < NumOps);
6190
6191 if (!MO) {
6192 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[SrcN]);
6193 if (SrcIdx == -1)
6194 return true;
6195 MO = &MI.getOperand(SrcIdx);
6196 }
6197
6198 if (!MO->isReg() || !RI.isSGPRReg(MRI, MO->getReg()))
6199 return true;
6200
6201 int ModsIdx =
6202 AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[NumOps + SrcN]);
6203 if (ModsIdx == -1)
6204 return true;
6205
6206 unsigned Mods = MI.getOperand(ModsIdx).getImm();
6207 bool OpSel = Mods & SISrcMods::OP_SEL_0;
6208 bool OpSelHi = Mods & SISrcMods::OP_SEL_1;
6209
6210 return !OpSel && !OpSelHi;
6211}
6212
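// Check whether operand OpIdx of MI could be replaced by MO (or is itself
// legal when MO is null), honoring constant bus and literal limits as well
// as register class and immediate constraints.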
6213 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
6214 const MachineOperand *MO) const {
6215 const MachineFunction &MF = *MI.getParent()->getParent();
6216 const MachineRegisterInfo &MRI = MF.getRegInfo();
6217 const MCInstrDesc &InstDesc = MI.getDesc();
6218 const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];
6219 const TargetRegisterClass *DefinedRC =
6220 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
6221 if (!MO)
6222 MO = &MI.getOperand(OpIdx);
6223
6224 const bool IsInlineConst = !MO->isReg() && isInlineConstant(*MO, OpInfo);
6225
6226 if (isVALU(MI) && !IsInlineConst && usesConstantBus(MRI, *MO, OpInfo)) {
6227 const MachineOperand *UsedLiteral = nullptr;
6228
6229 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
6230 int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 1 : 0;
6231
6232 // TODO: Be more permissive with frame indexes.
6233 if (!MO->isReg() && !isInlineConstant(*MO, OpInfo)) {
6234 if (!LiteralLimit--)
6235 return false;
6236
6237 UsedLiteral = MO;
6238 }
6239
6241 if (MO->isReg())
6242 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
6243
6244 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6245 if (i == OpIdx)
6246 continue;
6247 const MachineOperand &Op = MI.getOperand(i);
6248 if (Op.isReg()) {
6249 if (Op.isUse()) {
6250 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
6251 if (regUsesConstantBus(Op, MRI) && SGPRsUsed.insert(SGPR).second) {
6252 if (--ConstantBusLimit <= 0)
6253 return false;
6254 }
6255 }
6256 } else if (AMDGPU::isSISrcOperand(InstDesc.operands()[i]) &&
6257 !isInlineConstant(Op, InstDesc.operands()[i])) {
6258 // The same literal may be used multiple times.
6259 if (!UsedLiteral)
6260 UsedLiteral = &Op;
6261 else if (UsedLiteral->isIdenticalTo(Op))
6262 continue;
6263
6264 if (!LiteralLimit--)
6265 return false;
6266 if (--ConstantBusLimit <= 0)
6267 return false;
6268 }
6269 }
6270 } else if (!IsInlineConst && !MO->isReg() && isSALU(MI)) {
6271 // There can be at most one literal operand, but it can be repeated.
6272 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6273 if (i == OpIdx)
6274 continue;
6275 const MachineOperand &Op = MI.getOperand(i);
6276 if (!Op.isReg() && !Op.isFI() && !Op.isRegMask() &&
6277 !isInlineConstant(Op, InstDesc.operands()[i]) &&
6278 !Op.isIdenticalTo(*MO))
6279 return false;
6280
6281 // Do not fold a non-inlineable and non-register operand into an
6282 // instruction that already has a frame index. The frame index handling
6283 // code cannot cope with a frame index that co-exists with another
6284 // non-register operand, unless that operand is an inlineable immediate.
6285 if (Op.isFI())
6286 return false;
6287 }
6288 } else if (IsInlineConst && ST.hasNoF16PseudoScalarTransInlineConstants() &&
6289 isF16PseudoScalarTrans(MI.getOpcode())) {
6290 return false;
6291 }
6292
6293 if (MO->isReg()) {
6294 if (!DefinedRC)
6295 return OpInfo.OperandType == MCOI::OPERAND_UNKNOWN;
6296 return isLegalRegOperand(MI, OpIdx, *MO);
6297 }
6298
6299 if (MO->isImm()) {
6300 uint64_t Imm = MO->getImm();
6301 bool Is64BitFPOp = OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_FP64;
6302 bool Is64BitOp = Is64BitFPOp ||
6303 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
6304 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2INT32 ||
6305 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP32;
6306 if (Is64BitOp &&
6307 !AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm())) {
6308 if (!AMDGPU::isValid32BitLiteral(Imm, Is64BitFPOp) &&
6309 (!ST.has64BitLiterals() || InstDesc.getSize() != 4))
6310 return false;
6311
6312 // FIXME: We can use sign extended 64-bit literals, but only for signed
6313 // operands. At the moment we do not know if an operand is signed.
6314 // Such operand will be encoded as its low 32 bits and then either
6315 // correctly sign extended or incorrectly zero extended by HW.
6316 // If 64-bit literals are supported and the literal will be encoded
6317 // as full 64 bit we still can use it.
6318 if (!Is64BitFPOp && (int32_t)Imm < 0 &&
6319 (!ST.has64BitLiterals() || AMDGPU::isValid32BitLiteral(Imm, false)))
6320 return false;
6321 }
6322 }
6323
6324 // Handle non-register types that are treated like immediates.
6325 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
6326
6327 if (!DefinedRC) {
6328 // This operand expects an immediate.
6329 return true;
6330 }
6331
6332 return isImmOperandLegal(MI, OpIdx, *MO);
6333}
6334
6335 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
6336 MachineInstr &MI) const {
6337 unsigned Opc = MI.getOpcode();
6338 const MCInstrDesc &InstrDesc = get(Opc);
6339
6340 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6341 MachineOperand &Src0 = MI.getOperand(Src0Idx);
6342
6343 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6344 MachineOperand &Src1 = MI.getOperand(Src1Idx);
6345
6346 // If there is an implicit SGPR use such as the VCC use of v_addc_u32/
6347 // v_subb_u32, we may use only one constant bus operand in total before GFX10.
6348 bool HasImplicitSGPR = findImplicitSGPRRead(MI);
6349 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && Src0.isReg() &&
6350 RI.isSGPRReg(MRI, Src0.getReg()))
6351 legalizeOpWithMove(MI, Src0Idx);
6352
6353 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
6354 // both the value to write (src0) and lane select (src1). Fix up non-SGPR
6355 // src0/src1 with V_READFIRSTLANE.
6356 if (Opc == AMDGPU::V_WRITELANE_B32) {
6357 const DebugLoc &DL = MI.getDebugLoc();
6358 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
6359 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6360 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6361 .add(Src0);
6362 Src0.ChangeToRegister(Reg, false);
6363 }
6364 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
6365 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6366 const DebugLoc &DL = MI.getDebugLoc();
6367 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6368 .add(Src1);
6369 Src1.ChangeToRegister(Reg, false);
6370 }
6371 return;
6372 }
6373
6374 // Special case: V_FMAC_F32 and V_FMAC_F16 have src2.
6375 if (Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F16_e32) {
6376 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
6377 if (!RI.isVGPR(MRI, MI.getOperand(Src2Idx).getReg()))
6378 legalizeOpWithMove(MI, Src2Idx);
6379 }
6380
6381 // VOP2 src0 operands support all operand types, so we don't need to check
6382 // their legality. If src1 is already legal, we don't need to do anything.
6383 if (isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src1))
6384 return;
6385
6386 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
6387 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
6388 // select is uniform.
6389 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
6390 RI.isVGPR(MRI, Src1.getReg())) {
6391 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6392 const DebugLoc &DL = MI.getDebugLoc();
6393 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6394 .add(Src1);
6395 Src1.ChangeToRegister(Reg, false);
6396 return;
6397 }
6398
6399 // We do not use commuteInstruction here because it is too aggressive and will
6400 // commute whenever possible. We only want to commute here if it improves
6401 // legality. This can be called a fairly large number of times so don't waste
6402 // compile time pointlessly swapping and checking legality again.
6403 if (HasImplicitSGPR || !MI.isCommutable()) {
6404 legalizeOpWithMove(MI, Src1Idx);
6405 return;
6406 }
6407
6408 // If src0 can be used as src1, commuting will make the operands legal.
6409 // Otherwise we have to give up and insert a move.
6410 //
6411 // TODO: Other immediate-like operand kinds could be commuted if there was a
6412 // MachineOperand::ChangeTo* for them.
6413 if ((!Src1.isImm() && !Src1.isReg()) ||
6414 !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) {
6415 legalizeOpWithMove(MI, Src1Idx);
6416 return;
6417 }
6418
6419 int CommutedOpc = commuteOpcode(MI);
6420 if (CommutedOpc == -1) {
6421 legalizeOpWithMove(MI, Src1Idx);
6422 return;
6423 }
6424
6425 MI.setDesc(get(CommutedOpc));
6426
6427 Register Src0Reg = Src0.getReg();
6428 unsigned Src0SubReg = Src0.getSubReg();
6429 bool Src0Kill = Src0.isKill();
6430
6431 if (Src1.isImm())
6432 Src0.ChangeToImmediate(Src1.getImm());
6433 else if (Src1.isReg()) {
6434 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
6435 Src0.setSubReg(Src1.getSubReg());
6436 } else
6437 llvm_unreachable("Should only have register or immediate operands");
6438
6439 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
6440 Src1.setSubReg(Src0SubReg);
6441 fixImplicitOperands(MI);
6442}
6443
6444// Legalize VOP3 operands. All operand types are supported for any operand
6445// but only one literal constant and only starting from GFX10.
6446 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
6447 MachineInstr &MI) const {
6448 unsigned Opc = MI.getOpcode();
6449
6450 int VOP3Idx[3] = {
6451 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
6452 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
6453 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
6454 };
6455
6456 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
6457 Opc == AMDGPU::V_PERMLANEX16_B32_e64 ||
6458 Opc == AMDGPU::V_PERMLANE_BCAST_B32_e64 ||
6459 Opc == AMDGPU::V_PERMLANE_UP_B32_e64 ||
6460 Opc == AMDGPU::V_PERMLANE_DOWN_B32_e64 ||
6461 Opc == AMDGPU::V_PERMLANE_XOR_B32_e64 ||
6462 Opc == AMDGPU::V_PERMLANE_IDX_GEN_B32_e64) {
6463 // src1 and src2 must be scalar
6464 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
6465 const DebugLoc &DL = MI.getDebugLoc();
6466 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
6467 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6468 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6469 .add(Src1);
6470 Src1.ChangeToRegister(Reg, false);
6471 }
6472 if (VOP3Idx[2] != -1) {
6473 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
6474 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
6475 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6476 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6477 .add(Src2);
6478 Src2.ChangeToRegister(Reg, false);
6479 }
6480 }
6481 }
6482
6483 // Find the one SGPR operand we are allowed to use.
6484 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
6485 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
6486 SmallDenseSet<unsigned> SGPRsUsed;
6487 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
6488 if (SGPRReg) {
6489 SGPRsUsed.insert(SGPRReg);
6490 --ConstantBusLimit;
6491 }
6492
6493 for (int Idx : VOP3Idx) {
6494 if (Idx == -1)
6495 break;
6496 MachineOperand &MO = MI.getOperand(Idx);
6497
6498 if (!MO.isReg()) {
6499 if (isInlineConstant(MO, get(Opc).operands()[Idx]))
6500 continue;
6501
6502 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
6503 --LiteralLimit;
6504 --ConstantBusLimit;
6505 continue;
6506 }
6507
6508 --LiteralLimit;
6509 --ConstantBusLimit;
6510 legalizeOpWithMove(MI, Idx);
6511 continue;
6512 }
6513
6514 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
6515 continue; // VGPRs are legal
6516
6517 // We can use one SGPR in each VOP3 instruction prior to GFX10
6518 // and two starting from GFX10.
6519 if (SGPRsUsed.count(MO.getReg()))
6520 continue;
6521 if (ConstantBusLimit > 0) {
6522 SGPRsUsed.insert(MO.getReg());
6523 --ConstantBusLimit;
6524 continue;
6525 }
6526
6527 // If we make it this far, then the operand is not legal and we must
6528 // legalize it.
6529 legalizeOpWithMove(MI, Idx);
6530 }
6531
6532 // Special case: V_FMAC_F32 and V_FMAC_F16 have src2 tied to vdst.
6533 if ((Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
6534 !RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg()))
6535 legalizeOpWithMove(MI, VOP3Idx[2]);
6536
6537 // Fix the register class of packed FP32 instructions on gfx12+. See
6538 // SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more information.
6539 if (AMDGPU::isPackedFP32Inst(Opc) && AMDGPU::isGFX12Plus(ST)) {
6540 for (unsigned I = 0; I < 3; ++I) {
6541 if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I))
6542 legalizeOpWithMove(MI, VOP3Idx[I]);
6543 }
6544 }
6545}
6546
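// Copy a VGPR value into an SGPR of the same width by emitting one
// V_READFIRSTLANE_B32 per 32-bit channel and merging the pieces with a
// REG_SEQUENCE. AGPR sources are first copied to VGPRs.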
6547 Register SIInstrInfo::readlaneVGPRToSGPR(
6548 Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI,
6549 const TargetRegisterClass *DstRC /*=nullptr*/) const {
6550 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
6551 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
6552 if (DstRC)
6553 SRC = RI.getCommonSubClass(SRC, DstRC);
6554
6555 Register DstReg = MRI.createVirtualRegister(SRC);
6556 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
6557
6558 if (RI.hasAGPRs(VRC)) {
6559 VRC = RI.getEquivalentVGPRClass(VRC);
6560 Register NewSrcReg = MRI.createVirtualRegister(VRC);
6561 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6562 get(TargetOpcode::COPY), NewSrcReg)
6563 .addReg(SrcReg);
6564 SrcReg = NewSrcReg;
6565 }
6566
6567 if (SubRegs == 1) {
6568 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6569 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
6570 .addReg(SrcReg);
6571 return DstReg;
6572 }
6573
6574 SmallVector<Register, 8> SRegs;
6575 for (unsigned i = 0; i < SubRegs; ++i) {
6576 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
6577 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6578 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
6579 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
6580 SRegs.push_back(SGPR);
6581 }
6582
6583 MachineInstrBuilder MIB =
6584 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6585 get(AMDGPU::REG_SEQUENCE), DstReg);
6586 for (unsigned i = 0; i < SubRegs; ++i) {
6587 MIB.addReg(SRegs[i]);
6588 MIB.addImm(RI.getSubRegFromChannel(i));
6589 }
6590 return DstReg;
6591}
6592
6593 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
6594 MachineInstr &MI) const {
6595
6596 // If the pointer is stored in VGPRs, then we need to move it to
6597 // SGPRs using v_readfirstlane. This is safe because we only select
6598 // loads with uniform pointers to SMRD instructions, so we know the
6599 // pointer value is uniform.
6600 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
6601 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
6602 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
6603 SBase->setReg(SGPR);
6604 }
6605 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soffset);
6606 if (SOff && !RI.isSGPRReg(MRI, SOff->getReg())) {
6607 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
6608 SOff->setReg(SGPR);
6609 }
6610}
6611
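// Try to rewrite a FLAT global/scratch instruction whose saddr operand holds
// a VGPR into the vaddr form of the instruction, folding away an existing
// zero vaddr. Returns true if the instruction was changed.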
6612 bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
6613 unsigned Opc = Inst.getOpcode();
6614 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
6615 if (OldSAddrIdx < 0)
6616 return false;
6617
6618 assert(isSegmentSpecificFLAT(Inst) || (isFLAT(Inst) && ST.hasFlatGVSMode()));
6619
6620 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
6621 if (NewOpc < 0)
6622 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
6623 if (NewOpc < 0)
6624 return false;
6625
6626 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
6627 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
6628 if (RI.isSGPRReg(MRI, SAddr.getReg()))
6629 return false;
6630
6631 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
6632 if (NewVAddrIdx < 0)
6633 return false;
6634
6635 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
6636
6637 // Check vaddr, it shall be zero or absent.
6638 MachineInstr *VAddrDef = nullptr;
6639 if (OldVAddrIdx >= 0) {
6640 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
6641 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
6642 if (!VAddrDef || !VAddrDef->isMoveImmediate() ||
6643 !VAddrDef->getOperand(1).isImm() ||
6644 VAddrDef->getOperand(1).getImm() != 0)
6645 return false;
6646 }
6647
6648 const MCInstrDesc &NewDesc = get(NewOpc);
6649 Inst.setDesc(NewDesc);
6650
6651 // Callers expect iterator to be valid after this call, so modify the
6652 // instruction in place.
6653 if (OldVAddrIdx == NewVAddrIdx) {
6654 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
6655 // Clear use list from the old vaddr holding a zero register.
6656 MRI.removeRegOperandFromUseList(&NewVAddr);
6657 MRI.moveOperands(&NewVAddr, &SAddr, 1);
6658 Inst.removeOperand(OldSAddrIdx);
6659 // Update the use list with the pointer we have just moved from vaddr to
6660 // saddr position. Otherwise new vaddr will be missing from the use list.
6661 MRI.removeRegOperandFromUseList(&NewVAddr);
6662 MRI.addRegOperandToUseList(&NewVAddr);
6663 } else {
6664 assert(OldSAddrIdx == NewVAddrIdx);
6665
6666 if (OldVAddrIdx >= 0) {
6667 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
6668 AMDGPU::OpName::vdst_in);
6669
6670 // removeOperand doesn't try to fix up tied operand indexes as it goes, so
6671 // it asserts. Untie the operands for now and retie them afterwards.
6672 if (NewVDstIn != -1) {
6673 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
6674 Inst.untieRegOperand(OldVDstIn);
6675 }
6676
6677 Inst.removeOperand(OldVAddrIdx);
6678
6679 if (NewVDstIn != -1) {
6680 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
6681 Inst.tieOperands(NewVDst, NewVDstIn);
6682 }
6683 }
6684 }
6685
6686 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
6687 VAddrDef->eraseFromParent();
6688
6689 return true;
6690}
6691
6692// FIXME: Remove this when SelectionDAG is obsoleted.
6693 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
6694 MachineInstr &MI) const {
6695 if (!isSegmentSpecificFLAT(MI) && !ST.hasFlatGVSMode())
6696 return;
6697
6698 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
6699 // thinks they are uniform, so a readfirstlane should be valid.
6700 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
6701 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
6702 return;
6703
6704 if (moveFlatAddrToVGPR(MI))
6705 return;
6706
6707 const TargetRegisterClass *DeclaredRC =
6708 getRegClass(MI.getDesc(), SAddr->getOperandNo(), &RI);
6709
6710 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI, DeclaredRC);
6711 SAddr->setReg(ToSGPR);
6712}
6713
6714 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
6715 MachineBasicBlock::iterator I,
6716 const TargetRegisterClass *DstRC,
6717 MachineOperand &Op,
6718 MachineRegisterInfo &MRI,
6719 const DebugLoc &DL) const {
6720 Register OpReg = Op.getReg();
6721 unsigned OpSubReg = Op.getSubReg();
6722
6723 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
6724 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
6725
6726 // Check if operand is already the correct register class.
6727 if (DstRC == OpRC)
6728 return;
6729
6730 Register DstReg = MRI.createVirtualRegister(DstRC);
6731 auto Copy =
6732 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).addReg(OpReg);
6733 Op.setReg(DstReg);
6734
6735 MachineInstr *Def = MRI.getVRegDef(OpReg);
6736 if (!Def)
6737 return;
6738
6739 // Try to eliminate the copy if it is copying an immediate value.
6740 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
6741 foldImmediate(*Copy, *Def, OpReg, &MRI);
6742
6743 bool ImpDef = Def->isImplicitDef();
6744 while (!ImpDef && Def && Def->isCopy()) {
6745 if (Def->getOperand(1).getReg().isPhysical())
6746 break;
6747 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
6748 ImpDef = Def && Def->isImplicitDef();
6749 }
6750 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
6751 !ImpDef)
6752 Copy.addReg(AMDGPU::EXEC, RegState::Implicit);
6753}
6754
6755// Emit the actual waterfall loop, executing the wrapped instruction for each
6756// unique value of \p ScalarOps across all lanes. In the best case we execute 1
6757// iteration, in the worst case we execute 64 (once per lane).
6758static void
6759 emitLoadScalarOpsFromVGPRLoop(const SIInstrInfo &TII,
6760 MachineRegisterInfo &MRI,
6761 MachineBasicBlock &LoopBB,
6762 MachineBasicBlock &BodyBB,
6763 const DebugLoc &DL,
6764 ArrayRef<MachineOperand *> ScalarOps) {
6765 MachineFunction &MF = *LoopBB.getParent();
6766 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6767 const SIRegisterInfo *TRI = ST.getRegisterInfo();
6768 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
6769 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
6770 MachineBasicBlock::iterator I = LoopBB.begin();
6771
6772 Register CondReg;
6773
6774 for (MachineOperand *ScalarOp : ScalarOps) {
6775 unsigned RegSize = TRI->getRegSizeInBits(ScalarOp->getReg(), MRI);
6776 unsigned NumSubRegs = RegSize / 32;
6777 Register VScalarOp = ScalarOp->getReg();
6778
6779 if (NumSubRegs == 1) {
6780 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6781
6782 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurReg)
6783 .addReg(VScalarOp);
6784
6785 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
6786
6787 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U32_e64), NewCondReg)
6788 .addReg(CurReg)
6789 .addReg(VScalarOp);
6790
6791 // Combine the comparison results with AND.
6792 if (!CondReg) // First.
6793 CondReg = NewCondReg;
6794 else { // If not the first, we create an AND.
6795 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
6796 BuildMI(LoopBB, I, DL, TII.get(LMC.AndOpc), AndReg)
6797 .addReg(CondReg)
6798 .addReg(NewCondReg);
6799 CondReg = AndReg;
6800 }
6801
6802 // Update ScalarOp operand to use the SGPR ScalarOp.
6803 ScalarOp->setReg(CurReg);
6804 ScalarOp->setIsKill();
6805 } else {
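// Wide scalar operands are read back 64 bits at a time: readfirstlane the
// two 32-bit halves, then compare whole 64-bit chunks against the original
// VGPR value.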
6806 SmallVector<Register, 8> ReadlanePieces;
6807 unsigned VScalarOpUndef = getUndefRegState(ScalarOp->isUndef());
6808 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 &&
6809 "Unhandled register size");
6810
6811 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
6812 Register CurRegLo =
6813 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6814 Register CurRegHi =
6815 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6816
6817 // Read the next variant <- also loop target.
6818 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
6819 .addReg(VScalarOp, VScalarOpUndef, TRI->getSubRegFromChannel(Idx));
6820
6821 // Read the next variant <- also loop target.
6822 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
6823 .addReg(VScalarOp, VScalarOpUndef,
6824 TRI->getSubRegFromChannel(Idx + 1));
6825
6826 ReadlanePieces.push_back(CurRegLo);
6827 ReadlanePieces.push_back(CurRegHi);
6828
6829 // Comparison is to be done as 64-bit.
6830 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
6831 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
6832 .addReg(CurRegLo)
6833 .addImm(AMDGPU::sub0)
6834 .addReg(CurRegHi)
6835 .addImm(AMDGPU::sub1);
6836
6837 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
6838 auto Cmp = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64),
6839 NewCondReg)
6840 .addReg(CurReg);
6841 if (NumSubRegs <= 2)
6842 Cmp.addReg(VScalarOp);
6843 else
6844 Cmp.addReg(VScalarOp, VScalarOpUndef,
6845 TRI->getSubRegFromChannel(Idx, 2));
6846
6847 // Combine the comparison results with AND.
6848 if (!CondReg) // First.
6849 CondReg = NewCondReg;
6850 else { // If not the first, we create an AND.
6851 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
6852 BuildMI(LoopBB, I, DL, TII.get(LMC.AndOpc), AndReg)
6853 .addReg(CondReg)
6854 .addReg(NewCondReg);
6855 CondReg = AndReg;
6856 }
6857 } // End for loop.
6858
6859 const auto *SScalarOpRC =
6860 TRI->getEquivalentSGPRClass(MRI.getRegClass(VScalarOp));
6861 Register SScalarOp = MRI.createVirtualRegister(SScalarOpRC);
6862
6863 // Build scalar ScalarOp.
6864 auto Merge =
6865 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SScalarOp);
6866 unsigned Channel = 0;
6867 for (Register Piece : ReadlanePieces) {
6868 Merge.addReg(Piece).addImm(TRI->getSubRegFromChannel(Channel++));
6869 }
6870
6871 // Update ScalarOp operand to use the SGPR ScalarOp.
6872 ScalarOp->setReg(SScalarOp);
6873 ScalarOp->setIsKill();
6874 }
6875 }
6876
6877 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
6878 MRI.setSimpleHint(SaveExec, CondReg);
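// The hint encourages the register allocator to assign SaveExec and CondReg
// to the same register, since CondReg is killed by the AND-saveexec below.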
6879
6880 // Update EXEC to matching lanes, saving original to SaveExec.
6881 BuildMI(LoopBB, I, DL, TII.get(LMC.AndSaveExecOpc), SaveExec)
6882 .addReg(CondReg, RegState::Kill);
6883
6884 // The original instruction is here; we insert the terminators after it.
6885 I = BodyBB.end();
6886
6887 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
6888 BuildMI(BodyBB, I, DL, TII.get(LMC.XorTermOpc), LMC.ExecReg)
6889 .addReg(LMC.ExecReg)
6890 .addReg(SaveExec);
6891
6892 BuildMI(BodyBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB);
6893}
6894
6895// Build a waterfall loop around \p MI, replacing the VGPR \p ScalarOp register
6896// with SGPRs by iterating over all unique values across all lanes.
6897// Returns the loop basic block that now contains \p MI.
6898static MachineBasicBlock *
6899 loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
6900 ArrayRef<MachineOperand *> ScalarOps,
6901 MachineDominatorTree *MDT,
6902 MachineBasicBlock::iterator Begin = nullptr,
6903 MachineBasicBlock::iterator End = nullptr) {
6904 MachineBasicBlock &MBB = *MI.getParent();
6905 MachineFunction &MF = *MBB.getParent();
6906 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6907 const SIRegisterInfo *TRI = ST.getRegisterInfo();
6908 MachineRegisterInfo &MRI = MF.getRegInfo();
6909 if (!Begin.isValid())
6910 Begin = &MI;
6911 if (!End.isValid()) {
6912 End = &MI;
6913 ++End;
6914 }
6915 const DebugLoc &DL = MI.getDebugLoc();
6916 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
6917 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
6918
6919 // Save SCC. Waterfall Loop may overwrite SCC.
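// SCC cannot be copied directly, so it is materialized into an SGPR with
// S_CSELECT_B32 1, 0 here and rebuilt after the loop with S_CMP_LG_U32 0.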
6920 Register SaveSCCReg;
6921
6922 // FIXME: We should maintain SCC liveness while doing the FixSGPRCopies walk
6923 // rather than doing an unbounded scan everywhere.
6924 bool SCCNotDead =
6925 MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI,
6926 std::numeric_limits<unsigned>::max()) !=
6927 MachineBasicBlock::LQR_Dead;
6928 if (SCCNotDead) {
6929 SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6930 BuildMI(MBB, Begin, DL, TII.get(AMDGPU::S_CSELECT_B32), SaveSCCReg)
6931 .addImm(1)
6932 .addImm(0);
6933 }
6934
6935 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
6936
6937 // Save the EXEC mask
6938 BuildMI(MBB, Begin, DL, TII.get(LMC.MovOpc), SaveExec).addReg(LMC.ExecReg);
6939
6940 // Killed uses in the instruction we are waterfalling around will be
6941 // incorrect due to the added control-flow.
6942 MachineBasicBlock::iterator AfterMI = MI;
6943 ++AfterMI;
6944 for (auto I = Begin; I != AfterMI; I++) {
6945 for (auto &MO : I->all_uses())
6946 MRI.clearKillFlags(MO.getReg());
6947 }
6948
6949 // To insert the loop we need to split the block. Move everything after this
6950 // point to a new block, and insert a new empty block between the two.
6951 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
6952 MachineBasicBlock *BodyBB = MF.CreateMachineBasicBlock();
6953 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
6954 MachineFunction::iterator MBBI(MBB);
6955 ++MBBI;
6956
6957 MF.insert(MBBI, LoopBB);
6958 MF.insert(MBBI, BodyBB);
6959 MF.insert(MBBI, RemainderBB);
6960
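// Wire up the CFG: MBB falls through to LoopBB, LoopBB runs into BodyBB, and
// BodyBB either loops back to LoopBB (lanes remain) or exits to RemainderBB.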
6961 LoopBB->addSuccessor(BodyBB);
6962 BodyBB->addSuccessor(LoopBB);
6963 BodyBB->addSuccessor(RemainderBB);
6964
6965 // Move the instructions in the range [Begin, End) into BodyBB, and the
6966 // remainder of the block into RemainderBB.
6967 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
6968 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
6969 BodyBB->splice(BodyBB->begin(), &MBB, Begin, MBB.end());
6970
6971 MBB.addSuccessor(LoopBB);
6972
6973 // Update dominators. We know that MBB immediately dominates LoopBB, that
6974 // LoopBB immediately dominates BodyBB, and BodyBB immediately dominates
6975 // RemainderBB. RemainderBB immediately dominates all of the successors
6976 // transferred to it from MBB that MBB used to properly dominate.
6977 if (MDT) {
6978 MDT->addNewBlock(LoopBB, &MBB);
6979 MDT->addNewBlock(BodyBB, LoopBB);
6980 MDT->addNewBlock(RemainderBB, BodyBB);
6981 for (auto &Succ : RemainderBB->successors()) {
6982 if (MDT->properlyDominates(&MBB, Succ)) {
6983 MDT->changeImmediateDominator(Succ, RemainderBB);
6984 }
6985 }
6986 }
6987
6988 emitLoadScalarOpsFromVGPRLoop(TII, MRI, *LoopBB, *BodyBB, DL, ScalarOps);
6989
6990 MachineBasicBlock::iterator First = RemainderBB->begin();
6991 // Restore SCC
6992 if (SCCNotDead) {
6993 BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_CMP_LG_U32))
6994 .addReg(SaveSCCReg, RegState::Kill)
6995 .addImm(0);
6996 }
6997
6998 // Restore the EXEC mask
6999 BuildMI(*RemainderBB, First, DL, TII.get(LMC.MovOpc), LMC.ExecReg)
7000 .addReg(SaveExec);
7001 return BodyBB;
7002}
7003
7004// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
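// The 64-bit base pointer occupies the low two dwords of the 128-bit
// descriptor; the replacement descriptor is all zeros apart from the default
// data-format bits, so addressing is carried entirely by VADDR instead.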
7005static std::tuple<unsigned, unsigned>
7006 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
7007 MachineBasicBlock &MBB = *MI.getParent();
7008 MachineFunction &MF = *MBB.getParent();
7009 MachineRegisterInfo &MRI = MF.getRegInfo();
7010
7011 // Extract the ptr from the resource descriptor.
7012 unsigned RsrcPtr =
7013 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
7014 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
7015
7016 // Create an empty resource descriptor
7017 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
7018 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7019 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7020 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
7021 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
7022
7023 // Zero64 = 0
7024 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
7025 .addImm(0);
7026
7027 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
7028 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
7029 .addImm(Lo_32(RsrcDataFormat));
7030
7031 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
7032 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
7033 .addImm(Hi_32(RsrcDataFormat));
7034
7035 // NewSRsrc = {Zero64, SRsrcFormat}
7036 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
7037 .addReg(Zero64)
7038 .addImm(AMDGPU::sub0_sub1)
7039 .addReg(SRsrcFormatLo)
7040 .addImm(AMDGPU::sub2)
7041 .addReg(SRsrcFormatHi)
7042 .addImm(AMDGPU::sub3);
7043
7044 return std::tuple(RsrcPtr, NewSRsrc);
7045}
7046
7047 MachineBasicBlock *
7048 SIInstrInfo::legalizeOperands(MachineInstr &MI,
7049 MachineDominatorTree *MDT) const {
7050 MachineFunction &MF = *MI.getParent()->getParent();
7051 MachineRegisterInfo &MRI = MF.getRegInfo();
7052 MachineBasicBlock *CreatedBB = nullptr;
7053
7054 // Legalize VOP2
7055 if (isVOP2(MI) || isVOPC(MI)) {
7056 legalizeOperandsVOP2(MRI, MI);
7057 return CreatedBB;
7058 }
7059
7060 // Legalize VOP3
7061 if (isVOP3(MI)) {
7062 legalizeOperandsVOP3(MRI, MI);
7063 return CreatedBB;
7064 }
7065
7066 // Legalize SMRD
7067 if (isSMRD(MI)) {
7068 legalizeOperandsSMRD(MRI, MI);
7069 return CreatedBB;
7070 }
7071
7072 // Legalize FLAT
7073 if (isFLAT(MI)) {
7074 legalizeOperandsFLAT(MRI, MI);
7075 return CreatedBB;
7076 }
7077
7078 // Legalize REG_SEQUENCE and PHI
7079 // The register class of the operands must be the same type as the register
7080 // class of the output.
7081 if (MI.getOpcode() == AMDGPU::PHI) {
7082 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
7083 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
7084 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
7085 continue;
7086 const TargetRegisterClass *OpRC =
7087 MRI.getRegClass(MI.getOperand(i).getReg());
7088 if (RI.hasVectorRegisters(OpRC)) {
7089 VRC = OpRC;
7090 } else {
7091 SRC = OpRC;
7092 }
7093 }
7094
7095 // If any of the operands are VGPR registers, then they all must be;
7096 // otherwise we will create illegal VGPR->SGPR copies when legalizing
7097 // them.
7098 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
7099 if (!VRC) {
7100 assert(SRC);
7101 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
7102 VRC = &AMDGPU::VReg_1RegClass;
7103 } else
7104 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
7105 ? RI.getEquivalentAGPRClass(SRC)
7106 : RI.getEquivalentVGPRClass(SRC);
7107 } else {
7108 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
7109 ? RI.getEquivalentAGPRClass(VRC)
7110 : RI.getEquivalentVGPRClass(VRC);
7111 }
7112 RC = VRC;
7113 } else {
7114 RC = SRC;
7115 }
7116
7117 // Update all the operands so they have the same type.
7118 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7119 MachineOperand &Op = MI.getOperand(I);
7120 if (!Op.isReg() || !Op.getReg().isVirtual())
7121 continue;
7122
7123 // MI is a PHI instruction.
7124 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
7125 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
7126
7127 // Avoid creating no-op copies with the same src and dst reg class. These
7128 // confuse some of the machine passes.
7129 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
7130 }
7131 }
7132
7133 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
7134 // VGPR dest type and SGPR sources, insert copies so all operands are
7135 // VGPRs. This seems to help operand folding / the register coalescer.
7136 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
7137 MachineBasicBlock *MBB = MI.getParent();
7138 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
7139 if (RI.hasVGPRs(DstRC)) {
7140 // Update all the operands so they are VGPR register classes. These may
7141 // not be the same register class because REG_SEQUENCE supports mixing
7142 // subregister index types e.g. sub0_sub1 + sub2 + sub3
7143 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7144 MachineOperand &Op = MI.getOperand(I);
7145 if (!Op.isReg() || !Op.getReg().isVirtual())
7146 continue;
7147
7148 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
7149 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
7150 if (VRC == OpRC)
7151 continue;
7152
7153 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
7154 Op.setIsKill();
7155 }
7156 }
7157
7158 return CreatedBB;
7159 }
7160
7161 // Legalize INSERT_SUBREG
7162 // src0 must have the same register class as dst
7163 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
7164 Register Dst = MI.getOperand(0).getReg();
7165 Register Src0 = MI.getOperand(1).getReg();
7166 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
7167 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
7168 if (DstRC != Src0RC) {
7169 MachineBasicBlock *MBB = MI.getParent();
7170 MachineOperand &Op = MI.getOperand(1);
7171 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
7172 }
7173 return CreatedBB;
7174 }
7175
7176 // Legalize SI_INIT_M0
7177 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
7178 MachineOperand &Src = MI.getOperand(0);
7179 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7180 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7181 return CreatedBB;
7182 }
7183
7184 // Legalize S_BITREPLICATE, S_QUADMASK and S_WQM
7185 if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
7186 MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
7187 MI.getOpcode() == AMDGPU::S_QUADMASK_B64 ||
7188 MI.getOpcode() == AMDGPU::S_WQM_B32 ||
7189 MI.getOpcode() == AMDGPU::S_WQM_B64 ||
7190 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U32 ||
7191 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U64) {
7192 MachineOperand &Src = MI.getOperand(1);
7193 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7194 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7195 return CreatedBB;
7196 }
7197
7198 // Legalize MIMG/VIMAGE/VSAMPLE and MUBUF/MTBUF for shaders.
7199 //
7200 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
7201 // scratch memory access. In both cases, the legalization never involves
7202 // conversion to the addr64 form.
7203 if (isImage(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
7204 (isMUBUF(MI) || isMTBUF(MI)))) {
7205 AMDGPU::OpName RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI))
7206 ? AMDGPU::OpName::rsrc
7207 : AMDGPU::OpName::srsrc;
7208 MachineOperand *SRsrc = getNamedOperand(MI, RSrcOpName);
7209 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
7210 CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SRsrc}, MDT);
7211
7212 AMDGPU::OpName SampOpName =
7213 isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
7214 MachineOperand *SSamp = getNamedOperand(MI, SampOpName);
7215 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
7216 CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SSamp}, MDT);
7217
7218 return CreatedBB;
7219 }
7220
7221 // Legalize SI_CALL
7222 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
7223 MachineOperand *Dest = &MI.getOperand(0);
7224 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
7225 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and
7226 // following copies, we also need to move copies from and to physical
7227 // registers into the loop block.
7228 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
7229 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
7230
7231 // Also move the copies to physical registers into the loop block
7232 MachineBasicBlock &MBB = *MI.getParent();
7233 MachineBasicBlock::iterator Start(&MI);
7234 while (Start->getOpcode() != FrameSetupOpcode)
7235 --Start;
7236 MachineBasicBlock::iterator End(&MI);
7237 while (End->getOpcode() != FrameDestroyOpcode)
7238 ++End;
7239 // Also include following copies of the return value
7240 ++End;
7241 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
7242 MI.definesRegister(End->getOperand(1).getReg(), /*TRI=*/nullptr))
7243 ++End;
7244 CreatedBB =
7245 loadMBUFScalarOperandsFromVGPR(*this, MI, {Dest}, MDT, Start, End);
7246 }
7247 }
7248
7249 // Legalize s_sleep_var.
7250 if (MI.getOpcode() == AMDGPU::S_SLEEP_VAR) {
7251 const DebugLoc &DL = MI.getDebugLoc();
7252 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7253 int Src0Idx =
7254 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
7255 MachineOperand &Src0 = MI.getOperand(Src0Idx);
7256 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
7257 .add(Src0);
7258 Src0.ChangeToRegister(Reg, false);
7259 return nullptr;
7260 }
7261
7262 // Legalize TENSOR_LOAD_TO_LDS, TENSOR_LOAD_TO_LDS_D2, TENSOR_STORE_FROM_LDS,
7263 // TENSOR_STORE_FROM_LDS_D2. All their operands are scalar.
7264 if (MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS ||
7265 MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_D2 ||
7266 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS ||
7267 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_D2) {
7268 for (MachineOperand &Src : MI.explicit_operands()) {
7269 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7270 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7271 }
7272 return CreatedBB;
7273 }
7274
7275 // Legalize MUBUF instructions.
7276 bool isSoffsetLegal = true;
7277 int SoffsetIdx =
7278 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::soffset);
7279 if (SoffsetIdx != -1) {
7280 MachineOperand *Soffset = &MI.getOperand(SoffsetIdx);
7281 if (Soffset->isReg() && Soffset->getReg().isVirtual() &&
7282 !RI.isSGPRClass(MRI.getRegClass(Soffset->getReg()))) {
7283 isSoffsetLegal = false;
7284 }
7285 }
7286
7287 bool isRsrcLegal = true;
7288 int RsrcIdx =
7289 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
7290 if (RsrcIdx != -1) {
7291 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
7292 if (Rsrc->isReg() && !RI.isSGPRReg(MRI, Rsrc->getReg()))
7293 isRsrcLegal = false;
7294 }
7295
7296 // The operands are legal.
7297 if (isRsrcLegal && isSoffsetLegal)
7298 return CreatedBB;
7299
7300 if (!isRsrcLegal) {
7301 // Legalize a VGPR Rsrc
7302 //
7303 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
7304 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
7305 // a zero-value SRsrc.
7306 //
7307 // If the instruction is _OFFSET (both idxen and offen disabled), and we
7308 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
7309 // above.
7310 //
7311 // Otherwise we are on non-ADDR64 hardware, and/or we have
7312 // idxen/offen/bothen and we fall back to a waterfall loop.
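//
// Schematically, the ADDR64 path computes:
//   NewVAddr = VAddr + Rsrc[63:0]       (64-bit add, carried across dwords)
//   NewSRsrc = {0, 0, RSRC_DATA_FORMAT} (zero base, default format bits)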
7313
7314 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
7315 MachineBasicBlock &MBB = *MI.getParent();
7316
7317 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
7318 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
7319 // This is already an ADDR64 instruction so we need to add the pointer
7320 // extracted from the resource descriptor to the current value of VAddr.
7321 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7322 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7323 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7324
7325 const auto *BoolXExecRC = RI.getWaveMaskRegClass();
7326 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
7327 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
7328
7329 unsigned RsrcPtr, NewSRsrc;
7330 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
7331
7332 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
7333 const DebugLoc &DL = MI.getDebugLoc();
7334 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
7335 .addDef(CondReg0)
7336 .addReg(RsrcPtr, 0, AMDGPU::sub0)
7337 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
7338 .addImm(0);
7339
7340 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
7341 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
7342 .addDef(CondReg1, RegState::Dead)
7343 .addReg(RsrcPtr, 0, AMDGPU::sub1)
7344 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
7345 .addReg(CondReg0, RegState::Kill)
7346 .addImm(0);
7347
7348 // NewVaddr = {NewVaddrHi, NewVaddrLo}
7349 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
7350 .addReg(NewVAddrLo)
7351 .addImm(AMDGPU::sub0)
7352 .addReg(NewVAddrHi)
7353 .addImm(AMDGPU::sub1);
7354
7355 VAddr->setReg(NewVAddr);
7356 Rsrc->setReg(NewSRsrc);
7357 } else if (!VAddr && ST.hasAddr64()) {
7358 // This instruction is the _OFFSET variant, so we need to convert it to
7359 // ADDR64.
7360 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
7361 "FIXME: Need to emit flat atomics here");
7362
7363 unsigned RsrcPtr, NewSRsrc;
7364 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
7365
7366 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7367 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
7368 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
7369 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7370 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
7371
7372 // Atomics with return have an additional tied operand and are
7373 // missing some of the special bits.
7374 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
7375 MachineInstr *Addr64;
7376
7377 if (!VDataIn) {
7378 // Regular buffer load / store.
7379 MachineInstrBuilder MIB =
7380 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
7381 .add(*VData)
7382 .addReg(NewVAddr)
7383 .addReg(NewSRsrc)
7384 .add(*SOffset)
7385 .add(*Offset);
7386
7387 if (const MachineOperand *CPol =
7388 getNamedOperand(MI, AMDGPU::OpName::cpol)) {
7389 MIB.addImm(CPol->getImm());
7390 }
7391
7392 if (const MachineOperand *TFE =
7393 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
7394 MIB.addImm(TFE->getImm());
7395 }
7396
7397 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
7398
7399 MIB.cloneMemRefs(MI);
7400 Addr64 = MIB;
7401 } else {
7402 // Atomics with return.
7403 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
7404 .add(*VData)
7405 .add(*VDataIn)
7406 .addReg(NewVAddr)
7407 .addReg(NewSRsrc)
7408 .add(*SOffset)
7409 .add(*Offset)
7410 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol))
7411 .cloneMemRefs(MI);
7412 }
7413
7414 MI.removeFromParent();
7415
7416 // NewVaddr = {NewVaddrHi, NewVaddrLo}
7417 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
7418 NewVAddr)
7419 .addReg(RsrcPtr, 0, AMDGPU::sub0)
7420 .addImm(AMDGPU::sub0)
7421 .addReg(RsrcPtr, 0, AMDGPU::sub1)
7422 .addImm(AMDGPU::sub1);
7423 } else {
7424 // Legalize a VGPR Rsrc and soffset together.
7425 if (!isSoffsetLegal) {
7426 MachineOperand *Soffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7427 CreatedBB =
7428 loadMBUFScalarOperandsFromVGPR(*this, MI, {Rsrc, Soffset}, MDT);
7429 return CreatedBB;
7430 }
7431 CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {Rsrc}, MDT);
7432 return CreatedBB;
7433 }
7434 }
7435
7436 // Legalize a VGPR soffset.
7437 if (!isSoffsetLegal) {
7438 MachineOperand *Soffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7439 CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {Soffset}, MDT);
7440 return CreatedBB;
7441 }
7442 return CreatedBB;
7443}
7444
7445 void SIInstrWorklist::insert(MachineInstr *MI) {
7446 InstrList.insert(MI);
7447 // Add MBUF instructions to the deferred list.
7448 int RsrcIdx =
7449 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
7450 if (RsrcIdx != -1) {
7451 DeferredList.insert(MI);
7452 }
7453}
7454
7455 bool SIInstrWorklist::isDeferred(MachineInstr *MI) {
7456 return DeferredList.contains(MI);
7457}
7458
7459 // Legalize size mismatches between 16-bit and 32-bit registers in v2s copy
7460 // lowering (change sgpr to vgpr).
7461 // This is mainly caused by 16-bit SALU and 16-bit VALU using registers of
7462 // different sizes. We need to legalize the operand sizes during the vgpr
7463 // lowering chain. This can be removed once sgpr16 is in place.
7464 void SIInstrInfo::legalizeOperandsVALUt16(MachineInstr &MI, unsigned OpIdx,
7465 MachineRegisterInfo &MRI) const {
7466 if (!ST.useRealTrue16Insts())
7467 return;
7468
7469 unsigned Opcode = MI.getOpcode();
7470 MachineBasicBlock *MBB = MI.getParent();
7471 // Legalize operands and check for size mismatch
7472 if (!OpIdx || OpIdx >= MI.getNumExplicitOperands() ||
7473 OpIdx >= get(Opcode).getNumOperands() ||
7474 get(Opcode).operands()[OpIdx].RegClass == -1)
7475 return;
7476
7477 MachineOperand &Op = MI.getOperand(OpIdx);
7478 if (!Op.isReg() || !Op.getReg().isVirtual())
7479 return;
7480
7481 const TargetRegisterClass *CurrRC = MRI.getRegClass(Op.getReg());
7482 if (!RI.isVGPRClass(CurrRC))
7483 return;
7484
7485 unsigned RCID = get(Opcode).operands()[OpIdx].RegClass;
7486 const TargetRegisterClass *ExpectedRC = RI.getRegClass(RCID);
7487 if (RI.getMatchingSuperRegClass(CurrRC, ExpectedRC, AMDGPU::lo16)) {
7488 Op.setSubReg(AMDGPU::lo16);
7489 } else if (RI.getMatchingSuperRegClass(ExpectedRC, CurrRC, AMDGPU::lo16)) {
7490 const DebugLoc &DL = MI.getDebugLoc();
7491 Register NewDstReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7492 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
7493 BuildMI(*MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), Undef);
7494 BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDstReg)
7495 .addReg(Op.getReg())
7496 .addImm(AMDGPU::lo16)
7497 .addReg(Undef)
7498 .addImm(AMDGPU::hi16);
7499 Op.setReg(NewDstReg);
7500 }
7501}
7502 void SIInstrInfo::legalizeOperandsVALUt16(MachineInstr &MI,
7503 MachineRegisterInfo &MRI) const {
7504 for (unsigned OpIdx = 1; OpIdx < MI.getNumExplicitOperands(); OpIdx++)
7505 legalizeOperandsVALUt16(MI, OpIdx, MRI);
7506}
7507
7508 void SIInstrInfo::moveToVALU(SIInstrWorklist &Worklist,
7509 MachineDominatorTree *MDT) const {
7510
7511 while (!Worklist.empty()) {
7512 MachineInstr &Inst = *Worklist.top();
7513 Worklist.erase_top();
7514 // Skip MachineInstr in the deferred list.
7515 if (Worklist.isDeferred(&Inst))
7516 continue;
7517 moveToVALUImpl(Worklist, MDT, Inst);
7518 }
7519
7520 // Deferred list of instructions will be processed once
7521 // all the MachineInstr in the worklist are done.
7522 for (MachineInstr *Inst : Worklist.getDeferredList()) {
7523 moveToVALUImpl(Worklist, MDT, *Inst);
7524 assert(Worklist.empty() &&
7525 "Deferred MachineInstr are not supposed to re-populate worklist");
7526 }
7527}
7528
7529 void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
7530 MachineDominatorTree *MDT,
7531 MachineInstr &Inst) const {
7532
7533 MachineBasicBlock *MBB = Inst.getParent();
7534 if (!MBB)
7535 return;
7536 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
7537 unsigned Opcode = Inst.getOpcode();
7538 unsigned NewOpcode = getVALUOp(Inst);
7539 // Handle some special cases
7540 switch (Opcode) {
7541 default:
7542 break;
7543 case AMDGPU::S_ADD_I32:
7544 case AMDGPU::S_SUB_I32: {
7545 // FIXME: The u32 versions currently selected use the carry.
7546 bool Changed;
7547 MachineBasicBlock *CreatedBBTmp = nullptr;
7548 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
7549 if (Changed)
7550 return;
7551
7552 // Default handling
7553 break;
7554 }
7555
7556 case AMDGPU::S_MUL_U64:
7557 if (ST.hasVectorMulU64()) {
7558 NewOpcode = AMDGPU::V_MUL_U64_e64;
7559 break;
7560 }
7561 // Split s_mul_u64 in 32-bit vector multiplications.
7562 splitScalarSMulU64(Worklist, Inst, MDT);
7563 Inst.eraseFromParent();
7564 return;
7565
7566 case AMDGPU::S_MUL_U64_U32_PSEUDO:
7567 case AMDGPU::S_MUL_I64_I32_PSEUDO:
7568 // This is a special case of s_mul_u64 where all the operands are either
7569 // zero extended or sign extended.
7570 splitScalarSMulPseudo(Worklist, Inst, MDT);
7571 Inst.eraseFromParent();
7572 return;
7573
7574 case AMDGPU::S_AND_B64:
7575 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
7576 Inst.eraseFromParent();
7577 return;
7578
7579 case AMDGPU::S_OR_B64:
7580 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
7581 Inst.eraseFromParent();
7582 return;
7583
7584 case AMDGPU::S_XOR_B64:
7585 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
7586 Inst.eraseFromParent();
7587 return;
7588
7589 case AMDGPU::S_NAND_B64:
7590 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
7591 Inst.eraseFromParent();
7592 return;
7593
7594 case AMDGPU::S_NOR_B64:
7595 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
7596 Inst.eraseFromParent();
7597 return;
7598
7599 case AMDGPU::S_XNOR_B64:
7600 if (ST.hasDLInsts())
7601 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
7602 else
7603 splitScalar64BitXnor(Worklist, Inst, MDT);
7604 Inst.eraseFromParent();
7605 return;
7606
7607 case AMDGPU::S_ANDN2_B64:
7608 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
7609 Inst.eraseFromParent();
7610 return;
7611
7612 case AMDGPU::S_ORN2_B64:
7613 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
7614 Inst.eraseFromParent();
7615 return;
7616
7617 case AMDGPU::S_BREV_B64:
7618 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true);
7619 Inst.eraseFromParent();
7620 return;
7621
7622 case AMDGPU::S_NOT_B64:
7623 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
7624 Inst.eraseFromParent();
7625 return;
7626
7627 case AMDGPU::S_BCNT1_I32_B64:
7628 splitScalar64BitBCNT(Worklist, Inst);
7629 Inst.eraseFromParent();
7630 return;
7631
7632 case AMDGPU::S_BFE_I64:
7633 splitScalar64BitBFE(Worklist, Inst);
7634 Inst.eraseFromParent();
7635 return;
7636
7637 case AMDGPU::S_FLBIT_I32_B64:
7638 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBH_U32_e32);
7639 Inst.eraseFromParent();
7640 return;
7641 case AMDGPU::S_FF1_I32_B64:
7642 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBL_B32_e32);
7643 Inst.eraseFromParent();
7644 return;
7645
7646 case AMDGPU::S_LSHL_B32:
7647 if (ST.hasOnlyRevVALUShifts()) {
7648 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
7649 swapOperands(Inst);
7650 }
7651 break;
7652 case AMDGPU::S_ASHR_I32:
7653 if (ST.hasOnlyRevVALUShifts()) {
7654 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
7655 swapOperands(Inst);
7656 }
7657 break;
7658 case AMDGPU::S_LSHR_B32:
7659 if (ST.hasOnlyRevVALUShifts()) {
7660 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
7661 swapOperands(Inst);
7662 }
7663 break;
7664 case AMDGPU::S_LSHL_B64:
7665 if (ST.hasOnlyRevVALUShifts()) {
7666 NewOpcode = ST.getGeneration() >= AMDGPUSubtarget::GFX12
7667 ? AMDGPU::V_LSHLREV_B64_pseudo_e64
7668 : AMDGPU::V_LSHLREV_B64_e64;
7669 swapOperands(Inst);
7670 }
7671 break;
7672 case AMDGPU::S_ASHR_I64:
7673 if (ST.hasOnlyRevVALUShifts()) {
7674 NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
7675 swapOperands(Inst);
7676 }
7677 break;
7678 case AMDGPU::S_LSHR_B64:
7679 if (ST.hasOnlyRevVALUShifts()) {
7680 NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
7681 swapOperands(Inst);
7682 }
7683 break;
7684
7685 case AMDGPU::S_ABS_I32:
7686 lowerScalarAbs(Worklist, Inst);
7687 Inst.eraseFromParent();
7688 return;
7689
7690 case AMDGPU::S_CBRANCH_SCC0:
7691 case AMDGPU::S_CBRANCH_SCC1: {
7692 // Clear unused bits of vcc
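// (VCC = EXEC & cond, so lanes that are not currently active cannot
// influence the branch.)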
7693 Register CondReg = Inst.getOperand(1).getReg();
7694 bool IsSCC = CondReg == AMDGPU::SCC;
7695 const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
7696 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(LMC.AndOpc), LMC.VccReg)
7697 .addReg(LMC.ExecReg)
7698 .addReg(IsSCC ? LMC.VccReg : CondReg);
7699 Inst.removeOperand(1);
7700 } break;
7701
7702 case AMDGPU::S_BFE_U64:
7703 case AMDGPU::S_BFM_B64:
7704 llvm_unreachable("Moving this op to VALU not implemented");
7705
7706 case AMDGPU::S_PACK_LL_B32_B16:
7707 case AMDGPU::S_PACK_LH_B32_B16:
7708 case AMDGPU::S_PACK_HL_B32_B16:
7709 case AMDGPU::S_PACK_HH_B32_B16:
7710 movePackToVALU(Worklist, MRI, Inst);
7711 Inst.eraseFromParent();
7712 return;
7713
7714 case AMDGPU::S_XNOR_B32:
7715 lowerScalarXnor(Worklist, Inst);
7716 Inst.eraseFromParent();
7717 return;
7718
7719 case AMDGPU::S_NAND_B32:
7720 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
7721 Inst.eraseFromParent();
7722 return;
7723
7724 case AMDGPU::S_NOR_B32:
7725 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
7726 Inst.eraseFromParent();
7727 return;
7728
7729 case AMDGPU::S_ANDN2_B32:
7730 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
7731 Inst.eraseFromParent();
7732 return;
7733
7734 case AMDGPU::S_ORN2_B32:
7735 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
7736 Inst.eraseFromParent();
7737 return;
7738
7739 // TODO: remove as soon as everything is ready
7740 // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
7741 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
7742 // can only be selected from the uniform SDNode.
7743 case AMDGPU::S_ADD_CO_PSEUDO:
7744 case AMDGPU::S_SUB_CO_PSEUDO: {
7745 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
7746 ? AMDGPU::V_ADDC_U32_e64
7747 : AMDGPU::V_SUBB_U32_e64;
7748 const auto *CarryRC = RI.getWaveMaskRegClass();
7749
7750 Register CarryInReg = Inst.getOperand(4).getReg();
7751 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
7752 Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
7753 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
7754 .addReg(CarryInReg);
7755 }
7756
7757 Register CarryOutReg = Inst.getOperand(1).getReg();
7758
7759 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
7760 MRI.getRegClass(Inst.getOperand(0).getReg())));
7761 MachineInstr *CarryOp =
7762 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
7763 .addReg(CarryOutReg, RegState::Define)
7764 .add(Inst.getOperand(2))
7765 .add(Inst.getOperand(3))
7766 .addReg(CarryInReg)
7767 .addImm(0);
7768 legalizeOperands(*CarryOp);
7769 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
7770 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
7771 Inst.eraseFromParent();
7772 }
7773 return;
7774 case AMDGPU::S_UADDO_PSEUDO:
7775 case AMDGPU::S_USUBO_PSEUDO: {
7776 const DebugLoc &DL = Inst.getDebugLoc();
7777 MachineOperand &Dest0 = Inst.getOperand(0);
7778 MachineOperand &Dest1 = Inst.getOperand(1);
7779 MachineOperand &Src0 = Inst.getOperand(2);
7780 MachineOperand &Src1 = Inst.getOperand(3);
7781
7782 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
7783 ? AMDGPU::V_ADD_CO_U32_e64
7784 : AMDGPU::V_SUB_CO_U32_e64;
7785 const TargetRegisterClass *NewRC =
7786 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
7787 Register DestReg = MRI.createVirtualRegister(NewRC);
7788 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
7789 .addReg(Dest1.getReg(), RegState::Define)
7790 .add(Src0)
7791 .add(Src1)
7792 .addImm(0); // clamp bit
7793
7794 legalizeOperands(*NewInstr, MDT);
7795 MRI.replaceRegWith(Dest0.getReg(), DestReg);
7796 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
7797 Worklist);
7798 Inst.eraseFromParent();
7799 }
7800 return;
7801
7802 case AMDGPU::S_CSELECT_B32:
7803 case AMDGPU::S_CSELECT_B64:
7804 lowerSelect(Worklist, Inst, MDT);
7805 Inst.eraseFromParent();
7806 return;
7807 case AMDGPU::S_CMP_EQ_I32:
7808 case AMDGPU::S_CMP_LG_I32:
7809 case AMDGPU::S_CMP_GT_I32:
7810 case AMDGPU::S_CMP_GE_I32:
7811 case AMDGPU::S_CMP_LT_I32:
7812 case AMDGPU::S_CMP_LE_I32:
7813 case AMDGPU::S_CMP_EQ_U32:
7814 case AMDGPU::S_CMP_LG_U32:
7815 case AMDGPU::S_CMP_GT_U32:
7816 case AMDGPU::S_CMP_GE_U32:
7817 case AMDGPU::S_CMP_LT_U32:
7818 case AMDGPU::S_CMP_LE_U32:
7819 case AMDGPU::S_CMP_EQ_U64:
7820 case AMDGPU::S_CMP_LG_U64:
7821 case AMDGPU::S_CMP_LT_F32:
7822 case AMDGPU::S_CMP_EQ_F32:
7823 case AMDGPU::S_CMP_LE_F32:
7824 case AMDGPU::S_CMP_GT_F32:
7825 case AMDGPU::S_CMP_LG_F32:
7826 case AMDGPU::S_CMP_GE_F32:
7827 case AMDGPU::S_CMP_O_F32:
7828 case AMDGPU::S_CMP_U_F32:
7829 case AMDGPU::S_CMP_NGE_F32:
7830 case AMDGPU::S_CMP_NLG_F32:
7831 case AMDGPU::S_CMP_NGT_F32:
7832 case AMDGPU::S_CMP_NLE_F32:
7833 case AMDGPU::S_CMP_NEQ_F32:
7834 case AMDGPU::S_CMP_NLT_F32: {
7835 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
7836 auto NewInstr =
7837 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode), CondReg)
7838 .setMIFlags(Inst.getFlags());
7839 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src0_modifiers) >=
7840 0) {
7841 NewInstr
7842 .addImm(0) // src0_modifiers
7843 .add(Inst.getOperand(0)) // src0
7844 .addImm(0) // src1_modifiers
7845 .add(Inst.getOperand(1)) // src1
7846 .addImm(0); // clamp
7847 } else {
7848 NewInstr.add(Inst.getOperand(0)).add(Inst.getOperand(1));
7849 }
7850 legalizeOperands(*NewInstr, MDT);
7851 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC, /*TRI=*/nullptr);
7852 MachineOperand SCCOp = Inst.getOperand(SCCIdx);
7853 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
7854 Inst.eraseFromParent();
7855 return;
7856 }
7857 case AMDGPU::S_CMP_LT_F16:
7858 case AMDGPU::S_CMP_EQ_F16:
7859 case AMDGPU::S_CMP_LE_F16:
7860 case AMDGPU::S_CMP_GT_F16:
7861 case AMDGPU::S_CMP_LG_F16:
7862 case AMDGPU::S_CMP_GE_F16:
7863 case AMDGPU::S_CMP_O_F16:
7864 case AMDGPU::S_CMP_U_F16:
7865 case AMDGPU::S_CMP_NGE_F16:
7866 case AMDGPU::S_CMP_NLG_F16:
7867 case AMDGPU::S_CMP_NGT_F16:
7868 case AMDGPU::S_CMP_NLE_F16:
7869 case AMDGPU::S_CMP_NEQ_F16:
7870 case AMDGPU::S_CMP_NLT_F16: {
7871 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
7872 auto NewInstr =
7873 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode), CondReg)
7874 .setMIFlags(Inst.getFlags());
7875 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::src0_modifiers)) {
7876 NewInstr
7877 .addImm(0) // src0_modifiers
7878 .add(Inst.getOperand(0)) // src0
7879 .addImm(0) // src1_modifiers
7880 .add(Inst.getOperand(1)) // src1
7881 .addImm(0); // clamp
7882 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::op_sel))
7883 NewInstr.addImm(0); // op_sel0
7884 } else {
7885 NewInstr
7886 .add(Inst.getOperand(0))
7887 .add(Inst.getOperand(1));
7888 }
7889 legalizeOperandsVALUt16(*NewInstr, MRI);
7890 legalizeOperands(*NewInstr, MDT);
7891 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC, /*TRI=*/nullptr);
7892 MachineOperand SCCOp = Inst.getOperand(SCCIdx);
7893 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
7894 Inst.eraseFromParent();
7895 return;
7896 }
7897 case AMDGPU::S_CVT_HI_F32_F16: {
7898 const DebugLoc &DL = Inst.getDebugLoc();
7899 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7900 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7901 if (ST.useRealTrue16Insts()) {
7902 BuildMI(*MBB, Inst, DL, get(AMDGPU::COPY), TmpReg)
7903 .add(Inst.getOperand(1));
7904 BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
7905 .addImm(0) // src0_modifiers
7906 .addReg(TmpReg, 0, AMDGPU::hi16)
7907 .addImm(0) // clamp
7908 .addImm(0) // omod
7909 .addImm(0); // op_sel0
7910 } else {
7911 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
7912 .addImm(16)
7913 .add(Inst.getOperand(1));
7914 BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
7915 .addImm(0) // src0_modifiers
7916 .addReg(TmpReg)
7917 .addImm(0) // clamp
7918 .addImm(0); // omod
7919 }
7920
7921 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
7922 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
7923 Inst.eraseFromParent();
7924 return;
7925 }
7926 case AMDGPU::S_MINIMUM_F32:
7927 case AMDGPU::S_MAXIMUM_F32: {
7928 const DebugLoc &DL = Inst.getDebugLoc();
7929 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7930 MachineInstr *NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
7931 .addImm(0) // src0_modifiers
7932 .add(Inst.getOperand(1))
7933 .addImm(0) // src1_modifiers
7934 .add(Inst.getOperand(2))
7935 .addImm(0) // clamp
7936 .addImm(0); // omod
7937 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
7938
7939 legalizeOperands(*NewInstr, MDT);
7940 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
7941 Inst.eraseFromParent();
7942 return;
7943 }
7944 case AMDGPU::S_MINIMUM_F16:
7945 case AMDGPU::S_MAXIMUM_F16: {
7946 const DebugLoc &DL = Inst.getDebugLoc();
7947 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
7948 ? &AMDGPU::VGPR_16RegClass
7949 : &AMDGPU::VGPR_32RegClass);
7950 MachineInstr *NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
7951 .addImm(0) // src0_modifiers
7952 .add(Inst.getOperand(1))
7953 .addImm(0) // src1_modifiers
7954 .add(Inst.getOperand(2))
7955 .addImm(0) // clamp
7956 .addImm(0) // omod
7957 .addImm(0); // opsel0
7958 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
7959 legalizeOperandsVALUt16(*NewInstr, MRI);
7960 legalizeOperands(*NewInstr, MDT);
7961 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
7962 Inst.eraseFromParent();
7963 return;
7964 }
7965 case AMDGPU::V_S_EXP_F16_e64:
7966 case AMDGPU::V_S_LOG_F16_e64:
7967 case AMDGPU::V_S_RCP_F16_e64:
7968 case AMDGPU::V_S_RSQ_F16_e64:
7969 case AMDGPU::V_S_SQRT_F16_e64: {
7970 const DebugLoc &DL = Inst.getDebugLoc();
7971 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
7972 ? &AMDGPU::VGPR_16RegClass
7973 : &AMDGPU::VGPR_32RegClass);
7974 auto NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
7975 .add(Inst.getOperand(1)) // src0_modifiers
7976 .add(Inst.getOperand(2))
7977 .add(Inst.getOperand(3)) // clamp
7978 .add(Inst.getOperand(4)) // omod
7979 .setMIFlags(Inst.getFlags());
7980 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::op_sel))
7981 NewInstr.addImm(0); // opsel0
7982 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
7983 legalizeOperandsVALUt16(*NewInstr, MRI);
7984 legalizeOperands(*NewInstr, MDT);
7985 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
7986 Inst.eraseFromParent();
7987 return;
7988 }
7989 }
7990
7991 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
7992 // We cannot move this instruction to the VALU, so we should try to
7993 // legalize its operands instead.
7994 legalizeOperands(Inst, MDT);
7995 return;
7996 }
7997 // Handle converting generic instructions like COPY-to-SGPR into
7998 // COPY-to-VGPR.
7999 if (NewOpcode == Opcode) {
8000 Register DstReg = Inst.getOperand(0).getReg();
8001 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
8002
8003 // If it's a copy of a VGPR to a physical SGPR, insert a V_READFIRSTLANE and
8004 // hope for the best.
8005 if (Inst.isCopy() && DstReg.isPhysical() &&
8006 RI.isVGPR(MRI, Inst.getOperand(1).getReg())) {
8007 // TODO: Only works for 32 bit registers.
8008 if (MRI.constrainRegClass(DstReg, &AMDGPU::SReg_32_XM0RegClass)) {
8009 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8010 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
8011 .add(Inst.getOperand(1));
8012 } else {
8013 Register NewDst =
8014 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8015 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8016 get(AMDGPU::V_READFIRSTLANE_B32), NewDst)
8017 .add(Inst.getOperand(1));
8018 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY),
8019 DstReg)
8020 .addReg(NewDst);
8021 }
8022 Inst.eraseFromParent();
8023 return;
8024 }
8025
8026 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
8027 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
8028 // Instead of creating a copy where src and dst are the same register
8029 // class, we just replace all uses of dst with src. These kinds of
8030 // copies interfere with the heuristics MachineSink uses to decide
8031 // whether or not to split a critical edge, since the pass assumes
8032 // that copies will end up as machine instructions and not be
8033 // eliminated.
8034 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
8035 Register NewDstReg = Inst.getOperand(1).getReg();
8036 MRI.replaceRegWith(DstReg, NewDstReg);
8037 MRI.clearKillFlags(NewDstReg);
8038 Inst.getOperand(0).setReg(DstReg);
8039 Inst.eraseFromParent();
8040 // Legalize t16 operand since replaceReg is called after addUsersToVALU
8041 for (MachineOperand &MO :
8042 make_early_inc_range(MRI.use_operands(NewDstReg))) {
8043 legalizeOperandsVALUt16(*MO.getParent(), MRI);
8044 }
8045 return;
8046 }
8047
8048 // If this is a v2s copy between a 16-bit and a 32-bit reg,
8049 // replace the vgpr copy with a reg_sequence/extract_subreg.
8050 // This can be removed once sgpr16 is in place.
8051 if (ST.useRealTrue16Insts() && Inst.isCopy() &&
8052 Inst.getOperand(1).getReg().isVirtual() &&
8053 RI.isVGPR(MRI, Inst.getOperand(1).getReg())) {
8054 const TargetRegisterClass *SrcRegRC = getOpRegClass(Inst, 1);
8055 if (RI.getMatchingSuperRegClass(NewDstRC, SrcRegRC, AMDGPU::lo16)) {
8056 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8057 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
8058 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8059 get(AMDGPU::IMPLICIT_DEF), Undef);
8060 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8061 get(AMDGPU::REG_SEQUENCE), NewDstReg)
8062 .addReg(Inst.getOperand(1).getReg())
8063 .addImm(AMDGPU::lo16)
8064 .addReg(Undef)
8065 .addImm(AMDGPU::hi16);
8066 Inst.eraseFromParent();
8067 MRI.replaceRegWith(DstReg, NewDstReg);
8068 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8069 return;
8070 } else if (RI.getMatchingSuperRegClass(SrcRegRC, NewDstRC,
8071 AMDGPU::lo16)) {
8072 Inst.getOperand(1).setSubReg(AMDGPU::lo16);
8073 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8074 MRI.replaceRegWith(DstReg, NewDstReg);
8075 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8076 return;
8077 }
8078 }
8079
8080 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8081 MRI.replaceRegWith(DstReg, NewDstReg);
8082 legalizeOperands(Inst, MDT);
8083 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8084 return;
8085 }
8086
8087 // Use the new VALU Opcode.
8088 auto NewInstr = BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode))
8089 .setMIFlags(Inst.getFlags());
8090 if (isVOP3(NewOpcode) && !isVOP3(Opcode)) {
8091 // Intersperse VOP3 modifiers among the SALU operands.
8092 NewInstr->addOperand(Inst.getOperand(0));
8093 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8094 AMDGPU::OpName::src0_modifiers) >= 0)
8095 NewInstr.addImm(0);
8096 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::src0)) {
8097 MachineOperand Src = Inst.getOperand(1);
8098 NewInstr->addOperand(Src);
8099 }
8100
8101 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
8102 // We are converting these to a BFE, so we need to add the missing
8103 // operands for the size and offset.
8104 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
8105 NewInstr.addImm(0);
8106 NewInstr.addImm(Size);
8107 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
8108 // The VALU version adds the second operand to the result, so insert an
8109 // extra 0 operand.
8110 NewInstr.addImm(0);
8111 } else if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
8112 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
8113 // If we need to move this to VGPRs, we need to unpack the second
8114 // operand back into the 2 separate ones for bit offset and width.
8115 assert(OffsetWidthOp.isImm() &&
8116 "Scalar BFE is only implemented for constant width and offset");
8117 uint32_t Imm = OffsetWidthOp.getImm();
8118
8119 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
8120 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
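// For example, Imm == 0x80010 encodes offset 16 and width 8, i.e. the
// field src[23:16].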
8121 NewInstr.addImm(Offset);
8122 NewInstr.addImm(BitWidth);
8123 } else {
8124 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8125 AMDGPU::OpName::src1_modifiers) >= 0)
8126 NewInstr.addImm(0);
8127 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src1) >= 0)
8128 NewInstr->addOperand(Inst.getOperand(2));
8129 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8130 AMDGPU::OpName::src2_modifiers) >= 0)
8131 NewInstr.addImm(0);
8132 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src2) >= 0)
8133 NewInstr->addOperand(Inst.getOperand(3));
8134 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::clamp) >= 0)
8135 NewInstr.addImm(0);
8136 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::omod) >= 0)
8137 NewInstr.addImm(0);
8138 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::op_sel) >= 0)
8139 NewInstr.addImm(0);
8140 }
8141 } else {
8142 // Just copy the SALU operands.
8143 for (const MachineOperand &Op : Inst.explicit_operands())
8144 NewInstr->addOperand(Op);
8145 }
8146
8147 // Remove any references to SCC. Vector instructions can't read from it, and
8148 // we're just about to add the implicit use / defs of VCC, and we don't want
8149 // both.
8150 for (MachineOperand &Op : Inst.implicit_operands()) {
8151 if (Op.getReg() == AMDGPU::SCC) {
8152 // Only propagate through live-def of SCC.
8153 if (Op.isDef() && !Op.isDead())
8154 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
8155 if (Op.isUse())
8156 addSCCDefsToVALUWorklist(NewInstr, Worklist);
8157 }
8158 }
8159 Inst.eraseFromParent();
8160 Register NewDstReg;
8161 if (NewInstr->getOperand(0).isReg() && NewInstr->getOperand(0).isDef()) {
8162 Register DstReg = NewInstr->getOperand(0).getReg();
8163 assert(DstReg.isVirtual());
8164 // Update the destination register class.
8165 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*NewInstr);
8166 assert(NewDstRC);
8167 NewDstReg = MRI.createVirtualRegister(NewDstRC);
8168 MRI.replaceRegWith(DstReg, NewDstReg);
8169 }
8170 fixImplicitOperands(*NewInstr);
8171
8172 legalizeOperandsVALUt16(*NewInstr, MRI);
8173
8174 // Legalize the operands
8175 legalizeOperands(*NewInstr, MDT);
8176 if (NewDstReg)
8177 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8178}
8179
8180// Add/sub require special handling to deal with carry outs.
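// On subtargets with hasAddNoCarry() the scalar add/sub maps directly onto
// the carryless V_ADD_U32_e64 / V_SUB_U32_e64 below; otherwise we report
// failure and the caller falls through to the generic lowering.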
8181std::pair<bool, MachineBasicBlock *>
8182SIInstrInfo::moveScalarAddSub(SIInstrWorklist &Worklist, MachineInstr &Inst,
8183 MachineDominatorTree *MDT) const {
8184 if (ST.hasAddNoCarry()) {
8185 // Assume there is no user of scc since we don't select this in that case.
8186 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
8187 // is used.
8188
8189 MachineBasicBlock &MBB = *Inst.getParent();
8190 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8191
8192 Register OldDstReg = Inst.getOperand(0).getReg();
8193 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8194
8195 unsigned Opc = Inst.getOpcode();
8196 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
8197
8198 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
8199 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
8200
8201 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
8202 Inst.removeOperand(3);
8203
8204 Inst.setDesc(get(NewOpc));
8205 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
8206 Inst.addImplicitDefUseOperands(*MBB.getParent());
8207 MRI.replaceRegWith(OldDstReg, ResultReg);
8208 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);
8209
8210 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8211 return std::pair(true, NewBB);
8212 }
8213
8214 return std::pair(false, nullptr);
8215}
8216
8217void SIInstrInfo::lowerSelect(SIInstrWorklist &Worklist, MachineInstr &Inst,
8218 MachineDominatorTree *MDT) const {
8219
8220 MachineBasicBlock &MBB = *Inst.getParent();
8221 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8222 MachineBasicBlock::iterator MII = Inst;
8223 DebugLoc DL = Inst.getDebugLoc();
8224
8225 MachineOperand &Dest = Inst.getOperand(0);
8226 MachineOperand &Src0 = Inst.getOperand(1);
8227 MachineOperand &Src1 = Inst.getOperand(2);
8228 MachineOperand &Cond = Inst.getOperand(3);
8229
8230 Register CondReg = Cond.getReg();
8231 bool IsSCC = (CondReg == AMDGPU::SCC);
8232
8233 // If this is a trivial select where the condition is effectively not SCC
8234 // (CondReg is a source of copy to SCC), then the select is semantically
8235 // equivalent to copying CondReg. Hence, there is no need to create
8236 // V_CNDMASK, we can just use that and bail out.
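// (Selecting -1 when a mask bit is set and 0 when it is clear reproduces the
// mask itself, so Dest can simply be replaced with CondReg.)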
8237 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() &&
8238 (Src1.getImm() == 0)) {
8239 MRI.replaceRegWith(Dest.getReg(), CondReg);
8240 return;
8241 }
8242
8243 Register NewCondReg = CondReg;
8244 if (IsSCC) {
8245 const TargetRegisterClass *TC = RI.getWaveMaskRegClass();
8246 NewCondReg = MRI.createVirtualRegister(TC);
8247
8248 // Now look for the closest SCC def if it is a copy
8249 // replacing the CondReg with the COPY source register
8250 bool CopyFound = false;
8251 for (MachineInstr &CandI :
8252 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
8253 Inst.getParent()->rend())) {
8254 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) !=
8255 -1) {
8256 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
8257 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), NewCondReg)
8258 .addReg(CandI.getOperand(1).getReg());
8259 CopyFound = true;
8260 }
8261 break;
8262 }
8263 }
8264 if (!CopyFound) {
8265 // SCC def is not a copy
8266 // Insert a trivial select instead of creating a copy, because a copy from
8267 // SCC would semantically mean just copying a single bit, but we may need
8268 // the result to be a vector condition mask that needs preserving.
8269 unsigned Opcode =
8270 ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
8271 auto NewSelect =
8272 BuildMI(MBB, MII, DL, get(Opcode), NewCondReg).addImm(-1).addImm(0);
8273 NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
8274 }
8275 }
8276
8277 Register NewDestReg = MRI.createVirtualRegister(
8278 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg())));
8279 MachineInstr *NewInst;
8280 if (Inst.getOpcode() == AMDGPU::S_CSELECT_B32) {
8281 NewInst = BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), NewDestReg)
8282 .addImm(0)
8283 .add(Src1) // False
8284 .addImm(0)
8285 .add(Src0) // True
8286 .addReg(NewCondReg);
8287 } else {
8288 NewInst =
8289 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B64_PSEUDO), NewDestReg)
8290 .add(Src1) // False
8291 .add(Src0) // True
8292 .addReg(NewCondReg);
8293 }
8294 MRI.replaceRegWith(Dest.getReg(), NewDestReg);
8295 legalizeOperands(*NewInst, MDT);
8296 addUsersToMoveToVALUWorklist(NewDestReg, MRI, Worklist);
8297}
8298
8299void SIInstrInfo::lowerScalarAbs(SIInstrWorklist &Worklist,
8300 MachineInstr &Inst) const {
8301 MachineBasicBlock &MBB = *Inst.getParent();
8302 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8303 MachineBasicBlock::iterator MII = Inst;
8304 DebugLoc DL = Inst.getDebugLoc();
8305
8306 MachineOperand &Dest = Inst.getOperand(0);
8307 MachineOperand &Src = Inst.getOperand(1);
8308 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8309 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8310
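// abs(x) is lowered as max(x, 0 - x): TmpReg holds the negation and the
// V_MAX_I32 below selects the non-negative value.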
8311 unsigned SubOp = ST.hasAddNoCarry() ?
8312 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;
8313
8314 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
8315 .addImm(0)
8316 .addReg(Src.getReg());
8317
8318 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
8319 .addReg(Src.getReg())
8320 .addReg(TmpReg);
8321
8322 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8323 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8324}
8325
8326void SIInstrInfo::lowerScalarXnor(SIInstrWorklist &Worklist,
8327 MachineInstr &Inst) const {
8328 MachineBasicBlock &MBB = *Inst.getParent();
8329 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8330 MachineBasicBlock::iterator MII = Inst;
8331 const DebugLoc &DL = Inst.getDebugLoc();
8332
8333 MachineOperand &Dest = Inst.getOperand(0);
8334 MachineOperand &Src0 = Inst.getOperand(1);
8335 MachineOperand &Src1 = Inst.getOperand(2);
8336
8337 if (ST.hasDLInsts()) {
8338 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8339 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
8340 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
8341
8342 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
8343 .add(Src0)
8344 .add(Src1);
8345
8346 MRI.replaceRegWith(Dest.getReg(), NewDest);
8347 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8348 } else {
8349 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
8350 // invert either source and then perform the XOR. If either source is a
8351 // scalar register, then we can leave the inversion on the scalar unit to
8352 // achieve a better distribution of scalar and vector instructions.
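// For example, with 4-bit values x = 0b1010 and y = 0b0110:
// ~(x ^ y) = 0b0011, and (~x) ^ y = 0b0101 ^ 0b0110 = 0b0011 as well.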
8353 bool Src0IsSGPR = Src0.isReg() &&
8354 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
8355 bool Src1IsSGPR = Src1.isReg() &&
8356 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
8357 MachineInstr *Xor;
8358 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8359 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8360
8361 // Build a pair of scalar instructions and add them to the work list.
8362 // The next iteration over the work list will lower these to the vector
8363 // unit as necessary.
8364 if (Src0IsSGPR) {
8365 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
8366 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
8367 .addReg(Temp)
8368 .add(Src1);
8369 } else if (Src1IsSGPR) {
8370 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
8371 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
8372 .add(Src0)
8373 .addReg(Temp);
8374 } else {
8375 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
8376 .add(Src0)
8377 .add(Src1);
8378 MachineInstr *Not =
8379 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
8380 Worklist.insert(Not);
8381 }
8382
8383 MRI.replaceRegWith(Dest.getReg(), NewDest);
8384
8385 Worklist.insert(Xor);
8386
8387 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8388 }
8389}
8390
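// Lower S_NAND_B32 / S_NOR_B32 as the plain binary op followed by S_NOT_B32
// on the result; both new instructions are queued for VALU lowering.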
8391void SIInstrInfo::splitScalarNotBinop(SIInstrWorklist &Worklist,
8392 MachineInstr &Inst,
8393 unsigned Opcode) const {
8394 MachineBasicBlock &MBB = *Inst.getParent();
8395 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8396 MachineBasicBlock::iterator MII = Inst;
8397 const DebugLoc &DL = Inst.getDebugLoc();
8398
8399 MachineOperand &Dest = Inst.getOperand(0);
8400 MachineOperand &Src0 = Inst.getOperand(1);
8401 MachineOperand &Src1 = Inst.getOperand(2);
8402
8403 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8404 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8405
8406 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
8407 .add(Src0)
8408 .add(Src1);
8409
8410 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
8411 .addReg(Interm);
8412
8413 Worklist.insert(&Op);
8414 Worklist.insert(&Not);
8415
8416 MRI.replaceRegWith(Dest.getReg(), NewDest);
8417 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8418}
8419
8420void SIInstrInfo::splitScalarBinOpN2(SIInstrWorklist &Worklist,
8421 MachineInstr &Inst,
8422 unsigned Opcode) const {
8423 MachineBasicBlock &MBB = *Inst.getParent();
8424 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8425 MachineBasicBlock::iterator MII = Inst;
8426 const DebugLoc &DL = Inst.getDebugLoc();
8427
8428 MachineOperand &Dest = Inst.getOperand(0);
8429 MachineOperand &Src0 = Inst.getOperand(1);
8430 MachineOperand &Src1 = Inst.getOperand(2);
8431
8432 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8433 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8434
8435 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
8436 .add(Src1);
8437
8438 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
8439 .add(Src0)
8440 .addReg(Interm);
8441
8442 Worklist.insert(&Not);
8443 Worklist.insert(&Op);
8444
8445 MRI.replaceRegWith(Dest.getReg(), NewDest);
8446 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8447}
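// Illustrative note: the "N2" scalar ops have the form x OP ~y. For example
// (hypothetical values), an S_ANDN2_B32-style op with x = 0xF0F0 and
// y = 0x00FF yields x & ~y = 0xF000, which is why an S_NOT on Src1 feeds the
// binary op built above.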
8448
8449void SIInstrInfo::splitScalar64BitUnaryOp(SIInstrWorklist &Worklist,
8450 MachineInstr &Inst, unsigned Opcode,
8451 bool Swap) const {
8452 MachineBasicBlock &MBB = *Inst.getParent();
8453 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8454
8455 MachineOperand &Dest = Inst.getOperand(0);
8456 MachineOperand &Src0 = Inst.getOperand(1);
8457 DebugLoc DL = Inst.getDebugLoc();
8458
8459 MachineBasicBlock::iterator MII = Inst;
8460
8461 const MCInstrDesc &InstDesc = get(Opcode);
8462 const TargetRegisterClass *Src0RC = Src0.isReg() ?
8463 MRI.getRegClass(Src0.getReg()) :
8464 &AMDGPU::SGPR_32RegClass;
8465
8466 const TargetRegisterClass *Src0SubRC =
8467 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
8468
8469 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
8470 AMDGPU::sub0, Src0SubRC);
8471
8472 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
8473 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
8474 const TargetRegisterClass *NewDestSubRC =
8475 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);
8476
8477 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
8478 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
8479
8480 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
8481 AMDGPU::sub1, Src0SubRC);
8482
8483 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
8484 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
8485
8486 if (Swap)
8487 std::swap(DestSub0, DestSub1);
8488
8489 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
8490 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
8491 .addReg(DestSub0)
8492 .addImm(AMDGPU::sub0)
8493 .addReg(DestSub1)
8494 .addImm(AMDGPU::sub1);
8495
8496 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
8497
8498 Worklist.insert(&LoHalf);
8499 Worklist.insert(&HiHalf);
8500
8501 // We don't need to legalizeOperands here because for a single operand, src0
8502 // will support any kind of input.
8503
8504 // Move all users of this moved value.
8505 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
8506}
8507
8508// There is no vector equivalent of s_mul_u64. For this reason, we need to
8509// split the s_mul_u64 into 32-bit vector multiplications.
8510void SIInstrInfo::splitScalarSMulU64(SIInstrWorklist &Worklist,
8511 MachineInstr &Inst,
8512 MachineDominatorTree *MDT) const {
8513 MachineBasicBlock &MBB = *Inst.getParent();
8514 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8515
8516 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
8517 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8518 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8519
8520 MachineOperand &Dest = Inst.getOperand(0);
8521 MachineOperand &Src0 = Inst.getOperand(1);
8522 MachineOperand &Src1 = Inst.getOperand(2);
8523 const DebugLoc &DL = Inst.getDebugLoc();
8524 MachineBasicBlock::iterator MII = Inst;
8525
8526 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
8527 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
8528 const TargetRegisterClass *Src0SubRC =
8529 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
8530 if (RI.isSGPRClass(Src0SubRC))
8531 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
8532 const TargetRegisterClass *Src1SubRC =
8533 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
8534 if (RI.isSGPRClass(Src1SubRC))
8535 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
8536
8537 // First, we extract the low 32-bit and high 32-bit values from each of the
8538 // operands.
8539 MachineOperand Op0L =
8540 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
8541 MachineOperand Op1L =
8542 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
8543 MachineOperand Op0H =
8544 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
8545 MachineOperand Op1H =
8546 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
8547
8548 // The multiplication is done as follows:
8549 //
8550 // Op1H Op1L
8551 // * Op0H Op0L
8552 // --------------------
8553 // Op1H*Op0L Op1L*Op0L
8554 // + Op1H*Op0H Op1L*Op0H
8555 // -----------------------------------------
8556 // (Op1H*Op0L + Op1L*Op0H + carry) Op1L*Op0L
8557 //
8558 // We drop Op1H*Op0H because the result of the multiplication is a 64-bit
8559 // value and that would overflow.
8560 // The low 32-bit value is Op1L*Op0L.
8561 // The high 32-bit value is Op1H*Op0L + Op1L*Op0H + carry (from Op1L*Op0L).
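  // For example (illustrative operands): Src0 = 0x0000000200000003 and
  // Src1 = 0x0000000400000005 give Op0L = 3, Op0H = 2, Op1L = 5, Op1H = 4,
  // so the low half is 5*3 = 15 and the high half is 4*3 + 5*2 + 0 (the
  // mul-hi carry of 5*3) = 22, i.e. the result 0x000000160000000F, which is
  // the low 64 bits of the full product.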
8562
8563 Register Op1L_Op0H_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8564 MachineInstr *Op1L_Op0H =
8565 BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1L_Op0H_Reg)
8566 .add(Op1L)
8567 .add(Op0H);
8568
8569 Register Op1H_Op0L_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8570 MachineInstr *Op1H_Op0L =
8571 BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1H_Op0L_Reg)
8572 .add(Op1H)
8573 .add(Op0L);
8574
8575 Register CarryReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8576 MachineInstr *Carry =
8577 BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_HI_U32_e64), CarryReg)
8578 .add(Op1L)
8579 .add(Op0L);
8580
8581 MachineInstr *LoHalf =
8582 BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
8583 .add(Op1L)
8584 .add(Op0L);
8585
8586 Register AddReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8587 MachineInstr *Add = BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), AddReg)
8588 .addReg(Op1L_Op0H_Reg)
8589 .addReg(Op1H_Op0L_Reg);
8590
8591 MachineInstr *HiHalf =
8592 BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), DestSub1)
8593 .addReg(AddReg)
8594 .addReg(CarryReg);
8595
8596 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
8597 .addReg(DestSub0)
8598 .addImm(AMDGPU::sub0)
8599 .addReg(DestSub1)
8600 .addImm(AMDGPU::sub1);
8601
8602 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
8603
8604 // Try to legalize the operands in case we need to swap the order to keep it
8605 // valid.
8606 legalizeOperands(*Op1L_Op0H, MDT);
8607 legalizeOperands(*Op1H_Op0L, MDT);
8608 legalizeOperands(*Carry, MDT);
8609 legalizeOperands(*LoHalf, MDT);
8610 legalizeOperands(*Add, MDT);
8611 legalizeOperands(*HiHalf, MDT);
8612
8613 // Move all users of this moved value.
8614 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
8615}
8616
8617// Lower S_MUL_U64_U32_PSEUDO/S_MUL_I64_I32_PSEUDO into two 32-bit vector
8618// multiplications.
8619void SIInstrInfo::splitScalarSMulPseudo(SIInstrWorklist &Worklist,
8620 MachineInstr &Inst,
8621 MachineDominatorTree *MDT) const {
8622 MachineBasicBlock &MBB = *Inst.getParent();
8623 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8624
8625 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
8626 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8627 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8628
8629 MachineOperand &Dest = Inst.getOperand(0);
8630 MachineOperand &Src0 = Inst.getOperand(1);
8631 MachineOperand &Src1 = Inst.getOperand(2);
8632 const DebugLoc &DL = Inst.getDebugLoc();
8633 MachineBasicBlock::iterator MII = Inst;
8634
8635 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
8636 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
8637 const TargetRegisterClass *Src0SubRC =
8638 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
8639 if (RI.isSGPRClass(Src0SubRC))
8640 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
8641 const TargetRegisterClass *Src1SubRC =
8642 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
8643 if (RI.isSGPRClass(Src1SubRC))
8644 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
8645
8646 // First, we extract the low 32-bit and high 32-bit values from each of the
8647 // operands.
8648 MachineOperand Op0L =
8649 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
8650 MachineOperand Op1L =
8651 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
8652
8653 unsigned Opc = Inst.getOpcode();
8654 unsigned NewOpc = Opc == AMDGPU::S_MUL_U64_U32_PSEUDO
8655 ? AMDGPU::V_MUL_HI_U32_e64
8656 : AMDGPU::V_MUL_HI_I32_e64;
8657 MachineInstr *HiHalf =
8658 BuildMI(MBB, MII, DL, get(NewOpc), DestSub1).add(Op1L).add(Op0L);
8659
8660 MachineInstr *LoHalf =
8661 BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
8662 .add(Op1L)
8663 .add(Op0L);
8664
8665 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
8666 .addReg(DestSub0)
8667 .addImm(AMDGPU::sub0)
8668 .addReg(DestSub1)
8669 .addImm(AMDGPU::sub1);
8670
8671 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
8672
8673 // Try to legalize the operands in case we need to swap the order to keep it
8674 // valid.
8675 legalizeOperands(*HiHalf, MDT);
8676 legalizeOperands(*LoHalf, MDT);
8677
8678 // Move all users of this moved value.
8679 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
8680}
8681
8682void SIInstrInfo::splitScalar64BitBinaryOp(SIInstrWorklist &Worklist,
8683 MachineInstr &Inst, unsigned Opcode,
8684 MachineDominatorTree *MDT) const {
8685 MachineBasicBlock &MBB = *Inst.getParent();
8686 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8687
8688 MachineOperand &Dest = Inst.getOperand(0);
8689 MachineOperand &Src0 = Inst.getOperand(1);
8690 MachineOperand &Src1 = Inst.getOperand(2);
8691 DebugLoc DL = Inst.getDebugLoc();
8692
8693 MachineBasicBlock::iterator MII = Inst;
8694
8695 const MCInstrDesc &InstDesc = get(Opcode);
8696 const TargetRegisterClass *Src0RC = Src0.isReg() ?
8697 MRI.getRegClass(Src0.getReg()) :
8698 &AMDGPU::SGPR_32RegClass;
8699
8700 const TargetRegisterClass *Src0SubRC =
8701 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
8702 const TargetRegisterClass *Src1RC = Src1.isReg() ?
8703 MRI.getRegClass(Src1.getReg()) :
8704 &AMDGPU::SGPR_32RegClass;
8705
8706 const TargetRegisterClass *Src1SubRC =
8707 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
8708
8709 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
8710 AMDGPU::sub0, Src0SubRC);
8711 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
8712 AMDGPU::sub0, Src1SubRC);
8713 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
8714 AMDGPU::sub1, Src0SubRC);
8715 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
8716 AMDGPU::sub1, Src1SubRC);
8717
8718 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
8719 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
8720 const TargetRegisterClass *NewDestSubRC =
8721 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);
8722
8723 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
8724 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
8725 .add(SrcReg0Sub0)
8726 .add(SrcReg1Sub0);
8727
8728 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
8729 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
8730 .add(SrcReg0Sub1)
8731 .add(SrcReg1Sub1);
8732
8733 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
8734 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
8735 .addReg(DestSub0)
8736 .addImm(AMDGPU::sub0)
8737 .addReg(DestSub1)
8738 .addImm(AMDGPU::sub1);
8739
8740 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
8741
8742 Worklist.insert(&LoHalf);
8743 Worklist.insert(&HiHalf);
8744
8745 // Move all users of this moved value.
8746 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
8747}
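// Worked example (illustrative): a 64-bit bitwise op such as S_AND_B64 can be
// split this way because it has no cross-dword carries, e.g.
// 0x00FF00FF_F0F0F0F0 & 0x0F0F0F0F_FF00FF00 is just the pair of 32-bit ANDs
// (0x00FF00FF & 0x0F0F0F0F):(0xF0F0F0F0 & 0xFF00FF00) = 0x000F000F_F000F000.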
8748
8749void SIInstrInfo::splitScalar64BitXnor(SIInstrWorklist &Worklist,
8750 MachineInstr &Inst,
8751 MachineDominatorTree *MDT) const {
8752 MachineBasicBlock &MBB = *Inst.getParent();
8753 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8754
8755 MachineOperand &Dest = Inst.getOperand(0);
8756 MachineOperand &Src0 = Inst.getOperand(1);
8757 MachineOperand &Src1 = Inst.getOperand(2);
8758 const DebugLoc &DL = Inst.getDebugLoc();
8759
8760 MachineBasicBlock::iterator MII = Inst;
8761
8762 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
8763
8764 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
8765
8766 MachineOperand* Op0;
8767 MachineOperand* Op1;
8768
8769 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
8770 Op0 = &Src0;
8771 Op1 = &Src1;
8772 } else {
8773 Op0 = &Src1;
8774 Op1 = &Src0;
8775 }
8776
8777 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
8778 .add(*Op0);
8779
8780 Register NewDest = MRI.createVirtualRegister(DestRC);
8781
8782 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
8783 .addReg(Interm)
8784 .add(*Op1);
8785
8786 MRI.replaceRegWith(Dest.getReg(), NewDest);
8787
8788 Worklist.insert(&Xor);
8789}
8790
8791void SIInstrInfo::splitScalar64BitBCNT(SIInstrWorklist &Worklist,
8792 MachineInstr &Inst) const {
8793 MachineBasicBlock &MBB = *Inst.getParent();
8794 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8795
8796 MachineBasicBlock::iterator MII = Inst;
8797 const DebugLoc &DL = Inst.getDebugLoc();
8798
8799 MachineOperand &Dest = Inst.getOperand(0);
8800 MachineOperand &Src = Inst.getOperand(1);
8801
8802 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
8803 const TargetRegisterClass *SrcRC = Src.isReg() ?
8804 MRI.getRegClass(Src.getReg()) :
8805 &AMDGPU::SGPR_32RegClass;
8806
8807 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8808 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8809
8810 const TargetRegisterClass *SrcSubRC =
8811 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);
8812
8813 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
8814 AMDGPU::sub0, SrcSubRC);
8815 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
8816 AMDGPU::sub1, SrcSubRC);
8817
8818 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
8819
8820 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
8821
8822 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8823
8824 // We don't need to legalize operands here. src0 for either instruction can be
8825 // an SGPR, and the second input is unused or determined here.
8826 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8827}
8828
8829void SIInstrInfo::splitScalar64BitBFE(SIInstrWorklist &Worklist,
8830 MachineInstr &Inst) const {
8831 MachineBasicBlock &MBB = *Inst.getParent();
8832 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8833 MachineBasicBlock::iterator MII = Inst;
8834 const DebugLoc &DL = Inst.getDebugLoc();
8835
8836 MachineOperand &Dest = Inst.getOperand(0);
8837 uint32_t Imm = Inst.getOperand(2).getImm();
8838 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
8839 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
8840
8841 (void) Offset;
8842
8843 // Only sext_inreg cases handled.
8844 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
8845 Offset == 0 && "Not implemented");
8846
8847 if (BitWidth < 32) {
8848 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8849 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8850 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
8851
8852 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
8853 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
8854 .addImm(0)
8855 .addImm(BitWidth);
8856
8857 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
8858 .addImm(31)
8859 .addReg(MidRegLo);
8860
8861 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
8862 .addReg(MidRegLo)
8863 .addImm(AMDGPU::sub0)
8864 .addReg(MidRegHi)
8865 .addImm(AMDGPU::sub1);
8866
8867 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8868 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8869 return;
8870 }
8871
8872 MachineOperand &Src = Inst.getOperand(1);
8873 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8874 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
8875
8876 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
8877 .addImm(31)
8878 .addReg(Src.getReg(), 0, AMDGPU::sub0);
8879
8880 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
8881 .addReg(Src.getReg(), 0, AMDGPU::sub0)
8882 .addImm(AMDGPU::sub0)
8883 .addReg(TmpReg)
8884 .addImm(AMDGPU::sub1);
8885
8886 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8887 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8888}
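// Worked example (illustrative): for S_BFE_I64 with BitWidth = 8 and an input
// whose low byte is 0x80, V_BFE_I32 yields 0xFFFFFF80 for the low half, and
// the arithmetic shift by 31 replicates the sign bit into 0xFFFFFFFF for the
// high half, i.e. -128 sign-extended to 64 bits.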
8889
8890void SIInstrInfo::splitScalar64BitCountOp(SIInstrWorklist &Worklist,
8891 MachineInstr &Inst, unsigned Opcode,
8892 MachineDominatorTree *MDT) const {
8893 // (S_FLBIT_I32_B64 hi:lo) ->
8894 // -> (umin (V_FFBH_U32_e32 hi), (uaddsat (V_FFBH_U32_e32 lo), 32))
8895 // (S_FF1_I32_B64 hi:lo) ->
8896 // ->(umin (uaddsat (V_FFBL_B32_e32 hi), 32) (V_FFBL_B32_e32 lo))
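  // For example (illustrative input hi:lo = 0x00000000:0x00000010):
  // ctlz = umin(ffbh(hi) = 0xffffffff, uaddsat(ffbh(lo) = 27, 32) = 59) = 59
  // cttz = umin(uaddsat(ffbl(hi) = 0xffffffff, 32), ffbl(lo) = 4) = 4
  // matching the 64-bit counts (V_FFBH/V_FFBL return -1 for a zero input).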
8897
8898 MachineBasicBlock &MBB = *Inst.getParent();
8899 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8900 MachineBasicBlock::iterator MII = Inst;
8901 const DebugLoc &DL = Inst.getDebugLoc();
8902
8903 MachineOperand &Dest = Inst.getOperand(0);
8904 MachineOperand &Src = Inst.getOperand(1);
8905
8906 const MCInstrDesc &InstDesc = get(Opcode);
8907
8908 bool IsCtlz = Opcode == AMDGPU::V_FFBH_U32_e32;
8909 unsigned OpcodeAdd =
8910 ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
8911
8912 const TargetRegisterClass *SrcRC =
8913 Src.isReg() ? MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass;
8914 const TargetRegisterClass *SrcSubRC =
8915 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);
8916
8917 MachineOperand SrcRegSub0 =
8918 buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub0, SrcSubRC);
8919 MachineOperand SrcRegSub1 =
8920 buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub1, SrcSubRC);
8921
8922 Register MidReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8923 Register MidReg2 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8924 Register MidReg3 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8925 Register MidReg4 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8926
8927 BuildMI(MBB, MII, DL, InstDesc, MidReg1).add(SrcRegSub0);
8928
8929 BuildMI(MBB, MII, DL, InstDesc, MidReg2).add(SrcRegSub1);
8930
8931 BuildMI(MBB, MII, DL, get(OpcodeAdd), MidReg3)
8932 .addReg(IsCtlz ? MidReg1 : MidReg2)
8933 .addImm(32)
8934 .addImm(1); // enable clamp
8935
8936 BuildMI(MBB, MII, DL, get(AMDGPU::V_MIN_U32_e64), MidReg4)
8937 .addReg(MidReg3)
8938 .addReg(IsCtlz ? MidReg2 : MidReg1);
8939
8940 MRI.replaceRegWith(Dest.getReg(), MidReg4);
8941
8942 addUsersToMoveToVALUWorklist(MidReg4, MRI, Worklist);
8943}
8944
8945void SIInstrInfo::addUsersToMoveToVALUWorklist(
8946 Register DstReg, MachineRegisterInfo &MRI,
8947 SIInstrWorklist &Worklist) const {
8948 for (MachineOperand &MO : make_early_inc_range(MRI.use_operands(DstReg))) {
8949 MachineInstr &UseMI = *MO.getParent();
8950
8951 unsigned OpNo = 0;
8952
8953 switch (UseMI.getOpcode()) {
8954 case AMDGPU::COPY:
8955 case AMDGPU::WQM:
8956 case AMDGPU::SOFT_WQM:
8957 case AMDGPU::STRICT_WWM:
8958 case AMDGPU::STRICT_WQM:
8959 case AMDGPU::REG_SEQUENCE:
8960 case AMDGPU::PHI:
8961 case AMDGPU::INSERT_SUBREG:
8962 break;
8963 default:
8964 OpNo = MO.getOperandNo();
8965 break;
8966 }
8967
8968 const TargetRegisterClass *OpRC = getOpRegClass(UseMI, OpNo);
8969 MRI.constrainRegClass(DstReg, OpRC);
8970
8971 if (!RI.hasVectorRegisters(OpRC))
8972 Worklist.insert(&UseMI);
8973 else
8974 // Legalization could change user list.
8975 legalizeOperandsVALUt16(UseMI, OpNo, MRI);
8976 }
8977}
8978
8979void SIInstrInfo::movePackToVALU(SIInstrWorklist &Worklist,
8980 MachineRegisterInfo &MRI,
8981 MachineInstr &Inst) const {
8982 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8983 MachineBasicBlock *MBB = Inst.getParent();
8984 MachineOperand &Src0 = Inst.getOperand(1);
8985 MachineOperand &Src1 = Inst.getOperand(2);
8986 const DebugLoc &DL = Inst.getDebugLoc();
8987
8988 switch (Inst.getOpcode()) {
8989 case AMDGPU::S_PACK_LL_B32_B16: {
8990 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8991 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8992
8993 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
8994 // 0.
8995 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
8996 .addImm(0xffff);
8997
8998 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
8999 .addReg(ImmReg, RegState::Kill)
9000 .add(Src0);
9001
9002 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
9003 .add(Src1)
9004 .addImm(16)
9005 .addReg(TmpReg, RegState::Kill);
9006 break;
9007 }
9008 case AMDGPU::S_PACK_LH_B32_B16: {
9009 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9010 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
9011 .addImm(0xffff);
9012 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg)
9013 .addReg(ImmReg, RegState::Kill)
9014 .add(Src0)
9015 .add(Src1);
9016 break;
9017 }
9018 case AMDGPU::S_PACK_HL_B32_B16: {
9019 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9020 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
9021 .addImm(16)
9022 .add(Src0);
9023 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
9024 .add(Src1)
9025 .addImm(16)
9026 .addReg(TmpReg, RegState::Kill);
9027 break;
9028 }
9029 case AMDGPU::S_PACK_HH_B32_B16: {
9030 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9031 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9032 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
9033 .addImm(16)
9034 .add(Src0);
9035 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
9036 .addImm(0xffff0000);
9037 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg)
9038 .add(Src1)
9039 .addReg(ImmReg, RegState::Kill)
9040 .addReg(TmpReg, RegState::Kill);
9041 break;
9042 }
9043 default:
9044 llvm_unreachable("unhandled s_pack_* instruction");
9045 }
9046
9047 MachineOperand &Dest = Inst.getOperand(0);
9048 MRI.replaceRegWith(Dest.getReg(), ResultReg);
9049 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9050}
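// Worked example (illustrative): S_PACK_LL_B32_B16 with Src0 = 0x12345678 and
// Src1 = 0xABCDEF01 masks Src0 to 0x00005678, then V_LSHL_OR_B32 computes
// (Src1 << 16) | 0x5678 = 0xEF015678, i.e. the low halves of both sources
// packed into one dword.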
9051
9052void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
9053 MachineInstr &SCCDefInst,
9054 SIInstrWorklist &Worklist,
9055 Register NewCond) const {
9056
9057 // Ensure that def inst defines SCC, which is still live.
9058 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
9059 !Op.isDead() && Op.getParent() == &SCCDefInst);
9060 SmallVector<MachineInstr *, 4> CopyToDelete;
9061 // This assumes that all the users of SCC are in the same block
9062 // as the SCC def.
9063 for (MachineInstr &MI : // Skip the def inst itself.
9064 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
9065 SCCDefInst.getParent()->end())) {
9066 // Check if SCC is used first.
9067 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);
9068 if (SCCIdx != -1) {
9069 if (MI.isCopy()) {
9070 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
9071 Register DestReg = MI.getOperand(0).getReg();
9072
9073 MRI.replaceRegWith(DestReg, NewCond);
9074 CopyToDelete.push_back(&MI);
9075 } else {
9076
9077 if (NewCond.isValid())
9078 MI.getOperand(SCCIdx).setReg(NewCond);
9079
9080 Worklist.insert(&MI);
9081 }
9082 }
9083 // Exit if we find another SCC def.
9084 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1)
9085 break;
9086 }
9087 for (auto &Copy : CopyToDelete)
9088 Copy->eraseFromParent();
9089}
9090
9091// Instructions that use SCC may be converted to VALU instructions. When that
9092// happens, the SCC register is changed to VCC_LO. The instruction that defines
9093// SCC must be changed to an instruction that defines VCC. This function makes
9094// sure that the instruction that defines SCC is added to the moveToVALU
9095// worklist.
9096void SIInstrInfo::addSCCDefsToVALUWorklist(MachineInstr *SCCUseInst,
9097 SIInstrWorklist &Worklist) const {
9098 // Look for a preceding instruction that either defines VCC or SCC. If VCC
9099 // then there is nothing to do because the defining instruction has been
9100 // converted to a VALU already. If SCC then that instruction needs to be
9101 // converted to a VALU.
9102 for (MachineInstr &MI :
9103 make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)),
9104 SCCUseInst->getParent()->rend())) {
9105 if (MI.modifiesRegister(AMDGPU::VCC, &RI))
9106 break;
9107 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
9108 Worklist.insert(&MI);
9109 break;
9110 }
9111 }
9112}
9113
9114const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
9115 const MachineInstr &Inst) const {
9116 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
9117
9118 switch (Inst.getOpcode()) {
9119 // For target instructions, getOpRegClass just returns the virtual register
9120 // class associated with the operand, so we need to find an equivalent VGPR
9121 // register class in order to move the instruction to the VALU.
9122 case AMDGPU::COPY:
9123 case AMDGPU::PHI:
9124 case AMDGPU::REG_SEQUENCE:
9125 case AMDGPU::INSERT_SUBREG:
9126 case AMDGPU::WQM:
9127 case AMDGPU::SOFT_WQM:
9128 case AMDGPU::STRICT_WWM:
9129 case AMDGPU::STRICT_WQM: {
9130 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
9131 if (RI.isAGPRClass(SrcRC)) {
9132 if (RI.isAGPRClass(NewDstRC))
9133 return nullptr;
9134
9135 switch (Inst.getOpcode()) {
9136 case AMDGPU::PHI:
9137 case AMDGPU::REG_SEQUENCE:
9138 case AMDGPU::INSERT_SUBREG:
9139 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
9140 break;
9141 default:
9142 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9143 }
9144
9145 if (!NewDstRC)
9146 return nullptr;
9147 } else {
9148 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
9149 return nullptr;
9150
9151 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9152 if (!NewDstRC)
9153 return nullptr;
9154 }
9155
9156 return NewDstRC;
9157 }
9158 default:
9159 return NewDstRC;
9160 }
9161}
9162
9163// Find the one SGPR operand we are allowed to use.
9164Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
9165 int OpIndices[3]) const {
9166 const MCInstrDesc &Desc = MI.getDesc();
9167
9168 // Find the one SGPR operand we are allowed to use.
9169 //
9170 // First we need to consider the instruction's operand requirements before
9171 // legalizing. Some operands are required to be SGPRs, such as implicit uses
9172 // of VCC, but we are still bound by the constant bus requirement to only use
9173 // one.
9174 //
9175 // If the operand's class is an SGPR, we can never move it.
9176
9177 Register SGPRReg = findImplicitSGPRRead(MI);
9178 if (SGPRReg)
9179 return SGPRReg;
9180
9181 Register UsedSGPRs[3] = {Register()};
9182 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
9183
9184 for (unsigned i = 0; i < 3; ++i) {
9185 int Idx = OpIndices[i];
9186 if (Idx == -1)
9187 break;
9188
9189 const MachineOperand &MO = MI.getOperand(Idx);
9190 if (!MO.isReg())
9191 continue;
9192
9193 // Is this operand statically required to be an SGPR based on the operand
9194 // constraints?
9195 const TargetRegisterClass *OpRC =
9196 RI.getRegClass(Desc.operands()[Idx].RegClass);
9197 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
9198 if (IsRequiredSGPR)
9199 return MO.getReg();
9200
9201 // If this could be a VGPR or an SGPR, Check the dynamic register class.
9202 Register Reg = MO.getReg();
9203 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
9204 if (RI.isSGPRClass(RegRC))
9205 UsedSGPRs[i] = Reg;
9206 }
9207
9208 // We don't have a required SGPR operand, so we have a bit more freedom in
9209 // selecting operands to move.
9210
9211 // Try to select the most used SGPR. If an SGPR is equal to one of the
9212 // others, we choose that.
9213 //
9214 // e.g.
9215 // V_FMA_F32 v0, s0, s0, s0 -> No moves
9216 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
9217
9218 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
9219 // prefer those.
9220
9221 if (UsedSGPRs[0]) {
9222 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
9223 SGPRReg = UsedSGPRs[0];
9224 }
9225
9226 if (!SGPRReg && UsedSGPRs[1]) {
9227 if (UsedSGPRs[1] == UsedSGPRs[2])
9228 SGPRReg = UsedSGPRs[1];
9229 }
9230
9231 return SGPRReg;
9232}
9233
9234MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
9235 AMDGPU::OpName OperandName) const {
9236 if (OperandName == AMDGPU::OpName::NUM_OPERAND_NAMES)
9237 return nullptr;
9238
9239 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
9240 if (Idx == -1)
9241 return nullptr;
9242
9243 return &MI.getOperand(Idx);
9244}
9245
9246uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
9247 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
9248 int64_t Format = ST.getGeneration() >= AMDGPUSubtarget::GFX11
9249 ? (int64_t)AMDGPU::UfmtGFX11::UFMT_32_FLOAT
9250 : (int64_t)AMDGPU::UfmtGFX10::UFMT_32_FLOAT;
9251 return (Format << 44) |
9252 (1ULL << 56) | // RESOURCE_LEVEL = 1
9253 (3ULL << 60); // OOB_SELECT = 3
9254 }
9255
9256 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
9257 if (ST.isAmdHsaOS()) {
9258 // Set ATC = 1. GFX9 doesn't have this bit.
9259 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
9260 RsrcDataFormat |= (1ULL << 56);
9261
9262 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
9263 // BTW, it disables TC L2 and therefore decreases performance.
9264 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
9265 RsrcDataFormat |= (2ULL << 59);
9266 }
9267
9268 return RsrcDataFormat;
9269}
9270
9271uint64_t SIInstrInfo::getScratchRsrcWords23() const {
9272 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
9273 AMDGPU::RSRC_TID_ENABLE |
9274 0xffffffff; // Size;
9275
9276 // GFX9 doesn't have ELEMENT_SIZE.
9277 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
9278 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;
9279 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
9280 }
9281
9282 // IndexStride = 64 / 32.
9283 uint64_t IndexStride = ST.isWave64() ? 3 : 2;
9284 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
9285
9286 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
9287 // Clear them unless we want a huge stride.
9288 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
9289 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
9290 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
9291
9292 return Rsrc23;
9293}
9294
9295bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
9296 unsigned Opc = MI.getOpcode();
9297
9298 return isSMRD(Opc);
9299}
9300
9301bool SIInstrInfo::isHighLatencyDef(int Opc) const {
9302 return get(Opc).mayLoad() &&
9303 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
9304}
9305
9306Register SIInstrInfo::isStackAccess(const MachineInstr &MI,
9307 int &FrameIndex) const {
9308 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
9309 if (!Addr || !Addr->isFI())
9310 return Register();
9311
9312 assert(!MI.memoperands_empty() &&
9313 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
9314
9315 FrameIndex = Addr->getIndex();
9316 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
9317}
9318
9319Register SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
9320 int &FrameIndex) const {
9321 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
9322 assert(Addr && Addr->isFI());
9323 FrameIndex = Addr->getIndex();
9324 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
9325}
9326
9327Register SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
9328 int &FrameIndex) const {
9329 if (!MI.mayLoad())
9330 return Register();
9331
9332 if (isMUBUF(MI) || isVGPRSpill(MI))
9333 return isStackAccess(MI, FrameIndex);
9334
9335 if (isSGPRSpill(MI))
9336 return isSGPRStackAccess(MI, FrameIndex);
9337
9338 return Register();
9339}
9340
9341Register SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
9342 int &FrameIndex) const {
9343 if (!MI.mayStore())
9344 return Register();
9345
9346 if (isMUBUF(MI) || isVGPRSpill(MI))
9347 return isStackAccess(MI, FrameIndex);
9348
9349 if (isSGPRSpill(MI))
9350 return isSGPRStackAccess(MI, FrameIndex);
9351
9352 return Register();
9353}
9354
9355unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
9356 unsigned Size = 0;
9357 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
9358 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
9359 while (++I != E && I->isInsideBundle()) {
9360 assert(!I->isBundle() && "No nested bundle!");
9361 Size += getInstSizeInBytes(*I);
9362 }
9363
9364 return Size;
9365}
9366
9367unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
9368 unsigned Opc = MI.getOpcode();
9369 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
9370 unsigned DescSize = Desc.getSize();
9371
9372 // If we have a definitive size, we can use it. Otherwise we need to inspect
9373 // the operands to know the size.
9374 if (isFixedSize(MI)) {
9375 unsigned Size = DescSize;
9376
9377 // If we hit the buggy offset, an extra nop will be inserted in MC so
9378 // estimate the worst case.
9379 if (MI.isBranch() && ST.hasOffset3fBug())
9380 Size += 4;
9381
9382 return Size;
9383 }
9384
9385 // Instructions may have a 32-bit literal encoded after them. Check
9386 // operands that could ever be literals.
9387 if (isVALU(MI) || isSALU(MI)) {
9388 if (isDPP(MI))
9389 return DescSize;
9390 bool HasLiteral = false;
9391 unsigned LiteralSize = 4;
9392 for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
9393 const MachineOperand &Op = MI.getOperand(I);
9394 const MCOperandInfo &OpInfo = Desc.operands()[I];
9395 if (!Op.isReg() && !isInlineConstant(Op, OpInfo)) {
9396 HasLiteral = true;
9397 if (ST.has64BitLiterals()) {
9398 switch (OpInfo.OperandType) {
9399 default:
9400 break;
9401 case AMDGPU::OPERAND_REG_IMM_FP64:
9402 if (!AMDGPU::isValid32BitLiteral(Op.getImm(), true))
9403 LiteralSize = 8;
9404 break;
9405 case AMDGPU::OPERAND_REG_IMM_INT64:
9406 if (!Op.isImm() || !AMDGPU::isValid32BitLiteral(Op.getImm(), false))
9407 LiteralSize = 8;
9408 break;
9409 }
9410 }
9411 break;
9412 }
9413 }
9414 return HasLiteral ? DescSize + LiteralSize : DescSize;
9415 }
9416
9417 // Check whether we have extra NSA words.
9418 if (isMIMG(MI)) {
9419 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
9420 if (VAddr0Idx < 0)
9421 return 8;
9422
9423 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
9424 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
9425 }
9426
9427 switch (Opc) {
9428 case TargetOpcode::BUNDLE:
9429 return getInstBundleSize(MI);
9430 case TargetOpcode::INLINEASM:
9431 case TargetOpcode::INLINEASM_BR: {
9432 const MachineFunction *MF = MI.getParent()->getParent();
9433 const char *AsmStr = MI.getOperand(0).getSymbolName();
9434 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
9435 }
9436 default:
9437 if (MI.isMetaInstruction())
9438 return 0;
9439
9440 // If D16 Pseudo inst, get correct MC code size
9441 const auto *D16Info = AMDGPU::getT16D16Helper(Opc);
9442 if (D16Info) {
9443 // Assume d16_lo/hi inst are always in same size
9444 unsigned LoInstOpcode = D16Info->LoOp;
9445 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(LoInstOpcode);
9446 DescSize = Desc.getSize();
9447 }
9448
9449 return DescSize;
9450 }
9451}
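// For example (illustrative): a VALU add whose immediate is not an inline
// constant (say 0x42280000, i.e. 42.0f) occupies DescSize (4) plus a 4-byte
// literal, i.e. 8 bytes, while the same add with an inline constant such as
// 1.0 stays at 4 bytes.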
9452
9453bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
9454 if (!isFLAT(MI))
9455 return false;
9456
9457 if (MI.memoperands_empty())
9458 return true;
9459
9460 for (const MachineMemOperand *MMO : MI.memoperands()) {
9461 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
9462 return true;
9463 }
9464 return false;
9465}
9466
9467ArrayRef<std::pair<int, const char *>>
9468SIInstrInfo::getSerializableTargetIndices() const {
9469 static const std::pair<int, const char *> TargetIndices[] = {
9470 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
9471 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
9472 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
9473 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
9474 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
9475 return ArrayRef(TargetIndices);
9476}
9477
9478/// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
9479/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
9480ScheduleHazardRecognizer *
9481SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
9482 const ScheduleDAG *DAG) const {
9483 return new GCNHazardRecognizer(DAG->MF);
9484}
9485
9486/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
9487/// pass.
9488ScheduleHazardRecognizer *
9489SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
9490 return new GCNHazardRecognizer(MF);
9491}
9492
9493// Called during:
9494// - pre-RA scheduling and post-RA scheduling
9495ScheduleHazardRecognizer *
9496SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
9497 const ScheduleDAGMI *DAG) const {
9498 // Borrowed from Arm Target
9499 // We would like to restrict this hazard recognizer to only
9500 // post-RA scheduling; we can tell that we're post-RA because we don't
9501 // track VRegLiveness.
9502 if (!DAG->hasVRegLiveness())
9503 return new GCNHazardRecognizer(DAG->MF);
9504 return TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG);
9505}
9506
9507std::pair<unsigned, unsigned>
9508SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
9509 return std::pair(TF & MO_MASK, TF & ~MO_MASK);
9510}
9511
9512ArrayRef<std::pair<unsigned, const char *>>
9513SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
9514 static const std::pair<unsigned, const char *> TargetFlags[] = {
9515 {MO_GOTPCREL, "amdgpu-gotprel"},
9516 {MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo"},
9517 {MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi"},
9518 {MO_GOTPCREL64, "amdgpu-gotprel64"},
9519 {MO_REL32_LO, "amdgpu-rel32-lo"},
9520 {MO_REL32_HI, "amdgpu-rel32-hi"},
9521 {MO_REL64, "amdgpu-rel64"},
9522 {MO_ABS32_LO, "amdgpu-abs32-lo"},
9523 {MO_ABS32_HI, "amdgpu-abs32-hi"},
9524 {MO_ABS64, "amdgpu-abs64"},
9525 };
9526
9527 return ArrayRef(TargetFlags);
9528}
9529
9530ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
9531SIInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
9532 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
9533 {
9534 {MONoClobber, "amdgpu-noclobber"},
9535 {MOLastUse, "amdgpu-last-use"},
9536 {MOCooperative, "amdgpu-cooperative"},
9537 };
9538
9539 return ArrayRef(TargetFlags);
9540}
9541
9542unsigned SIInstrInfo::getLiveRangeSplitOpcode(Register SrcReg,
9543 const MachineFunction &MF) const {
9544 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
9545 assert(SrcReg.isVirtual());
9546 if (MFI->checkFlag(SrcReg, AMDGPU::VirtRegFlag::WWM_REG))
9547 return AMDGPU::WWM_COPY;
9548
9549 return AMDGPU::COPY;
9550}
9551
9552bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI,
9553 Register Reg) const {
9554 // We need to handle instructions which may be inserted during register
9555 // allocation to handle the prolog. The initial prolog instruction may have
9556 // been separated from the start of the block by spills and copies inserted
9557 // needed by the prolog. However, the insertions for scalar registers can
9558 // always be placed at the BB top as they are independent of the exec mask
9559 // value.
9560 const MachineFunction *MF = MI.getParent()->getParent();
9561 bool IsNullOrVectorRegister = true;
9562 if (Reg) {
9563 const MachineRegisterInfo &MRI = MF->getRegInfo();
9564 IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg));
9565 }
9566
9567 uint16_t Opcode = MI.getOpcode();
9568 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
9569 return IsNullOrVectorRegister &&
9570 (isSGPRSpill(Opcode) || isWWMRegSpillOpcode(Opcode) ||
9571 (Opcode == AMDGPU::IMPLICIT_DEF &&
9572 MFI->isWWMReg(MI.getOperand(0).getReg())) ||
9573 (!MI.isTerminator() && Opcode != AMDGPU::COPY &&
9574 MI.modifiesRegister(AMDGPU::EXEC, &RI)));
9575}
9576
9577MachineInstrBuilder
9578SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
9579 MachineBasicBlock::iterator I,
9580 const DebugLoc &DL,
9581 Register DestReg) const {
9582 if (ST.hasAddNoCarry())
9583 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
9584
9585 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
9586 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
9587 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
9588
9589 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
9590 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
9591}
9592
9593MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
9594 MachineBasicBlock::iterator I,
9595 const DebugLoc &DL,
9596 Register DestReg,
9597 RegScavenger &RS) const {
9598 if (ST.hasAddNoCarry())
9599 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);
9600
9601 // If available, prefer to use vcc.
9602 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
9603 ? Register(RI.getVCC())
9604 : RS.scavengeRegisterBackwards(
9605 *RI.getBoolRC(), I, /* RestoreAfter */ false,
9606 0, /* AllowSpill */ false);
9607
9608 // TODO: Users need to deal with this.
9609 if (!UnusedCarry.isValid())
9610 return MachineInstrBuilder();
9611
9612 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
9613 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
9614}
9615
9616bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
9617 switch (Opcode) {
9618 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
9619 case AMDGPU::SI_KILL_I1_TERMINATOR:
9620 return true;
9621 default:
9622 return false;
9623 }
9624}
9625
9626const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
9627 switch (Opcode) {
9628 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
9629 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
9630 case AMDGPU::SI_KILL_I1_PSEUDO:
9631 return get(AMDGPU::SI_KILL_I1_TERMINATOR);
9632 default:
9633 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
9634 }
9635}
9636
9637bool SIInstrInfo::isLegalMUBUFImmOffset(unsigned Imm) const {
9638 return Imm <= getMaxMUBUFImmOffset(ST);
9639}
9640
9641unsigned SIInstrInfo::getMaxMUBUFImmOffset(const GCNSubtarget &ST) {
9642 // GFX12 field is non-negative 24-bit signed byte offset.
9643 const unsigned OffsetBits =
9644 ST.getGeneration() >= AMDGPUSubtarget::GFX12 ? 23 : 12;
9645 return (1 << OffsetBits) - 1;
9646}
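// E.g. this evaluates to 4095 (12 offset bits) before GFX12 and to 8388607
// (the non-negative range of the 24-bit signed field) on GFX12.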
9647
9648void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
9649 if (!ST.isWave32())
9650 return;
9651
9652 if (MI.isInlineAsm())
9653 return;
9654
9655 for (auto &Op : MI.implicit_operands()) {
9656 if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
9657 Op.setReg(AMDGPU::VCC_LO);
9658 }
9659}
9660
9661bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
9662 if (!isSMRD(MI))
9663 return false;
9664
9665 // Check that it is using a buffer resource.
9666 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
9667 if (Idx == -1) // e.g. s_memtime
9668 return false;
9669
9670 const auto RCID = MI.getDesc().operands()[Idx].RegClass;
9671 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
9672}
9673
9674// Given Imm, split it into the values to put into the SOffset and ImmOffset
9675// fields in an MUBUF instruction. Return false if it is not possible (due to a
9676// hardware bug needing a workaround).
9677//
9678// The required alignment ensures that individual address components remain
9679// aligned if they are aligned to begin with. It also ensures that additional
9680// offsets within the given alignment can be added to the resulting ImmOffset.
9681bool SIInstrInfo::splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset,
9682 uint32_t &ImmOffset, Align Alignment) const {
9683 const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset(ST);
9684 const uint32_t MaxImm = alignDown(MaxOffset, Alignment.value());
9685 uint32_t Overflow = 0;
9686
9687 if (Imm > MaxImm) {
9688 if (Imm <= MaxImm + 64) {
9689 // Use an SOffset inline constant for 4..64
9690 Overflow = Imm - MaxImm;
9691 Imm = MaxImm;
9692 } else {
9693 // Try to keep the same value in SOffset for adjacent loads, so that
9694 // the corresponding register contents can be re-used.
9695 //
9696 // Load values with all low-bits (except for alignment bits) set into
9697 // SOffset, so that a larger range of values can be covered using
9698 // s_movk_i32.
9699 //
9700 // Atomic operations fail to work correctly when individual address
9701 // components are unaligned, even if their sum is aligned.
9702 uint32_t High = (Imm + Alignment.value()) & ~MaxOffset;
9703 uint32_t Low = (Imm + Alignment.value()) & MaxOffset;
9704 Imm = Low;
9705 Overflow = High - Alignment.value();
9706 }
9707 }
9708
9709 if (Overflow > 0) {
9710 // There is a hardware bug in SI and CI which prevents address clamping in
9711 // MUBUF instructions from working correctly with SOffsets. The immediate
9712 // offset is unaffected.
9713 if (ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
9714 return false;
9715
9716 // It is not possible to set immediate in SOffset field on some targets.
9717 if (ST.hasRestrictedSOffset())
9718 return false;
9719 }
9720
9721 ImmOffset = Imm;
9722 SOffset = Overflow;
9723 return true;
9724}
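// Worked example (illustrative, pre-GFX12: MaxOffset = 4095, Align = 4, so
// MaxImm = 4092): Imm = 5000 exceeds MaxImm + 64, so High = (5004 & ~4095) =
// 4096 and Low = (5004 & 4095) = 908, giving SOffset = 4096 - 4 = 4092 and
// ImmOffset = 908; 4092 + 908 == 5000 as required.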
9725
9726// Depending on the used address space and instructions, some immediate offsets
9727// are allowed and some are not.
9728// Pre-GFX12, flat instruction offsets can only be non-negative, global and
9729// scratch instruction offsets can also be negative. On GFX12, offsets can be
9730// negative for all variants.
9731//
9732// There are several bugs related to these offsets:
9733// On gfx10.1, flat instructions that go into the global address space cannot
9734// use an offset.
9735//
9736// For scratch instructions, the address can be either an SGPR or a VGPR.
9737// The following offsets can be used, depending on the architecture (x means
9738// cannot be used):
9739// +----------------------------+------+------+
9740// | Address-Mode | SGPR | VGPR |
9741// +----------------------------+------+------+
9742// | gfx9 | | |
9743// | negative, 4-aligned offset | x | ok |
9744// | negative, unaligned offset | x | ok |
9745// +----------------------------+------+------+
9746// | gfx10 | | |
9747// | negative, 4-aligned offset | ok | ok |
9748// | negative, unaligned offset | ok | x |
9749// +----------------------------+------+------+
9750// | gfx10.3 | | |
9751// | negative, 4-aligned offset | ok | ok |
9752// | negative, unaligned offset | ok | ok |
9753// +----------------------------+------+------+
9754//
9755// This function ignores the addressing mode, so if an offset cannot be used in
9756// one addressing mode, it is considered illegal.
9757bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
9758 uint64_t FlatVariant) const {
9759 // TODO: Should 0 be special cased?
9760 if (!ST.hasFlatInstOffsets())
9761 return false;
9762
9763 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT &&
9764 (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
9765 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS))
9766 return false;
9767
9768 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
9769 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 &&
9770 (Offset % 4) != 0) {
9771 return false;
9772 }
9773
9774 bool AllowNegative = allowNegativeFlatOffset(FlatVariant);
9775 unsigned N = AMDGPU::getNumFlatOffsetBits(ST);
9776 return isIntN(N, Offset) && (AllowNegative || Offset >= 0);
9777}
9778
9779// See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not.
9780std::pair<int64_t, int64_t>
9781SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace,
9782 uint64_t FlatVariant) const {
9783 int64_t RemainderOffset = COffsetVal;
9784 int64_t ImmField = 0;
9785
9786 bool AllowNegative = allowNegativeFlatOffset(FlatVariant);
9787 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST) - 1;
9788
9789 if (AllowNegative) {
9790 // Use signed division by a power of two to truncate towards 0.
9791 int64_t D = 1LL << NumBits;
9792 RemainderOffset = (COffsetVal / D) * D;
9793 ImmField = COffsetVal - RemainderOffset;
9794
9795 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
9796 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 &&
9797 (ImmField % 4) != 0) {
9798 // Make ImmField a multiple of 4
9799 RemainderOffset += ImmField % 4;
9800 ImmField -= ImmField % 4;
9801 }
9802 } else if (COffsetVal >= 0) {
9803 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
9804 RemainderOffset = COffsetVal - ImmField;
9805 }
9806
9807 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant));
9808 assert(RemainderOffset + ImmField == COffsetVal);
9809 return {ImmField, RemainderOffset};
9810}
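// Worked example (illustrative, assuming a 13-bit signed immediate field,
// i.e. NumBits = 12): COffsetVal = 9000 splits into RemainderOffset =
// (9000 / 4096) * 4096 = 8192 plus ImmField = 808; the non-negative masking
// path produces the same split since 9000 >= 0.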
9811
9812bool SIInstrInfo::allowNegativeFlatOffset(uint64_t FlatVariant) const {
9813 if (ST.hasNegativeScratchOffsetBug() &&
9814 FlatVariant == SIInstrFlags::FlatScratch)
9815 return false;
9816
9817 return FlatVariant != SIInstrFlags::FLAT || AMDGPU::isGFX12Plus(ST);
9818}
9819
9820static unsigned subtargetEncodingFamily(const GCNSubtarget &ST) {
9821 switch (ST.getGeneration()) {
9822 default:
9823 break;
9824 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
9825 case AMDGPUSubtarget::SEA_ISLANDS:
9826 return SIEncodingFamily::SI;
9827 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
9828 case AMDGPUSubtarget::GFX9:
9829 return SIEncodingFamily::VI;
9830 case AMDGPUSubtarget::GFX10:
9831 return SIEncodingFamily::GFX10;
9832 case AMDGPUSubtarget::GFX11:
9833 return SIEncodingFamily::GFX11;
9834 case AMDGPUSubtarget::GFX12:
9835 return ST.hasGFX1250Insts() ? SIEncodingFamily::GFX1250
9836 : SIEncodingFamily::GFX12;
9837 }
9838 llvm_unreachable("Unknown subtarget generation!");
9839}
9840
9841bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
9842 switch(MCOp) {
9843 // These opcodes use indirect register addressing so
9844 // they need special handling by codegen (currently missing).
9845 // Therefore it is too risky to allow these opcodes
9846 // to be selected by dpp combiner or sdwa peepholer.
9847 case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
9848 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
9849 case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
9850 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
9851 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
9852 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
9853 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
9854 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
9855 return true;
9856 default:
9857 return false;
9858 }
9859}
9860
9861#define GENERATE_RENAMED_GFX9_CASES(OPCODE) \
9862 case OPCODE##_dpp: \
9863 case OPCODE##_e32: \
9864 case OPCODE##_e64: \
9865 case OPCODE##_e64_dpp: \
9866 case OPCODE##_sdwa:
9867
9868static bool isRenamedInGFX9(int Opcode) {
9869 switch (Opcode) {
9870 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADDC_U32)
9871 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_CO_U32)
9872 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_U32)
9873 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBBREV_U32)
9874 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBB_U32)
9875 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_CO_U32)
9876 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_U32)
9877 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_CO_U32)
9878 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_U32)
9879 //
9880 case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
9881 case AMDGPU::V_DIV_FIXUP_F16_gfx9_fake16_e64:
9882 case AMDGPU::V_FMA_F16_gfx9_e64:
9883 case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
9884 case AMDGPU::V_INTERP_P2_F16:
9885 case AMDGPU::V_MAD_F16_e64:
9886 case AMDGPU::V_MAD_U16_e64:
9887 case AMDGPU::V_MAD_I16_e64:
9888 return true;
9889 default:
9890 return false;
9891 }
9892}
9893
9894int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
9895 Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Opcode);
9896
9897 unsigned Gen = subtargetEncodingFamily(ST);
9898
9899 if (ST.getGeneration() == AMDGPUSubtarget::GFX9 && isRenamedInGFX9(Opcode))
9900 Gen = SIEncodingFamily::GFX9;
9901
9902 // Adjust the encoding family to GFX80 for D16 buffer instructions when the
9903 // subtarget has UnpackedD16VMem feature.
9904 // TODO: remove this when we discard GFX80 encoding.
9905 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
9906 Gen = SIEncodingFamily::GFX80;
9907
9908 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
9909 switch (ST.getGeneration()) {
9910 default:
9911 Gen = SIEncodingFamily::SDWA;
9912 break;
9913 case AMDGPUSubtarget::GFX9:
9914 Gen = SIEncodingFamily::SDWA9;
9915 break;
9916 case AMDGPUSubtarget::GFX10:
9917 Gen = SIEncodingFamily::SDWA10;
9918 break;
9919 }
9920 }
9921
9922 if (isMAI(Opcode)) {
9923 int MFMAOp = AMDGPU::getMFMAEarlyClobberOp(Opcode);
9924 if (MFMAOp != -1)
9925 Opcode = MFMAOp;
9926 }
9927
9928 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
9929
9930 if (MCOp == (uint16_t)-1 && ST.hasGFX1250Insts())
9931 MCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX12);
9932
9933 // -1 means that Opcode is already a native instruction.
9934 if (MCOp == -1)
9935 return Opcode;
9936
9937 if (ST.hasGFX90AInsts()) {
9938 uint16_t NMCOp = (uint16_t)-1;
9939 if (ST.hasGFX940Insts())
9940 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX940);
9941 if (NMCOp == (uint16_t)-1)
9942 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A);
9943 if (NMCOp == (uint16_t)-1)
9944 NMCOp = AMDGPU::getMCOpcode(Opcode, Gen);
9945 if (NMCOp != (uint16_t)-1)
9946 MCOp = NMCOp;
9947 }
9948
9949 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
9950 // no encoding in the given subtarget generation.
9951 if (MCOp == (uint16_t)-1)
9952 return -1;
9953
9954 if (isAsmOnlyOpcode(MCOp))
9955 return -1;
9956
9957 return MCOp;
9958}
9959
9960static
9961TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
9962 assert(RegOpnd.isReg());
9963 return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
9964 getRegSubRegPair(RegOpnd);
9965}
9966
9967TargetInstrInfo::RegSubRegPair llvm::getRegSequenceSubReg(MachineInstr &MI,
9968 unsigned SubReg) {
9969 assert(MI.isRegSequence());
9970 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
9971 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
9972 auto &RegOp = MI.getOperand(1 + 2 * I);
9973 return getRegOrUndef(RegOp);
9974 }
9975 return TargetInstrInfo::RegSubRegPair();
9976}
9977
9978// Try to find the definition of reg:subreg in subreg-manipulation pseudos
9979// Following a subreg of reg:subreg isn't supported
9980static bool followSubRegDef(MachineInstr &MI,
9981 TargetInstrInfo::RegSubRegPair &RSR) {
9982 if (!RSR.SubReg)
9983 return false;
9984 switch (MI.getOpcode()) {
9985 default: break;
9986 case AMDGPU::REG_SEQUENCE:
9987 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
9988 return true;
9989 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
9990 case AMDGPU::INSERT_SUBREG:
9991 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
9992 // inserted the subreg we're looking for
9993 RSR = getRegOrUndef(MI.getOperand(2));
9994 else { // the subreg in the rest of the reg
9995 auto R1 = getRegOrUndef(MI.getOperand(1));
9996 if (R1.SubReg) // subreg of subreg isn't supported
9997 return false;
9998 RSR.Reg = R1.Reg;
9999 }
10000 return true;
10001 }
10002 return false;
10003}
10004
10005MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
10006 MachineRegisterInfo &MRI) {
10007 assert(MRI.isSSA());
10008 if (!P.Reg.isVirtual())
10009 return nullptr;
10010
10011 auto RSR = P;
10012 auto *DefInst = MRI.getVRegDef(RSR.Reg);
10013 while (auto *MI = DefInst) {
10014 DefInst = nullptr;
10015 switch (MI->getOpcode()) {
10016 case AMDGPU::COPY:
10017 case AMDGPU::V_MOV_B32_e32: {
10018 auto &Op1 = MI->getOperand(1);
10019 if (Op1.isReg() && Op1.getReg().isVirtual()) {
10020 if (Op1.isUndef())
10021 return nullptr;
10022 RSR = getRegSubRegPair(Op1);
10023 DefInst = MRI.getVRegDef(RSR.Reg);
10024 }
10025 break;
10026 }
10027 default:
10028 if (followSubRegDef(*MI, RSR)) {
10029 if (!RSR.Reg)
10030 return nullptr;
10031 DefInst = MRI.getVRegDef(RSR.Reg);
10032 }
10033 }
10034 if (!DefInst)
10035 return MI;
10036 }
10037 return nullptr;
10038}
10039
10040bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
10041 Register VReg,
10042 const MachineInstr &DefMI,
10043 const MachineInstr &UseMI) {
10044 assert(MRI.isSSA() && "Must be run on SSA");
10045
10046 auto *TRI = MRI.getTargetRegisterInfo();
10047 auto *DefBB = DefMI.getParent();
10048
10049 // Don't bother searching between blocks, although it is possible this block
10050 // doesn't modify exec.
10051 if (UseMI.getParent() != DefBB)
10052 return true;
10053
10054 const int MaxInstScan = 20;
10055 int NumInst = 0;
10056
10057 // Stop scan at the use.
10058 auto E = UseMI.getIterator();
10059 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
10060 if (I->isDebugInstr())
10061 continue;
10062
10063 if (++NumInst > MaxInstScan)
10064 return true;
10065
10066 if (I->modifiesRegister(AMDGPU::EXEC, TRI))
10067 return true;
10068 }
10069
10070 return false;
10071}
10072
10073bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
10074 Register VReg,
10075 const MachineInstr &DefMI) {
10076 assert(MRI.isSSA() && "Must be run on SSA");
10077
10078 auto *TRI = MRI.getTargetRegisterInfo();
10079 auto *DefBB = DefMI.getParent();
10080
10081 const int MaxUseScan = 10;
10082 int NumUse = 0;
10083
10084 for (auto &Use : MRI.use_nodbg_operands(VReg)) {
10085 auto &UseInst = *Use.getParent();
10086 // Don't bother searching between blocks, although it is possible this block
10087 // doesn't modify exec.
10088 if (UseInst.getParent() != DefBB || UseInst.isPHI())
10089 return true;
10090
10091 if (++NumUse > MaxUseScan)
10092 return true;
10093 }
10094
10095 if (NumUse == 0)
10096 return false;
10097
10098 const int MaxInstScan = 20;
10099 int NumInst = 0;
10100
10101 // Stop scan when we have seen all the uses.
10102 for (auto I = std::next(DefMI.getIterator()); ; ++I) {
10103 assert(I != DefBB->end());
10104
10105 if (I->isDebugInstr())
10106 continue;
10107
10108 if (++NumInst > MaxInstScan)
10109 return true;
10110
10111 for (const MachineOperand &Op : I->operands()) {
10112 // We don't check reg masks here as they're used only on calls:
10113 // 1. EXEC is only considered const within one BB
10114 // 2. Call should be a terminator instruction if present in a BB
10115
10116 if (!Op.isReg())
10117 continue;
10118
10119 Register Reg = Op.getReg();
10120 if (Op.isUse()) {
10121 if (Reg == VReg && --NumUse == 0)
10122 return false;
10123 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
10124 return true;
10125 }
10126 }
10127}
10128
10129MachineInstr *SIInstrInfo::createPHIDestinationCopy(
10130 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
10131 const DebugLoc &DL, Register Src, Register Dst) const {
10132 auto Cur = MBB.begin();
10133 if (Cur != MBB.end())
10134 do {
10135 if (!Cur->isPHI() && Cur->readsRegister(Dst, /*TRI=*/nullptr))
10136 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
10137 ++Cur;
10138 } while (Cur != MBB.end() && Cur != LastPHIIt);
10139
10140 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
10141 Dst);
10142}
10143
10144 MachineInstr *SIInstrInfo::createPHISourceCopy(
10145 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
10146 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
10147 if (InsPt != MBB.end() &&
10148 (InsPt->getOpcode() == AMDGPU::SI_IF ||
10149 InsPt->getOpcode() == AMDGPU::SI_ELSE ||
10150 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
10151 InsPt->definesRegister(Src, /*TRI=*/nullptr)) {
10152 InsPt++;
10153 return BuildMI(MBB, InsPt, DL,
10154 get(AMDGPU::LaneMaskConstants::get(ST).MovTermOpc), Dst)
10155 .addReg(Src, 0, SrcSubReg)
10156 .addReg(AMDGPU::EXEC, RegState::Implicit);
10157 }
10158 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
10159 Dst);
10160}
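// A sketch of the special case above, assuming a wave64 subtarget (so the
// LaneMaskConstants MovTermOpc is S_MOV_B64_term); registers and blocks are
// hypothetical. When the insertion point is a SI_IF that defines the PHI
// source, the copy is emitted after it as an EXEC-reading terminator:
//
//   %src:sreg_64 = SI_IF %cond, %bb.merge, implicit-def $exec, ...
//   %dst:sreg_64 = S_MOV_B64_term %src, implicit $exec
//
// In every other case the generic TargetInstrInfo hook emits a plain COPY.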
10161
10162bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }
10163
10164 MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
10165 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
10166 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
10167 VirtRegMap *VRM) const {
10168 // This is a bit of a hack (copied from AArch64). Consider this instruction:
10169 //
10170 // %0:sreg_32 = COPY $m0
10171 //
10172 // We explicitly chose SReg_32 for the virtual register so such a copy might
10173 // be eliminated by RegisterCoalescer. However, that may not be possible, and
10174 // %0 may even spill. We can't spill $m0 normally (it would require copying to
10175 // a numbered SGPR anyway), and since it is in the SReg_32 register class,
10176 // TargetInstrInfo::foldMemoryOperand() is going to try.
10177 // A similar issue also exists with spilling and reloading $exec registers.
10178 //
10179 // To prevent that, constrain the %0 register class here.
10180 if (isFullCopyInstr(MI)) {
10181 Register DstReg = MI.getOperand(0).getReg();
10182 Register SrcReg = MI.getOperand(1).getReg();
10183 if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
10184 (DstReg.isVirtual() != SrcReg.isVirtual())) {
10185 MachineRegisterInfo &MRI = MF.getRegInfo();
10186 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
10187 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
10188 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
10189 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
10190 return nullptr;
10191 }
10192 if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
10193 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
10194 return nullptr;
10195 }
10196 }
10197 }
10198
10199 return nullptr;
10200}
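// Note that this hook never actually folds a memory operand: when it sees
// such a copy it only tightens the register class, e.g. (hypothetical
// vreg):
//
//   %0:sreg_32 = COPY $m0   ; %0 is reconstrained to SReg_32_XM0_XEXEC
//
// and then returns nullptr on every path, so the generic folding logic
// gives up instead of spilling through M0 or EXEC.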
10201
10202 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
10203 const MachineInstr &MI,
10204 unsigned *PredCost) const {
10205 if (MI.isBundle()) {
10206 MachineBasicBlock::const_instr_iterator I(MI.getIterator());
10207 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
10208 unsigned Lat = 0, Count = 0;
10209 for (++I; I != E && I->isBundledWithPred(); ++I) {
10210 ++Count;
10211 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
10212 }
10213 return Lat + Count - 1;
10214 }
10215
10216 return SchedModel.computeInstrLatency(&MI);
10217}
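// A worked example of the bundle formula above, with hypothetical
// latencies: a bundle of three instructions with latencies {4, 2, 6} gives
// Count = 3 and Lat = 6, so the reported latency is 6 + 3 - 1 = 8, i.e.
// the maximum member latency plus one issue slot per extra bundled
// instruction.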
10218
10219 InstructionUniformity
10220 SIInstrInfo::getGenericInstructionUniformity(const MachineInstr &MI) const {
10221 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
10222 unsigned Opcode = MI.getOpcode();
10223
10224 auto HandleAddrSpaceCast = [this, &MRI](const MachineInstr &MI) {
10225 Register Dst = MI.getOperand(0).getReg();
10226 Register Src = isa<GIntrinsic>(MI) ? MI.getOperand(2).getReg()
10227 : MI.getOperand(1).getReg();
10228 LLT DstTy = MRI.getType(Dst);
10229 LLT SrcTy = MRI.getType(Src);
10230 unsigned DstAS = DstTy.getAddressSpace();
10231 unsigned SrcAS = SrcTy.getAddressSpace();
10232 return SrcAS == AMDGPUAS::PRIVATE_ADDRESS &&
10233 DstAS == AMDGPUAS::FLAT_ADDRESS &&
10234 ST.hasGloballyAddressableScratch()
10235 ? InstructionUniformity::NeverUniform
10236 : InstructionUniformity::Default;
10237 };
10238
10239 // If the target supports globally addressable scratch, the mapping from
10240 // scratch memory to the flat aperture changes; an address space cast is
10241 // therefore no longer uniform.
10242 if (Opcode == TargetOpcode::G_ADDRSPACE_CAST)
10243 return HandleAddrSpaceCast(MI);
10244
10245 if (auto *GI = dyn_cast<GIntrinsic>(&MI)) {
10246 auto IID = GI->getIntrinsicID();
10247 if (AMDGPU::isIntrinsicSourceOfDivergence(IID))
10248 return InstructionUniformity::NeverUniform;
10249 if (AMDGPU::isIntrinsicAlwaysUniform(IID))
10250 return InstructionUniformity::AlwaysUniform;
10251
10252 switch (IID) {
10253 case Intrinsic::amdgcn_addrspacecast_nonnull:
10254 return HandleAddrSpaceCast(MI);
10255 case Intrinsic::amdgcn_if:
10256 case Intrinsic::amdgcn_else:
10257 // FIXME: Uniform if second result
10258 break;
10259 }
10260
10261 return InstructionUniformity::Default;
10262 }
10263
10264 // Loads from the private and flat address spaces are divergent, because
10265 // threads can execute the load instruction with the same inputs and get
10266 // different results.
10267 //
10268 // All other loads are not divergent, because if threads issue loads with the
10269 // same arguments, they will always get the same result.
10270 if (Opcode == AMDGPU::G_LOAD || Opcode == AMDGPU::G_ZEXTLOAD ||
10271 Opcode == AMDGPU::G_SEXTLOAD) {
10272 if (MI.memoperands_empty())
10273 return InstructionUniformity::NeverUniform; // conservative assumption
10274
10275 if (llvm::any_of(MI.memoperands(), [](const MachineMemOperand *mmo) {
10276 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10277 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10278 })) {
10279 // At least one MMO in a non-global address space.
10280 return InstructionUniformity::NeverUniform;
10281 }
10282 return InstructionUniformity::Default;
10283 }
10284
10285 if (SIInstrInfo::isGenericAtomicRMWOpcode(Opcode) ||
10286 Opcode == AMDGPU::G_ATOMIC_CMPXCHG ||
10287 Opcode == AMDGPU::G_ATOMIC_CMPXCHG_WITH_SUCCESS ||
10288 AMDGPU::isGenericAtomic(Opcode)) {
10289 return InstructionUniformity::NeverUniform;
10290 }
10291 return InstructionUniformity::Default;
10292 }
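// An illustrative classification under the generic rules above, using
// hypothetical generic MIR: a load such as
//
//   %d:_(s32) = G_LOAD %p:_(p0) :: (load (s32), addrspace 0)
//
// carries a FLAT_ADDRESS memory operand and is reported NeverUniform,
// while the same G_LOAD whose MMOs are all in global or constant address
// spaces falls through to Default.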
10293
10294 InstructionUniformity
10295 SIInstrInfo::getInstructionUniformity(const MachineInstr &MI) const {
10296
10297 if (isNeverUniform(MI))
10298 return InstructionUniformity::NeverUniform;
10299
10300 unsigned opcode = MI.getOpcode();
10301 if (opcode == AMDGPU::V_READLANE_B32 ||
10302 opcode == AMDGPU::V_READFIRSTLANE_B32 ||
10303 opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR)
10304 return InstructionUniformity::AlwaysUniform;
10305
10306 if (isCopyInstr(MI)) {
10307 const MachineOperand &srcOp = MI.getOperand(1);
10308 if (srcOp.isReg() && srcOp.getReg().isPhysical()) {
10309 const TargetRegisterClass *regClass =
10310 RI.getPhysRegBaseClass(srcOp.getReg());
10311 return RI.isSGPRClass(regClass) ? InstructionUniformity::AlwaysUniform
10312 : InstructionUniformity::NeverUniform;
10313 }
10314 return InstructionUniformity::Default;
10315 }
10316
10317 // GMIR handling
10318 if (MI.isPreISelOpcode())
10319 return SIInstrInfo::getGenericInstructionUniformity(MI);
10320
10321 // Atomics are divergent because they are executed sequentially: when an
10322 // atomic operation refers to the same address in each thread, then each
10323 // thread after the first sees the value written by the previous thread as
10324 // its original value.
10325
10326 if (isAtomic(MI))
10327 return InstructionUniformity::NeverUniform;
10328
10329 // Loads from the private and flat address spaces are divergent, because
10330 // threads can execute the load instruction with the same inputs and get
10331 // different results.
10332 if (isFLAT(MI) && MI.mayLoad()) {
10333 if (MI.memoperands_empty())
10334 return InstructionUniformity::NeverUniform; // conservative assumption
10335
10336 if (llvm::any_of(MI.memoperands(), [](const MachineMemOperand *mmo) {
10337 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10338 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10339 })) {
10340 // At least one MMO in a non-global address space.
10341 return InstructionUniformity::NeverUniform;
10342 }
10343
10344 return InstructionUniformity::Default;
10345 }
10346
10347 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
10348 const AMDGPURegisterBankInfo *RBI = ST.getRegBankInfo();
10349
10350 // FIXME: It's conceptually broken to report this for an instruction, and not
10351 // a specific def operand. For inline asm in particular, there could be mixed
10352 // uniform and divergent results.
10353 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
10354 const MachineOperand &SrcOp = MI.getOperand(I);
10355 if (!SrcOp.isReg())
10356 continue;
10357
10358 Register Reg = SrcOp.getReg();
10359 if (!Reg || !SrcOp.readsReg())
10360 continue;
10361
10362 // If RegBank is null, this is unassigned or an unallocatable special
10363 // register, which are all scalars.
10364 const RegisterBank *RegBank = RBI->getRegBank(Reg, MRI, RI);
10365 if (RegBank && RegBank->getID() != AMDGPU::SGPRRegBankID)
10366 return InstructionUniformity::NeverUniform;
10367 }
10368
10369 // TODO: The uniformity checks above can be rearranged for better
10370 // readability.
10371
10372 // TODO: amdgcn.{ballot, [if]cmp} should be AlwaysUniform, but they are
10373 // currently turned into no-op COPYs by SelectionDAG ISel and are
10374 // therefore no longer recognizable.
10375
10376 return InstructionUniformity::Default;
10377 }
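// Illustrative results of the rules above (hypothetical MIR operands):
//
//   %s:sgpr_32 = COPY $sgpr4            ; physical SGPR source -> AlwaysUniform
//   %r:sreg_32 = V_READFIRSTLANE_B32 %v ; opcode check -> AlwaysUniform
//   a FLAT load with a flat/private MMO ; -> NeverUniform
//   any instruction reading a VGPR-bank register -> NeverUniform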
10378
10379 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
10380 switch (MF.getFunction().getCallingConv()) {
10381 case CallingConv::AMDGPU_PS:
10382 return 1;
10383 case CallingConv::AMDGPU_VS:
10384 return 2;
10385 case CallingConv::AMDGPU_GS:
10386 return 3;
10387 case CallingConv::AMDGPU_HS:
10388 case CallingConv::AMDGPU_LS:
10389 case CallingConv::AMDGPU_ES: {
10390 const Function &F = MF.getFunction();
10391 F.getContext().diagnose(DiagnosticInfoUnsupported(
10392 F, "ds_ordered_count unsupported for this calling conv"));
10393 [[fallthrough]];
10394 }
10395 case CallingConv::AMDGPU_CS:
10396 case CallingConv::AMDGPU_KERNEL:
10397 case CallingConv::C:
10398 case CallingConv::Fast:
10399 default:
10400 // Assume other calling conventions are various compute callable functions
10401 return 0;
10402 }
10403}
10404
10405 bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
10406 Register &SrcReg2, int64_t &CmpMask,
10407 int64_t &CmpValue) const {
10408 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())
10409 return false;
10410
10411 switch (MI.getOpcode()) {
10412 default:
10413 break;
10414 case AMDGPU::S_CMP_EQ_U32:
10415 case AMDGPU::S_CMP_EQ_I32:
10416 case AMDGPU::S_CMP_LG_U32:
10417 case AMDGPU::S_CMP_LG_I32:
10418 case AMDGPU::S_CMP_LT_U32:
10419 case AMDGPU::S_CMP_LT_I32:
10420 case AMDGPU::S_CMP_GT_U32:
10421 case AMDGPU::S_CMP_GT_I32:
10422 case AMDGPU::S_CMP_LE_U32:
10423 case AMDGPU::S_CMP_LE_I32:
10424 case AMDGPU::S_CMP_GE_U32:
10425 case AMDGPU::S_CMP_GE_I32:
10426 case AMDGPU::S_CMP_EQ_U64:
10427 case AMDGPU::S_CMP_LG_U64:
10428 SrcReg = MI.getOperand(0).getReg();
10429 if (MI.getOperand(1).isReg()) {
10430 if (MI.getOperand(1).getSubReg())
10431 return false;
10432 SrcReg2 = MI.getOperand(1).getReg();
10433 CmpValue = 0;
10434 } else if (MI.getOperand(1).isImm()) {
10435 SrcReg2 = Register();
10436 CmpValue = MI.getOperand(1).getImm();
10437 } else {
10438 return false;
10439 }
10440 CmpMask = ~0;
10441 return true;
10442 case AMDGPU::S_CMPK_EQ_U32:
10443 case AMDGPU::S_CMPK_EQ_I32:
10444 case AMDGPU::S_CMPK_LG_U32:
10445 case AMDGPU::S_CMPK_LG_I32:
10446 case AMDGPU::S_CMPK_LT_U32:
10447 case AMDGPU::S_CMPK_LT_I32:
10448 case AMDGPU::S_CMPK_GT_U32:
10449 case AMDGPU::S_CMPK_GT_I32:
10450 case AMDGPU::S_CMPK_LE_U32:
10451 case AMDGPU::S_CMPK_LE_I32:
10452 case AMDGPU::S_CMPK_GE_U32:
10453 case AMDGPU::S_CMPK_GE_I32:
10454 SrcReg = MI.getOperand(0).getReg();
10455 SrcReg2 = Register();
10456 CmpValue = MI.getOperand(1).getImm();
10457 CmpMask = ~0;
10458 return true;
10459 }
10460
10461 return false;
10462}
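// An illustrative result of the analysis above, with a hypothetical vreg:
// for
//
//   S_CMP_EQ_U32 %0, 5, implicit-def $scc
//
// it reports SrcReg = %0, SrcReg2 = <empty>, CmpValue = 5 and CmpMask = ~0;
// for a register-register compare, SrcReg2 carries the second register and
// CmpValue is 0.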
10463
10464 bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
10465 Register SrcReg2, int64_t CmpMask,
10466 int64_t CmpValue,
10467 const MachineRegisterInfo *MRI) const {
10468 if (!SrcReg || SrcReg.isPhysical())
10469 return false;
10470
10471 if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue))
10472 return false;
10473
10474 const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
10475 this](int64_t ExpectedValue, unsigned SrcSize,
10476 bool IsReversible, bool IsSigned) -> bool {
10477 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
10478 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
10479 // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
10480 // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
10481 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n
10482 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
10483 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
10484 // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
10485 // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
10486 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n
10487 //
10488 // Signed ge/gt are not used for the sign bit.
10489 //
10490 // If result of the AND is unused except in the compare:
10491 // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n
10492 //
10493 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
10494 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
10495 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n
10496 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
10497 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
10498 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n
10499
10500 MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg);
10501 if (!Def || Def->getParent() != CmpInstr.getParent())
10502 return false;
10503
10504 if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
10505 Def->getOpcode() != AMDGPU::S_AND_B64)
10506 return false;
10507
10508 int64_t Mask;
10509 const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {
10510 if (MO->isImm())
10511 Mask = MO->getImm();
10512 else if (!getFoldableImm(MO, Mask))
10513 return false;
10514 Mask &= maxUIntN(SrcSize);
10515 return isPowerOf2_64(Mask);
10516 };
10517
10518 MachineOperand *SrcOp = &Def->getOperand(1);
10519 if (isMask(SrcOp))
10520 SrcOp = &Def->getOperand(2);
10521 else if (isMask(&Def->getOperand(2)))
10522 SrcOp = &Def->getOperand(1);
10523 else
10524 return false;
10525
10526 // A valid Mask is required to have a single bit set, hence a non-zero and
10527 // power-of-two value. This verifies that we will not do a 64-bit shift below.
10528 assert(llvm::has_single_bit<uint64_t>(Mask) && "Invalid mask.");
10529 unsigned BitNo = llvm::countr_zero((uint64_t)Mask);
10530 if (IsSigned && BitNo == SrcSize - 1)
10531 return false;
10532
10533 ExpectedValue <<= BitNo;
10534
10535 bool IsReversedCC = false;
10536 if (CmpValue != ExpectedValue) {
10537 if (!IsReversible)
10538 return false;
10539 IsReversedCC = CmpValue == (ExpectedValue ^ Mask);
10540 if (!IsReversedCC)
10541 return false;
10542 }
10543
10544 Register DefReg = Def->getOperand(0).getReg();
10545 if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
10546 return false;
10547
10548 for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator();
10549 I != E; ++I) {
10550 if (I->modifiesRegister(AMDGPU::SCC, &RI) ||
10551 I->killsRegister(AMDGPU::SCC, &RI))
10552 return false;
10553 }
10554
10555 MachineOperand *SccDef =
10556 Def->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr);
10557 SccDef->setIsDead(false);
10558 CmpInstr.eraseFromParent();
10559
10560 if (!MRI->use_nodbg_empty(DefReg)) {
10561 assert(!IsReversedCC);
10562 return true;
10563 }
10564
10565 // Replace an AND whose result is unused with an S_BITCMP.
10566 MachineBasicBlock *MBB = Def->getParent();
10567
10568 unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
10569 : AMDGPU::S_BITCMP1_B32
10570 : IsReversedCC ? AMDGPU::S_BITCMP0_B64
10571 : AMDGPU::S_BITCMP1_B64;
10572
10573 BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc))
10574 .add(*SrcOp)
10575 .addImm(BitNo);
10576 Def->eraseFromParent();
10577
10578 return true;
10579 };
10580
10581 switch (CmpInstr.getOpcode()) {
10582 default:
10583 break;
10584 case AMDGPU::S_CMP_EQ_U32:
10585 case AMDGPU::S_CMP_EQ_I32:
10586 case AMDGPU::S_CMPK_EQ_U32:
10587 case AMDGPU::S_CMPK_EQ_I32:
10588 return optimizeCmpAnd(1, 32, true, false);
10589 case AMDGPU::S_CMP_GE_U32:
10590 case AMDGPU::S_CMPK_GE_U32:
10591 return optimizeCmpAnd(1, 32, false, false);
10592 case AMDGPU::S_CMP_GE_I32:
10593 case AMDGPU::S_CMPK_GE_I32:
10594 return optimizeCmpAnd(1, 32, false, true);
10595 case AMDGPU::S_CMP_EQ_U64:
10596 return optimizeCmpAnd(1, 64, true, false);
10597 case AMDGPU::S_CMP_LG_U32:
10598 case AMDGPU::S_CMP_LG_I32:
10599 case AMDGPU::S_CMPK_LG_U32:
10600 case AMDGPU::S_CMPK_LG_I32:
10601 return optimizeCmpAnd(0, 32, true, false);
10602 case AMDGPU::S_CMP_GT_U32:
10603 case AMDGPU::S_CMPK_GT_U32:
10604 return optimizeCmpAnd(0, 32, false, false);
10605 case AMDGPU::S_CMP_GT_I32:
10606 case AMDGPU::S_CMPK_GT_I32:
10607 return optimizeCmpAnd(0, 32, false, true);
10608 case AMDGPU::S_CMP_LG_U64:
10609 return optimizeCmpAnd(0, 64, true, false);
10610 }
10611
10612 return false;
10613}
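// A before/after sketch of the peephole above, with hypothetical vregs and
// the single-bit mask 1 << 2:
//
//   %1:sreg_32 = S_AND_B32 %0, 4, implicit-def $scc
//   S_CMP_LG_U32 %1, 0, implicit-def $scc
//
// The compare is erased and SCC is kept live out of the S_AND_B32; if %1
// then has no remaining uses, the AND itself is rewritten to
//
//   S_BITCMP1_B32 %0, 2, implicit-def $scc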
10614
10615 void SIInstrInfo::enforceOperandRCAlignment(MachineInstr &MI,
10616 AMDGPU::OpName OpName) const {
10617 if (!ST.needsAlignedVGPRs())
10618 return;
10619
10620 int OpNo = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
10621 if (OpNo < 0)
10622 return;
10623 MachineOperand &Op = MI.getOperand(OpNo);
10624 if (getOpSize(MI, OpNo) > 4)
10625 return;
10626
10627 // Add implicit aligned super-reg to force alignment on the data operand.
10628 const DebugLoc &DL = MI.getDebugLoc();
10629 MachineBasicBlock *BB = MI.getParent();
10630 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
10631 Register DataReg = Op.getReg();
10632 bool IsAGPR = RI.isAGPR(MRI, DataReg);
10633 Register Undef = MRI.createVirtualRegister(
10634 IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);
10635 BuildMI(*BB, MI, DL, get(AMDGPU::IMPLICIT_DEF), Undef);
10636 Register NewVR =
10637 MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass
10638 : &AMDGPU::VReg_64_Align2RegClass);
10639 BuildMI(*BB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewVR)
10640 .addReg(DataReg, 0, Op.getSubReg())
10641 .addImm(AMDGPU::sub0)
10642 .addReg(Undef)
10643 .addImm(AMDGPU::sub1);
10644 Op.setReg(NewVR);
10645 Op.setSubReg(AMDGPU::sub0);
10646 MI.addOperand(MachineOperand::CreateReg(NewVR, false, true));
10647}
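// A before/after sketch of the realignment above for a 32-bit VGPR data
// operand on a subtarget that needs aligned VGPR tuples (hypothetical MIR
// and instruction choice):
//
//   before: DS_GWS_INIT %data:vgpr_32, 0, implicit $m0, implicit $exec
//   after:  %u:vgpr_32 = IMPLICIT_DEF
//           %p:vreg_64_align2 = REG_SEQUENCE %data, %subreg.sub0,
//                                            %u, %subreg.sub1
//           DS_GWS_INIT %p.sub0, 0, implicit $m0, implicit $exec, implicit %p
//
// The implicit use of the even-aligned pair forces the allocator to assign
// %data an even-numbered VGPR.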
10648
10649 bool SIInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
10650 if (isIGLP(*MI))
10651 return false;
10652
10653 return TargetInstrInfo::isGlobalMemoryObject(MI);
10654 }
10655
10656 bool SIInstrInfo::isXDLWMMA(const MachineInstr &MI) const {
10657 if (!isWMMA(MI) && !isSWMMAC(MI))
10658 return false;
10659
10660 if (AMDGPU::isGFX1250(ST))
10661 return AMDGPU::getWMMAIsXDL(MI.getOpcode());
10662
10663 return true;
10664}
10665
10666 bool SIInstrInfo::isXDL(const MachineInstr &MI) const {
10667 unsigned Opcode = MI.getOpcode();
10668
10669 if (AMDGPU::isGFX12Plus(ST))
10670 return isDOT(MI) || isXDLWMMA(MI);
10671
10672 if (!isMAI(MI) || isDGEMM(Opcode) ||
10673 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
10674 Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
10675 return false;
10676
10677 if (!ST.hasGFX940Insts())
10678 return true;
10679
10680 return AMDGPU::getMAIIsGFX940XDL(Opcode);
10681}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
AMDGPU Register Bank Select
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
static bool isUndef(const MachineInstr &MI)
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
R600 Clause Merge
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static cl::opt< bool > Fix16BitCopies("amdgpu-fix-16-bit-physreg-copies", cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), cl::init(true), cl::ReallyHidden)
static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RC, bool Forward)
static unsigned getNewFMAInst(const GCNSubtarget &ST, unsigned Opc)
static void indirectCopyToAGPR(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, RegScavenger &RS, bool RegsOverlap, Register ImpDefSuperReg=Register(), Register ImpUseSuperReg=Register())
Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908.
static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize)
static bool compareMachineOp(const MachineOperand &Op0, const MachineOperand &Op1)
static bool isStride64(unsigned Opc)
#define GENERATE_RENAMED_GFX9_CASES(OPCODE)
static std::tuple< unsigned, unsigned > extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc)
static bool followSubRegDef(MachineInstr &MI, TargetInstrInfo::RegSubRegPair &RSR)
static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize)
static MachineInstr * swapImmOperands(MachineInstr &MI, MachineOperand &NonRegOp1, MachineOperand &NonRegOp2)
static void copyFlagsToImplicitVCC(MachineInstr &MI, const MachineOperand &Orig)
static void emitLoadScalarOpsFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, MachineBasicBlock &LoopBB, MachineBasicBlock &BodyBB, const DebugLoc &DL, ArrayRef< MachineOperand * > ScalarOps)
static bool offsetsDoNotOverlap(LocationSize WidthA, int OffsetA, LocationSize WidthB, int OffsetB)
static unsigned getWWMRegSpillSaveOpcode(unsigned Size, bool IsVectorSuperClass)
static bool memOpsHaveSameBaseOperands(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getWWMRegSpillRestoreOpcode(unsigned Size, bool IsVectorSuperClass)
static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, int64_t &Imm, MachineInstr **DefMI=nullptr)
static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize)
static unsigned subtargetEncodingFamily(const GCNSubtarget &ST)
static void preserveCondRegFlags(MachineOperand &CondReg, const MachineOperand &OrigCond)
static Register findImplicitSGPRRead(const MachineInstr &MI)
static unsigned getNewFMAAKInst(const GCNSubtarget &ST, unsigned Opc)
static cl::opt< unsigned > BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), cl::desc("Restrict range of branch instructions (DEBUG)"))
static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, MachineInstr &NewMI)
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getSGPRSpillRestoreOpcode(unsigned Size)
static bool isRegOrFI(const MachineOperand &MO)
static unsigned getSGPRSpillSaveOpcode(unsigned Size)
static constexpr AMDGPU::OpName ModifierOpNames[]
static unsigned getVGPRSpillSaveOpcode(unsigned Size)
static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const char *Msg="illegal VGPR to SGPR copy")
static MachineInstr * swapRegAndNonRegOperand(MachineInstr &MI, MachineOperand &RegOp, MachineOperand &NonRegOp)
static const TargetRegisterClass * adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI, const MCInstrDesc &TID, unsigned RCID)
static bool shouldReadExec(const MachineInstr &MI)
static unsigned getNewFMAMKInst(const GCNSubtarget &ST, unsigned Opc)
static bool isRenamedInGFX9(int Opcode)
static TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd)
static bool changesVGPRIndexingMode(const MachineInstr &MI)
static bool isSubRegOf(const SIRegisterInfo &TRI, const MachineOperand &SuperVec, const MachineOperand &SubReg)
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, AMDGPU::OpName OpName)
Returns true if both nodes have the same value for the given operand Op, or if both nodes do not have...
static unsigned getAVSpillSaveOpcode(unsigned Size)
static unsigned getNumOperandsNoGlue(SDNode *Node)
static bool canRemat(const MachineInstr &MI)
static MachineBasicBlock * loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, ArrayRef< MachineOperand * > ScalarOps, MachineDominatorTree *MDT, MachineBasicBlock::iterator Begin=nullptr, MachineBasicBlock::iterator End=nullptr)
static unsigned getAVSpillRestoreOpcode(unsigned Size)
static unsigned getVGPRSpillRestoreOpcode(unsigned Size)
Interface definition for SIInstrInfo.
bool IsDead
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const LaneMaskConstants & get(const GCNSubtarget &ST)
Class for arbitrary precision integers.
Definition APInt.h:78
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
const T & front() const
front - Get the first element.
Definition ArrayRef.h:150
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
uint64_t getZExtValue() const
A debug info location.
Definition DebugLoc.h:124
Diagnostic information for unsupported feature in backend.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
bool hasAddNoCarry() const
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
void getExitingBlocks(SmallVectorImpl< BlockT * > &TmpStorage) const
Return all blocks of this cycle that have successor outside of this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
const GenericCycle * getParentCycle() const
Itinerary data supplied by a subtarget to be used by a target.
constexpr unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LiveInterval - This class represents the liveness of a register, or stack slot.
bool hasInterval(Register Reg) const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
bool hasValue() const
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
static const MCBinaryExpr * createAnd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:348
static const MCBinaryExpr * createAShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:418
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
unsigned getOpcode() const
Return the opcode number for this descriptor.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:87
uint8_t OperandType
Information about the type of the operand.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition MCInstrDesc.h:96
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void setVariableValue(const MCExpr *Value)
Definition MCSymbol.cpp:50
Helper class for constructing bundles of MachineInstrs.
MachineBasicBlock::instr_iterator begin() const
Return an iterator to the first bundled instruction.
MIBundleBuilder & append(MachineInstr *MI)
Insert MI into MBB by appending it to the instructions in the bundle.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCopy() const
const MachineBasicBlock * getParent() const
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
LLVM_ABI void ChangeToFrameIndex(int Idx, unsigned TargetFlags=0)
Replace this operand with a frame index.
void setImm(int64_t immVal)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
void setIsKill(bool Val=true)
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isTargetIndex() const
isTargetIndex - Tests if this is a MO_TargetIndex operand.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
bool isRegUsed(Register Reg, bool includeReserved=true) const
Return if a specific register is currently used.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
void backward()
Update internal register state and move MBB iterator backwards.
void enterBasicBlock(MachineBasicBlock &MBB)
Start tracking liveness from the begin of basic block MBB.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:102
constexpr bool isValid() const
Definition Register.h:107
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isLegalMUBUFImmOffset(unsigned Imm) const
bool isInlineConstant(const APInt &Imm) const
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const
Fix operands in MI to satisfy constant bus requirements.
static bool isDS(const MachineInstr &MI)
MachineBasicBlock * legalizeOperands(MachineInstr &MI, MachineDominatorTree *MDT=nullptr) const
Legalize all operands in this instruction.
bool areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, int64_t &Offset0, int64_t &Offset1) const override
unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const final
Register isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
static bool isNeverUniform(const MachineInstr &MI)
unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const
Return the size in bytes of the operand OpNo on the given.
bool isXDLWMMA(const MachineInstr &MI) const
bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const override
uint64_t getDefaultRsrcDataFormat() const
static bool isSOPP(const MachineInstr &MI)
InstructionUniformity getGenericInstructionUniformity(const MachineInstr &MI) const
bool isIGLP(unsigned Opcode) const
static bool isFLATScratch(const MachineInstr &MI)
const MCInstrDesc & getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, bool IsSGPR) const
MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg) const
Return a partially built integer add instruction without carry.
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0, int64_t Offset1, unsigned NumLoads) const override
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, Align Alignment=Align(4)) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
void moveToVALU(SIInstrWorklist &Worklist, MachineDominatorTree *MDT) const
Replace the instructions opcode with the equivalent VALU opcode.
static bool isSMRD(const MachineInstr &MI)
void restoreExec(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, SlotIndexes *Indexes=nullptr) const
bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Returns true if this operand uses the constant bus.
static unsigned getMaxMUBUFImmOffset(const GCNSubtarget &ST)
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool mayAccessScratchThroughFlat(const MachineInstr &MI) const
void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
static std::optional< int64_t > extractSubregFromImm(int64_t ImmVal, unsigned SubRegIndex)
Return the extracted immediate value in a subregister use from a constant materialized in a super reg...
Register isStackAccess(const MachineInstr &MI, int &FrameIndex) const
static bool isMTBUF(const MachineInstr &MI)
const MCInstrDesc & getIndirectGPRIDXPseudo(unsigned VecSize, bool IsIndirectSrc) const
void insertReturn(MachineBasicBlock &MBB) const
static bool isDGEMM(unsigned Opcode)
static bool isEXP(const MachineInstr &MI)
static bool isSALU(const MachineInstr &MI)
void legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineBasicBlock::iterator I, const TargetRegisterClass *DstRC, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const
MachineInstr * buildShrunkInst(MachineInstr &MI, unsigned NewOpcode) const
unsigned getInstBundleSize(const MachineInstr &MI) const
static bool isVOP2(const MachineInstr &MI)
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
static bool isSDWA(const MachineInstr &MI)
const MCInstrDesc & getKillTerminatorFromPseudo(unsigned Opcode) const
void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const override
static bool isGather4(const MachineInstr &MI)
MachineInstr * getWholeWaveFunctionSetup(MachineFunction &MF) const
bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO would be a valid operand for the given operand definition OpInfo.
static bool isDOT(const MachineInstr &MI)
MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const override
bool hasModifiers(unsigned Opcode) const
Return true if this instruction has any modifiers.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
static bool isSWMMAC(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
bool isWave32() const
bool isHighLatencyDef(int Opc) const override
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const
Legalize the OpIndex operand of this instruction by inserting a MOV.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isVOPC(const MachineInstr &MI)
void removeModOperands(MachineInstr &MI) const
std::pair< int64_t, int64_t > splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, uint64_t FlatVariant) const
Split COffsetVal into {immediate offset field, remainder offset} values.
bool isSpill(uint16_t Opcode) const
unsigned getVectorRegSpillRestoreOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
bool isXDL(const MachineInstr &MI) const
static bool isVIMAGE(const MachineInstr &MI)
void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const
static bool isSOP2(const MachineInstr &MI)
static bool isGWS(const MachineInstr &MI)
bool isLegalAV64PseudoImm(uint64_t Imm) const
Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const
const TargetRegisterClass * getPreferredSelectRegClass(unsigned Size) const
bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx, unsigned toIdx) const
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
static bool isFLATGlobal(const MachineInstr &MI)
bool isGlobalMemoryObject(const MachineInstr *MI) const override
static bool isVSAMPLE(const MachineInstr &MI)
bool isBufferSMRD(const MachineInstr &MI) const
static bool isKillTerminator(unsigned Opcode)
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx0, unsigned &SrcOpIdx1) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, bool IsSCCLive, SlotIndexes *Indexes=nullptr) const
bool hasVALU32BitEncoding(unsigned Opcode) const
Return true if this 64-bit VALU instruction has a 32-bit encoding.
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
unsigned buildExtractSubReg(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const
Legalize operands in MI by either commuting it or inserting a copy of src1.
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const final
static bool isImage(const MachineInstr &MI)
static bool isSOPK(const MachineInstr &MI)
const TargetRegisterClass * getOpRegClass(const MachineInstr &MI, unsigned OpNo) const
Return the correct register class for OpNo.
MachineBasicBlock * insertSimulatedTrap(MachineRegisterInfo &MRI, MachineBasicBlock &MBB, MachineInstr &MI, const DebugLoc &DL) const
Build instructions that simulate the behavior of a s_trap 2 instructions for hardware (namely,...
static unsigned getNonSoftWaitcntOpcode(unsigned Opcode)
static unsigned getDSShaderTypeValue(const MachineFunction &MF)
static bool isFoldableCopy(const MachineInstr &MI)
bool isIgnorableUse(const MachineOperand &MO) const override
static bool isMUBUF(const MachineInstr &MI)
bool expandPostRAPseudo(MachineInstr &MI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI) const override
InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const override final
static bool isSegmentSpecificFLAT(const MachineInstr &MI)
static bool isVOP3(const MCInstrDesc &Desc)
bool physRegUsesConstantBus(const MachineOperand &Reg) const
static bool isF16PseudoScalarTrans(unsigned Opcode)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
static bool isDPP(const MachineInstr &MI)
bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const
bool isLowLatencyInstruction(const MachineInstr &MI) const
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
bool isAlwaysGDS(uint16_t Opcode) const
static bool isMAI(const MCInstrDesc &Desc)
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void legalizeOperandsVALUt16(MachineInstr &Inst, MachineRegisterInfo &MRI) const
Fix operands in Inst to fix 16bit SALU to VALU lowering.
void moveToVALUImpl(SIInstrWorklist &Worklist, MachineDominatorTree *MDT, MachineInstr &Inst) const
bool isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo, const MachineOperand &MO) const
bool canShrink(const MachineInstr &MI, const MachineRegisterInfo &MRI) const
bool isAsmOnlyOpcode(int MCOp) const
Check if this instruction should only be used by assembler.
static bool isVGPRSpill(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
This is used by the post-RA scheduler (SchedulePostRAList.cpp).
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, uint64_t FlatVariant) const
Returns if Offset is legal for the subtarget as the offset to a FLAT encoded instruction with the giv...
static bool isWWMRegSpillOpcode(uint16_t Opcode)
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
int64_t getNamedImmOperand(const MachineInstr &MI, AMDGPU::OpName OperandName) const
Get required immediate operand.
ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const override
bool regUsesConstantBus(const MachineOperand &Reg, const MachineRegisterInfo &MRI) const
static bool isMIMG(const MachineInstr &MI)
MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO (a register operand) is a legal register for the given operand description or operand ind...
bool allowNegativeFlatOffset(uint64_t FlatVariant) const
Returns true if negative offsets are allowed for the given FlatVariant.
static unsigned getNumWaitStates(const MachineInstr &MI)
Return the number of wait states that result from executing this instruction.
unsigned getVectorRegSpillSaveOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
unsigned getVALUOp(const MachineInstr &MI) const
static bool modifiesModeRegister(const MachineInstr &MI)
Return true if the instruction modifies the mode register.q.
Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI, const TargetRegisterClass *DstRC=nullptr) const
Copy a value from a VGPR (SrcReg) to SGPR.
bool hasDivergentBranch(const MachineBasicBlock *MBB) const
Return whether the block terminate with divergent branch.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void fixImplicitOperands(MachineInstr &MI) const
bool moveFlatAddrToVGPR(MachineInstr &Inst) const
Change SADDR form of a FLAT Inst to its VADDR form if saddr operand was moved to VGPR.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, AMDGPU::OpName Src0OpName, MachineOperand &Src1, AMDGPU::OpName Src1OpName) const
Register insertNE(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const
This function is used to determine if an instruction can be safely executed under EXEC = 0 without ha...
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
static bool isAtomic(const MachineInstr &MI)
bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const override
bool isLiteralOperandLegal(const MCInstrDesc &InstDesc, const MCOperandInfo &OpInfo) const
static bool sopkIsZext(unsigned Opcode)
static bool isSGPRSpill(const MachineInstr &MI)
static bool isWMMA(const MachineInstr &MI)
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool mayReadEXEC(const MachineRegisterInfo &MRI, const MachineInstr &MI) const
Returns true if the instruction could potentially depend on the value of exec.
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
void insertVectorSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
std::pair< MachineInstr *, MachineInstr * > expandMovDPP64(MachineInstr &MI) const
Register insertEQ(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
static bool isSOPC(const MachineInstr &MI)
static bool isFLAT(const MachineInstr &MI)
static bool isVALU(const MachineInstr &MI)
bool isBarrier(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx0, unsigned OpIdx1) const override
int pseudoToMCOpcode(int Opcode) const
Return a target-specific opcode if Opcode is a pseudo instruction.
const MCInstrDesc & getMCOpcodeFromPseudo(unsigned Opcode) const
Return the descriptor of the target-specific machine instruction that corresponds to the specified pseudo instruction.
bool isLegalGFX12PlusPackedMathFP32Operand(const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand for gfx12+ packed math FP32 instructions.
MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const override
static bool isFixedSize(const MachineInstr &MI)
bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const override
LLVM_READONLY int commuteOpcode(unsigned Opc) const
uint64_t getScratchRsrcWords23() const
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, AMDGPU::OpName OperandName) const
Returns the operand named Op.
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand if it were the operand at index OpIdx of MI.
static bool isLDSDMA(const MachineInstr &MI)
static bool isVOP1(const MachineInstr &MI)
SIInstrInfo(const GCNSubtarget &ST)
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool hasAnyModifiersSet(const MachineInstr &MI) const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which interpolation parameters to load.
void setHasSpilledVGPRs(bool Spill=true)
bool isWWMReg(Register Reg) const
bool checkFlag(Register Reg, uint8_t Flag) const
void setHasSpilledSGPRs(bool Spill=true)
const TargetRegisterClass * getRegClass(unsigned RCID) const
static unsigned getSubRegFromChannel(unsigned Channel, unsigned NumRegs=1)
const TargetRegisterClass * getProperlyAlignedRC(const TargetRegisterClass *RC) const
ArrayRef< int16_t > getRegSplitParts(const TargetRegisterClass *RC, unsigned EltSize) const
unsigned getHWRegIndex(MCRegister Reg) const
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getChannelFromSubReg(unsigned SubReg) const
static bool isAGPRClass(const TargetRegisterClass *RC)
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions according to the given MachineSchedStrategy.
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle, and whether or not a noop needs to be inserted to handle the hazard.
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
SlotIndexes pass.
SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late=false)
Insert the given machine instruction into the mapping.
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:281
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
int64_t getImm() const
Register getReg() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructions before register allocation.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, this lets the target make necessary checks and insert the copy to the PHI destination register.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination register.
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, this lets the target make necessary checks and insert the copy to the PHI destination register.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target specify whether the instruction is actually trivially rematerializable.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unmodeled side effects).
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:194
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:174
self_iterator getIterator()
Definition ilist_node.h:130
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
bool isPackedFP32Inst(unsigned Opc)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
const uint64_t RSRC_DATA_FORMAT
LLVM_READONLY int getBasicFromSDWAOp(uint16_t Opcode)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
LLVM_READONLY int getVOPe32(uint16_t Opcode)
bool getWMMAIsXDL(unsigned Opc)
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
LLVM_READONLY int getFlatScratchInstSVfromSS(uint16_t Opcode)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
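A minimal sketch of how that constraint could be checked, using a hypothetical helper (not the in-tree implementation) built on llvm::isUIntN, assuming NumBits is the value returned by getNumFlatOffsetBits:

  #include "llvm/Support/MathExtras.h"
  // Pre-GFX12: the offset is treated as unsigned and its MSB is forced to
  // zero, so only NumBits - 1 bits are usable and the value must be >= 0.
  static bool isLegalPreGFX12FlatOffset(int64_t Offset, unsigned NumBits) {
    return Offset >= 0 && llvm::isUIntN(NumBits - 1, Offset);
  }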
bool isGFX12Plus(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
LLVM_READONLY int getGlobalVaddrOp(uint16_t Opcode)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
bool getMAIIsGFX940XDL(unsigned Opc)
const uint64_t RSRC_ELEMENT_SIZE_SHIFT
LLVM_READONLY int getAddr64Inst(uint16_t Opcode)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
LLVM_READONLY int getMFMAEarlyClobberOp(uint16_t Opcode)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
const uint64_t RSRC_TID_ENABLE
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
constexpr bool isSISrcOperand(const MCOperandInfo &OpInfo)
Is this an AMDGPU specific source operand?
bool isGenericAtomic(unsigned Opc)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCSubtargetInfo &ST)
LLVM_READONLY int getCommuteRev(uint16_t Opcode)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:231
@ OPERAND_REG_IMM_INT64
Definition SIDefines.h:202
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:206
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:201
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:208
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:207
@ OPERAND_REG_INLINE_C_INT64
Definition SIDefines.h:218
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition SIDefines.h:216
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:236
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:237
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:212
@ OPERAND_SDWA_VOPC_DST
Definition SIDefines.h:248
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:204
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:217
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:223
@ OPERAND_INLINE_C_AV64_PSEUDO
Definition SIDefines.h:242
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:220
@ OPERAND_REG_IMM_INT16
Definition SIDefines.h:203
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:228
@ TI_SCRATCH_RSRC_DWORD1
Definition AMDGPU.h:569
@ TI_SCRATCH_RSRC_DWORD3
Definition AMDGPU.h:571
@ TI_SCRATCH_RSRC_DWORD0
Definition AMDGPU.h:568
@ TI_SCRATCH_RSRC_DWORD2
Definition AMDGPU.h:570
@ TI_CONSTDATA_START
Definition AMDGPU.h:567
LLVM_READONLY int getCommuteOrig(uint16_t Opcode)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
const uint64_t RSRC_INDEX_STRIDE_SHIFT
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
LLVM_READNONE constexpr bool isGraphics(CallingConv::ID CC)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
LLVM_READONLY int getIfAddr64Inst(uint16_t Opcode)
Check if Opcode is an Addr64 opcode.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ OPERAND_GENERIC_4
Definition MCInstrDesc.h:72
@ OPERAND_GENERIC_2
Definition MCInstrDesc.h:70
@ OPERAND_GENERIC_1
Definition MCInstrDesc.h:69
@ OPERAND_GENERIC_3
Definition MCInstrDesc.h:71
@ OPERAND_IMMEDIATE
Definition MCInstrDesc.h:62
@ OPERAND_GENERIC_0
Definition MCInstrDesc.h:68
@ OPERAND_GENERIC_5
Definition MCInstrDesc.h:73
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
LLVM_ABI void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions starting from FirstMI to LastMI (exclusive).
TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O)
Create RegSubRegPair from a register MachineOperand.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
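For illustration, a minimal usage sketch of llvm::all_of over a container:

  #include "llvm/ADT/STLExtras.h"
  #include <vector>
  std::vector<int> Vals = {2, 4, 6};
  bool AllEven = llvm::all_of(Vals, [](int V) { return V % 2 == 0; }); // true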
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
Definition MathExtras.h:216
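For example, worked out by hand under the stated semantics:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::maxUIntN(8) == 255, "2^8 - 1");
  static_assert(llvm::maxUIntN(16) == 65535, "2^16 - 1");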
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
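For example, with an 8-bit width:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::isInt<8>(127), "fits in a signed 8-bit value");
  static_assert(!llvm::isInt<8>(128), "one past the signed 8-bit maximum");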
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI, const MachineInstr &UseMI)
Return false if EXEC is not changed between the def of VReg at DefMI and the use at UseMI.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
Definition STLExtras.h:2452
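A minimal usage sketch (the names here are illustrative only):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Support/raw_ostream.h"
  #include <string>
  #include <vector>
  void dump(const std::vector<std::string> &Names) {
    for (const auto &En : llvm::enumerate(Names))
      llvm::errs() << En.index() << ": " << En.value() << "\n";
  }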
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:634
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is Skew mod Align.
Definition MathExtras.h:557
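For example, with the default Skew of 0:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::alignDown(13, 4) == 12, "rounds down to a multiple of 4");
  static_assert(llvm::alignDown(16, 4) == 16, "already aligned");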
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
Op::Description Desc
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition bit.h:186
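For example:

  #include "llvm/ADT/bit.h"
  int TrailingZeros = llvm::countr_zero(8u); // 0b1000 -> 3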
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg)
Return the SubReg component from REG_SEQUENCE.
static const MachineMemOperand::Flags MONoClobber
Mark the MMO of a uniform load if there are no potentially clobbering stores on any path from the start of kernel execution.
Definition SIInstrInfo.h:44
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:342
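For example (floor semantics, so non-powers of two round down):

  #include "llvm/Support/MathExtras.h"
  unsigned A = llvm::Log2_32(32); // 5
  unsigned B = llvm::Log2_32(33); // also 5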
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:159
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
MachineInstr * getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, MachineRegisterInfo &MRI)
Return the defining instruction for a given reg:subreg pair, skipping copy-like instructions and subreg-manipulation pseudos.
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:164
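Together with Hi_32 above, for example:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::Hi_32(0x123456789ABCDEF0ULL) == 0x12345678u, "high half");
  static_assert(llvm::Lo_32(0x123456789ABCDEF0ULL) == 0x9ABCDEF0u, "low half");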
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
static const MachineMemOperand::Flags MOCooperative
Mark the MMO of cooperative load/store atomics.
Definition SIInstrInfo.h:52
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:405
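For example:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::divideCeil(7, 2) == 4, "ceil(3.5)");
  static_assert(llvm::divideCeil(8, 2) == 4, "exact division is unchanged");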
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
unsigned getUndefRegState(bool B)
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
unsigned getKillRegState(bool B)
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned DefaultMemoryClusterDWordsLimit
Definition SIInstrInfo.h:40
constexpr unsigned BitWidth
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition MathExtras.h:257
static const MachineMemOperand::Flags MOLastUse
Mark the MMO of a load as the last use.
Definition SIInstrInfo.h:48
constexpr T reverseBits(T Val)
Reverse the bits in Val.
Definition MathExtras.h:127
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:583
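For example, with B = 16:

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::SignExtend64<16>(0xFFFF) == -1, "sign bit set");
  static_assert(llvm::SignExtend64<16>(0x7FFF) == 32767, "sign bit clear");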
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:86
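For example:

  #include "llvm/Support/MathExtras.h"
  #include <cstdint>
  static_assert(llvm::maskTrailingOnes<uint32_t>(8) == 0xFFu, "low 8 bits set");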
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address intrinsics from the specified value.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ NeverUniform
The result values can never be assumed to be uniform.
Definition Uniformity.h:26
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
GenericCycleInfo< MachineSSAContext > MachineCycleInfo
MachineCycleInfo::CycleT MachineCycle
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
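For example:

  #include "llvm/ADT/bit.h"
  int SetBits = llvm::popcount(0xF0u); // 0b11110000 -> 4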
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI)
Return false if EXEC is not changed between the def of VReg at DefMI and all its uses.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
Definition APFloat.cpp:219
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:85
SparseBitVector AliveBlocks
AliveBlocks - Set of blocks in which this value is alive completely through.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Utility to store machine instructions worklist.
Definition SIInstrInfo.h:56
MachineInstr * top() const
Definition SIInstrInfo.h:61
bool isDeferred(MachineInstr *MI)
SetVector< MachineInstr * > & getDeferredList()
Definition SIInstrInfo.h:80
void insert(MachineInstr *MI)
A pair composed of a register and a sub-register index.