//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"

using namespace llvm;

43 "disable-sched-hazard", cl::Hidden, cl::init(false),
44 cl::desc("Disable hazard detection during preRA scheduling"));
45
47 "acc-reassoc", cl::Hidden, cl::init(true),
48 cl::desc("Enable reassociation of accumulation chains"));
49
52 cl::desc("Minimum length of accumulator chains "
53 "required for the optimization to kick in"));
54
56 "acc-max-width", cl::Hidden, cl::init(3),
57 cl::desc("Maximum number of branches in the accumulator tree"));
58
TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
  int16_t RegClass = getOpRegClassID(OpInfo);

  // TODO: Remove isLookupPtrRegClass in favor of isLookupRegClassByHwMode
  if (OpInfo.isLookupPtrRegClass())
    return TRI->getPointerRegClass(RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
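///
/// For example (illustrative, assuming a target whose maximum instruction
/// length is 4 bytes):
///   "add x0, x0, x1\n.space 8\n# comment"
/// is measured as 4 (one instruction) + 8 (.space) + 0 (comment) = 12 bytes.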
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str,
    const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateAdditionalCallInfo())
      MBB->getParent()->eraseAdditionalCallInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;

  // For a case like this:
  //   %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
  // we need to update the implicit-def after commuting to result in:
  //   %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
  SmallVector<unsigned> UpdateImplicitDefIdx;
  if (HasDef && MI.hasImplicitDef()) {
    const TargetRegisterInfo *TRI =
        MI.getMF()->getSubtarget().getRegisterInfo();
    for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
      Register ImplReg = MO.getReg();
      if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
          (ImplReg.isPhysical() && Reg0.isPhysical() &&
           TRI->isSubRegisterEq(ImplReg, Reg0)))
        UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
    }
  }

  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
    for (unsigned Idx : UpdateImplicitDefIdx)
      CommutedMI->getOperand(Idx).setReg(Reg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

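// For example (illustrative): if an instruction's commutable operands are 1
// and 2, a caller passing (ResultIdx1 = CommuteAnyOperandIndex, ResultIdx2 = 2)
// gets ResultIdx1 fixed to 1; the pair (1, 2) is accepted as-is, while (0, 2)
// is rejected because operand 0 is not one of the commutable operands.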
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

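// For example (illustrative): for a register class with a 16-byte spill size
// and a sub-register index covering bits [64, 128), this computes Size = 8 and
// Offset = 8 on a little-endian target; on a big-endian target the offset is
// flipped to 16 - (8 + 8) = 0.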
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator InsertBefore,
                                         const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (!MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
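// For example (illustrative): when spilling %1 in "%1 = COPY %2", folding
// operand 0 turns the COPY into a store of %2 to %1's stack slot, and the
// class returned here is the one that stack operation must use.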
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load instructions if the load defines a virtual register,
/// the virtual register is used once in the same BB, and the instructions
/// in-between do not load or store, and have no side effects.
MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                                 const MachineRegisterInfo *MRI,
                                                 Register &FoldAsLoadDefReg,
                                                 MachineInstr *&DefMI) const {
  // Check whether we can move DefMI here.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(SawStore))
    return nullptr;

  // Collect information about virtual register operands of MI.
  SmallVector<unsigned, 1> SrcOperandIds;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg != FoldAsLoadDefReg)
      continue;
    // Do not fold if we have a subreg use or a def.
    if (MO.getSubReg() || MO.isDef())
      return nullptr;
    SrcOperandIds.push_back(i);
  }
  if (SrcOperandIds.empty())
    return nullptr;

  // Check whether we can fold the def into SrcOperandIds.
  if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange aborts via llvm_unreachable if MI is not a
  // patchpoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 4> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
  // is the per-target number of operands that represent the memory operand
  // excluding this one (MD). This includes MO.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update mayload/maystore metadata, and memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  if (Flags == MachineMemOperand::MOStore) {
    if (MO.isUndef()) {
      // If this is an undef copy, we do not need to bother inserting spill
      // code.
      BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
    } else {
      storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                          Register());
    }
  } else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());

  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill subregisters that previous copies
    // defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill(),
              DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
              SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB.
// If \param CombineOpc is not provided, the OpCode check will
// be skipped.
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc = 0) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;

  if (MO.isReg() && MO.getReg().isVirtual())
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB ||
      (MI->getOpcode() != CombineOpc && CombineOpc != 0))
    return false;
  // It must only be used by the instruction we combine with.
  if (!MRI.hasOneNonDBGUse(MO.getReg()))
    return false;

  return true;
}

// A chain of accumulation instructions will be selected IFF:
// 1. All the accumulation instructions in the chain have the same opcode,
//    except the first, which has a slightly different opcode because it does
//    not accumulate into a register.
// 2. All the instructions in the chain are combinable (have a single use
//    which itself is part of the chain).
// 3. Meets the required minimum length.
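//
// For example (illustrative, multiply-accumulate style):
//   %1 = MUL %a, %b        ; chain start, does not accumulate
//   %2 = MLA %1, %c, %d    ; accumulates into %1
//   %3 = MLA %2, %e, %f    ; linear chain of length 3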
void TargetInstrInfo::getAccumulatorChain(
    MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
  // Walk up the chain of accumulation instructions and collect them in the
  // vector.
  MachineBasicBlock &MBB = *CurrentInstr->getParent();
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
  std::optional<unsigned> ChainStartOpCode =
      getAccumulationStartOpcode(AccumulatorOpcode);

  if (!ChainStartOpCode.has_value())
    return;

  // Push the first accumulator result to the start of the chain.
  Chain.push_back(CurrentInstr->getOperand(0).getReg());

  // Collect the accumulator input register from all instructions in the chain.
  while (CurrentInstr &&
         canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
    CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
  }

  // Add the instruction at the top of the chain.
  if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
      canCombine(MBB, CurrentInstr->getOperand(1)))
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
}

/// Find chains of accumulations that can be rewritten as a tree for increased
/// ILP.
bool TargetInstrInfo::getAccumulatorReassociationPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
  if (!EnableAccReassociation)
    return false;

  unsigned Opc = Root.getOpcode();
  if (!isAccumulationOpcode(Opc))
    return false;

  // Verify that this is the end of the chain.
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
    return false;

  auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
  if (User->getOpcode() == Opc)
    return false;

  // Walk up the use chain and collect the reduction chain.
  SmallVector<Register, 32> Chain;
  getAccumulatorChain(&Root, Chain);

  // Reject chains which are too short to be worth modifying.
  if (Chain.size() < MinAccumulatorDepth)
    return false;

  // Check if the MBB this instruction is a part of contains any other chains.
  // If so, don't apply it.
  SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
  for (const auto &I : MBB) {
    if (I.getOpcode() == Opc &&
        !ReductionChain.contains(I.getOperand(0).getReg()))
      return false;
  }

  Patterns.push_back(MachineCombinerPattern::ACC_CHAIN);
  return true;
}

// Reduce branches of the accumulator tree by adding them together.
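// For example (illustrative): when only two partial sums remain, they are
// added directly into the original result register; longer worklists are
// reduced pairwise into fresh virtual registers until one register remains.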
void TargetInstrInfo::reduceAccumulatorTree(
    SmallVectorImpl<Register> &RegistersToReduce,
    SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
    MachineInstr &Root, MachineRegisterInfo &MRI,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
    Register ResultReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<Register, 8> NewRegs;

  // Get the opcode for the reduction instruction we will need to build.
  // If for some reason it is not defined, early exit and don't apply this.
  unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());

  for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
    auto RHS = RegistersToReduce[i - 1];
    auto LHS = RegistersToReduce[i];
    Register Dest;
    // If we are reducing 2 registers, reuse the original result register.
    if (RegistersToReduce.size() == 2)
      Dest = ResultReg;
    // Otherwise, create a new virtual register to hold the partial sum.
    else {
      auto NewVR = MRI.createVirtualRegister(
          MRI.getRegClass(Root.getOperand(0).getReg()));
      Dest = NewVR;
      NewRegs.push_back(Dest);
      InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
    }

    // Create the new reduction instruction.
    MachineInstrBuilder MIB =
        BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
            .addReg(RHS, getKillRegState(true))
            .addReg(LHS, getKillRegState(true));
    // Copy any flags needed from the original instruction.
    MIB->setFlags(Root.getFlags());
    InsInstrs.push_back(MIB);
  }

  // If the number of registers to reduce is odd, add the remaining register to
  // the vector of registers to reduce.
  if (RegistersToReduce.size() % 2 != 0)
    NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);

  RegistersToReduce = NewRegs;
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }
  if (getAccumulatorReassociationPatterns(Root, Patterns))
    return true;

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
  return false;
}

CombinerObjective
TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
  switch (Pattern) {
  case MachineCombinerPattern::ACC_CHAIN:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the operand order. In this case the
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inversion of
  // the other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See the visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

void TargetInstrInfo::getReassociateOperandIndices(
    const MachineInstr &Root, unsigned Pattern,
    std::array<unsigned, 5> &OperandIndices) const {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    OperandIndices = {1, 1, 1, 2, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    OperandIndices = {2, 1, 2, 2, 1};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    OperandIndices = {1, 2, 1, 1, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    OperandIndices = {2, 2, 2, 1, 1};
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    ArrayRef<unsigned> OperandIndices,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
  MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
  MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
  MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
  unsigned RootFirstOpIdx, RootSecondOpIdx;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }

  // Basically BuildMI but doesn't add implicit operands by default.
  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
                              const MCInstrDesc &MCID, Register DestReg) {
    return MachineInstrBuilder(
               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
        .copyMIMetadata(MIMD)
        .addReg(DestReg, RegState::Define);
  };

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
  for (const auto &MO : Prev.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand we'd already added.
    if (Idx == 0)
      continue;
    if (Idx == PrevFirstOpIdx)
      MIB1.addReg(RegX, getKillRegState(KillX));
    else if (Idx == PrevSecondOpIdx)
      MIB1.addReg(RegY, getKillRegState(KillY));
    else
      MIB1.add(MO);
  }
  MIB1.copyImplicitOps(Prev);

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
  for (const auto &MO : Root.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand.
    if (Idx == 0)
      continue;
    if (Idx == RootFirstOpIdx)
      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
    else if (Idx == RootSecondOpIdx)
      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
    else
      MIB2 = MIB2.add(MO);
  }
  MIB2.copyImplicitOps(Root);

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  MachineBasicBlock &MBB = *Root.getParent();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB: {
    // Select the previous instruction in the sequence based on the input
    // pattern.
    std::array<unsigned, 5> OperandIndices;
    getReassociateOperandIndices(Root, Pattern, OperandIndices);
    MachineInstr *Prev =
        MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());

    // Don't reassociate if Prev and Root are in different blocks.
    if (Prev->getParent() != Root.getParent())
      return;

    reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
                   InstIdxForVirtReg);
    break;
  }
  case MachineCombinerPattern::ACC_CHAIN: {
    SmallVector<Register, 32> ChainRegs;
    getAccumulatorChain(&Root, ChainRegs);
    unsigned int Depth = ChainRegs.size();
    assert(MaxAccumulatorWidth > 1 &&
           "Max accumulator width set to illegal value");
    unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
                                ? Log2_32(Depth)
                                : MaxAccumulatorWidth;

    // Walk down the chain and rewrite it as a tree.
    for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
      // No need to rewrite the first node, it is already perfect as it is.
      if (IndexedReg.index() == 0)
        continue;

      MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
      MachineInstrBuilder MIB;
      Register AccReg;
      if (IndexedReg.index() < MaxWidth) {
        // Now we need to create new instructions for the first row.
        AccReg = Instr->getOperand(0).getReg();
        unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());

        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      } else {
        // For the remaining cases, we need to use an output register of one of
        // the newly inserted instructions as operand 1.
        AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
                     ? MRI.createVirtualRegister(
                           MRI.getRegClass(Root.getOperand(0).getReg()))
                     : Instr->getOperand(0).getReg();
        assert(IndexedReg.index() >= MaxWidth);
        auto AccumulatorInput =
            ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
                      AccReg)
                  .addReg(AccumulatorInput, getKillRegState(true))
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      }

      MIB->setFlags(Instr->getFlags());
      InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
      InsInstrs.push_back(MIB);
      DelInstrs.push_back(Instr);
    }

    SmallVector<Register, 8> RegistersToReduce;
    for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
         ++i) {
      auto Reg = InsInstrs[i]->getOperand(0).getReg();
      RegistersToReduce.push_back(Reg);
    }

    while (RegistersToReduce.size() > 1)
      reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
                            InstIdxForVirtReg, Root.getOperand(0).getReg());

    break;
  }
  }
}


bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
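  // For example (illustrative), a load of a by-value argument from its
  // immutable fixed stack slot yields the same value anywhere in the function,
  // so the load can simply be re-executed at the new location.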
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
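  // For example (illustrative): with a downward-growing stack, a call-frame
  // setup of 16 bytes yields +16 and the matching destroy yields -16 after the
  // sign adjustment below.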

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  LocationSize Width = LocationSize::precise(0);
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

1758//===----------------------------------------------------------------------===//
1759// SelectionDAG latency interface.
1760//===----------------------------------------------------------------------===//
1761
1762std::optional<unsigned>
1763TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1764 SDNode *DefNode, unsigned DefIdx,
1765 SDNode *UseNode, unsigned UseIdx) const {
1766 if (!ItinData || ItinData->isEmpty())
1767 return std::nullopt;
1768
1769 if (!DefNode->isMachineOpcode())
1770 return std::nullopt;
1771
1772 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1773 if (!UseNode->isMachineOpcode())
1774 return ItinData->getOperandCycle(DefClass, DefIdx);
1775 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1776 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1777}
1778
1779unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1780 SDNode *N) const {
1781 if (!ItinData || ItinData->isEmpty())
1782 return 1;
1783
1784 if (!N->isMachineOpcode())
1785 return 1;
1786
1787 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1788}
1789
1790//===----------------------------------------------------------------------===//
1791// MachineInstr latency interface.
1792//===----------------------------------------------------------------------===//
1793
1794unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1795 const MachineInstr &MI) const {
1796 if (!ItinData || ItinData->isEmpty())
1797 return 1;
1798
1799 unsigned Class = MI.getDesc().getSchedClass();
1800 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1801 if (UOps >= 0)
1802 return UOps;
1803
1804 // The # of u-ops is dynamically determined. The specific target should
1805 // override this function to return the right number.
1806 return 1;
1807}
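// A minimal usage sketch (not LLVM source; names are hypothetical): with
// itineraries available, summing getNumMicroOps() approximates the decode
// cost of a block.
static unsigned blockMicroOps(const MachineBasicBlock &MBB,
                              const TargetInstrInfo &TII,
                              const InstrItineraryData *Itin) {
  unsigned UOps = 0;
  for (const MachineInstr &MI : MBB)
    UOps += TII.getNumMicroOps(Itin, MI);
  return UOps;
}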
1808
1809/// Return the default expected latency for a def based on its opcode.
1810unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1811 const MachineInstr &DefMI) const {
1812 if (DefMI.isTransient())
1813 return 0;
1814 if (DefMI.mayLoad())
1815 return SchedModel.LoadLatency;
1816 if (isHighLatencyDef(DefMI.getOpcode()))
1817 return SchedModel.HighLatency;
1818 return 1;
1819}
1820
1821unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1822 return 0;
1823}
1824
1825unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1826 const MachineInstr &MI,
1827 unsigned *PredCost) const {
1828 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1829 // still have a MinLatency property, which getStageLatency checks.
1830 if (!ItinData)
1831 return MI.mayLoad() ? 2 : 1;
1832
1833 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1834}
1835
1836bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1837 const MachineInstr &DefMI,
1838 unsigned DefIdx) const {
1839 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1840 if (!ItinData || ItinData->isEmpty())
1841 return false;
1842
1843 unsigned DefClass = DefMI.getDesc().getSchedClass();
1844 std::optional<unsigned> DefCycle =
1845 ItinData->getOperandCycle(DefClass, DefIdx);
1846 return DefCycle && DefCycle <= 1U;
1847}
1848
1849bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1850 // TODO: We don't split functions where a section attribute has been set
1851 // since the split part may not be placed in a contiguous region. It may also
1852 // be more beneficial to augment the linker to ensure contiguous layout of
1853 // split functions within the same section as specified by the attribute.
1854 if (MF.getFunction().hasSection())
1855 return false;
1856
1857 // We don't want to proceed further for cold functions
1858 // or functions of unknown hotness. Lukewarm functions have no prefix.
1859 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1860 if (SectionPrefix &&
1861 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1862 return false;
1863 }
1864
1865 return true;
1866}
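// Illustrative note (not part of the source): with profile data, a cold
// function typically carries the "unlikely" section prefix (landing in
// .text.unlikely.*) and is filtered out above, while a "hot"-prefixed
// function remains a candidate for machine function splitting.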
1867
1868std::optional<ParamLoadedValue>
1869TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1870 Register Reg) const {
1871 const MachineFunction *MF = MI.getMF();
1872 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1873 DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1874 int64_t Offset;
1875 bool OffsetIsScalable;
1876
1877 // To simplify the sub-register handling, verify that we only need to
1878 // consider physical registers.
1879 assert(MF->getProperties().hasNoVRegs());
1880
1881 if (auto DestSrc = isCopyInstr(MI)) {
1882 Register DestReg = DestSrc->Destination->getReg();
1883
1884 // If the copy destination is the forwarding reg, describe the forwarding
1885 // reg using the copy source as the backup location. Example:
1886 //
1887 // x0 = MOV x7
1888 // call callee(x0) ; x0 described as x7
1889 if (Reg == DestReg)
1890 return ParamLoadedValue(*DestSrc->Source, Expr);
1891
1892 // If the target's hook couldn't describe this copy, give up.
1893 return std::nullopt;
1894 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1895 Register SrcReg = RegImm->Reg;
1896 Offset = RegImm->Imm;
1897 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1898 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1899 } else if (MI.hasOneMemOperand()) {
1900 // Only describe memory which provably does not escape the function. As
1901 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1902 // callee (or by another thread).
1903 const auto &TII = MF->getSubtarget().getInstrInfo();
1904 const MachineFrameInfo &MFI = MF->getFrameInfo();
1905 const MachineMemOperand *MMO = MI.memoperands()[0];
1906 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1907
1908 // If the address points to "special" memory (e.g. a spill slot), it's
1909 // sufficient to check that it isn't aliased by any high-level IR value.
1910 if (!PSV || PSV->mayAlias(&MFI))
1911 return std::nullopt;
1912
1913 const MachineOperand *BaseOp;
1914 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1915 TRI))
1916 return std::nullopt;
1917
1918 // FIXME: Scalable offsets are not yet handled in the offset code below.
1919 if (OffsetIsScalable)
1920 return std::nullopt;
1921
1922 // TODO: Can currently only handle mem instructions with a single define.
1923 // An example from the x86 target:
1924 // ...
1925 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1926 // ...
1927 //
1928 if (MI.getNumExplicitDefs() != 1)
1929 return std::nullopt;
1930
1931 // TODO: In what way do we need to take Reg into consideration here?
1932
1933 SmallVector<uint64_t, 8> Ops;
1934 DIExpression::appendOffset(Ops, Offset);
1935 Ops.push_back(dwarf::DW_OP_deref_size);
1936 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1937 : ~UINT64_C(0));
1938 Expr = DIExpression::prependOpcodes(Expr, Ops);
1939 return ParamLoadedValue(*BaseOp, Expr);
1940 }
1941
1942 return std::nullopt;
1943}
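// A minimal usage sketch (not LLVM source; names are hypothetical): the
// debug entry-value machinery consumes the returned pair roughly as below
// when describing call-site parameter values.
static void describeForwardedParam(const MachineInstr &MI, Register ParamReg,
                                   const TargetInstrInfo &TII) {
  if (std::optional<ParamLoadedValue> PLV =
          TII.describeLoadedValue(MI, ParamReg)) {
    const MachineOperand &Loc = PLV->first;  // where the value lives
    const DIExpression *Expr = PLV->second;  // how to compute it from Loc
    // ...emit a call-site parameter entry built from Loc and Expr...
    (void)Loc;
    (void)Expr;
  }
}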
1944
1945// Get the call frame size just before MI.
1946unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1947 // Search backwards from MI for the most recent call frame instruction.
1948 MachineBasicBlock *MBB = MI.getParent();
1949 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1950 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1951 return getFrameTotalSize(AdjI);
1952 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1953 return 0;
1954 }
1955
1956 // If none was found, use the call frame size from the start of the basic
1957 // block.
1958 return MBB->getCallFrameSize();
1959}
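// Illustrative example (not part of the source; pseudo-opcode names vary by
// target): in a block such as
//   ADJCALLSTACKDOWN 16, ... ; call frame setup
//   <MI>                     ; getCallFrameSizeAt(<MI>) == 16
//   ADJCALLSTACKUP 16, ...   ; call frame destroy
//   <MI'>                    ; getCallFrameSizeAt(<MI'>) == 0
// the backward walk finds the nearest setup/destroy; with neither present,
// the block's recorded entry call frame size is used.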
1960
1961/// Both DefMI and UseMI must be valid. By default, call directly to the
1962/// itinerary. This may be overridden by the target.
1963std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1964 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1965 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1966 unsigned DefClass = DefMI.getDesc().getSchedClass();
1967 unsigned UseClass = UseMI.getDesc().getSchedClass();
1968 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1969}
1970
1971bool TargetInstrInfo::getRegSequenceInputs(
1972 const MachineInstr &MI, unsigned DefIdx,
1973 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1974 assert((MI.isRegSequence() ||
1975 MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1976
1977 if (!MI.isRegSequence())
1978 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1979
1980 // We are looking at:
1981 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1982 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1983 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1984 OpIdx += 2) {
1985 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1986 if (MOReg.isUndef())
1987 continue;
1988 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1989 assert(MOSubIdx.isImm() &&
1990 "One of the subindex of the reg_sequence is not an immediate");
1991 // Record Reg:SubReg, SubIdx.
1992 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1993 (unsigned)MOSubIdx.getImm()));
1994 }
1995 return true;
1996}
1997
1998bool TargetInstrInfo::getExtractSubregInputs(
1999 const MachineInstr &MI, unsigned DefIdx,
2000 RegSubRegPairAndIdx &InputReg) const {
2001 assert((MI.isExtractSubreg() ||
2002 MI.isExtractSubregLike()) && "Instruction does not have the proper type");
2003
2004 if (!MI.isExtractSubreg())
2005 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
2006
2007 // We are looking at:
2008 // Def = EXTRACT_SUBREG v0.sub1, sub0.
2009 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
2010 const MachineOperand &MOReg = MI.getOperand(1);
2011 if (MOReg.isUndef())
2012 return false;
2013 const MachineOperand &MOSubIdx = MI.getOperand(2);
2014 assert(MOSubIdx.isImm() &&
2015 "The subindex of the extract_subreg is not an immediate");
2016
2017 InputReg.Reg = MOReg.getReg();
2018 InputReg.SubReg = MOReg.getSubReg();
2019 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2020 return true;
2021}
2022
2023bool TargetInstrInfo::getInsertSubregInputs(
2024 const MachineInstr &MI, unsigned DefIdx,
2025 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2026 assert((MI.isInsertSubreg() ||
2027 MI.isInsertSubregLike()) && "Instruction does not have the proper type");
2028
2029 if (!MI.isInsertSubreg())
2030 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2031
2032 // We are looking at:
2033 // Def = INSERT_SUBREG v0, v1, sub0.
2034 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2035 const MachineOperand &MOBaseReg = MI.getOperand(1);
2036 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2037 if (MOInsertedReg.isUndef())
2038 return false;
2039 const MachineOperand &MOSubIdx = MI.getOperand(3);
2040 assert(MOSubIdx.isImm() &&
2041 "One of the subindex of the reg_sequence is not an immediate");
2042 BaseReg.Reg = MOBaseReg.getReg();
2043 BaseReg.SubReg = MOBaseReg.getSubReg();
2044
2045 InsertedReg.Reg = MOInsertedReg.getReg();
2046 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2047 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2048 return true;
2049}
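// A minimal usage sketch (not LLVM source; names are hypothetical): a
// peephole-style client can enumerate the values feeding a REG_SEQUENCE via
// the helper above; the EXTRACT/INSERT variants are used analogously.
static void collectSequenceInputs(const MachineInstr &MI,
                                  const TargetInstrInfo &TII) {
  SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 4> Inputs;
  if (!MI.isRegSequence() ||
      !TII.getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs))
    return;
  for (const TargetInstrInfo::RegSubRegPairAndIdx &In : Inputs) {
    // In.Reg:In.SubReg is placed at subregister index In.SubIdx of the def.
    (void)In;
  }
}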
2050
2051// Returns a MIRPrinter comment for this machine operand.
2052std::string TargetInstrInfo::createMIROperandComment(
2053 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2054 const TargetRegisterInfo *TRI) const {
2055
2056 if (!MI.isInlineAsm())
2057 return "";
2058
2059 std::string Flags;
2060 raw_string_ostream OS(Flags);
2061
2062 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2063 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2064 unsigned ExtraInfo = Op.getImm();
2065 bool First = true;
2066 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
2067 if (!First)
2068 OS << " ";
2069 First = false;
2070 OS << Info;
2071 }
2072
2073 return Flags;
2074 }
2075
2076 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2077 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2078 return "";
2079
2080 assert(Op.isImm() && "Expected flag operand to be an immediate");
2081 // Pretty print the inline asm operand descriptor.
2082 unsigned Flag = Op.getImm();
2083 const InlineAsm::Flag F(Flag);
2084 OS << F.getKindName();
2085
2086 unsigned RCID;
2087 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
2088 if (TRI) {
2089 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
2090 } else
2091 OS << ":RC" << RCID;
2092 }
2093
2094 if (F.isMemKind()) {
2095 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2096 OS << ":" << InlineAsm::getMemConstraintName(MCID);
2097 }
2098
2099 unsigned TiedTo;
2100 if (F.isUseOperandTiedToDef(TiedTo))
2101 OS << " tiedto:$" << TiedTo;
2102
2103 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2104 F.getRegMayBeFolded())
2105 OS << " foldable";
2106
2107 return Flags;
2108}
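// Illustrative example (not part of the source): in printed MIR the comment
// produced above appears next to the inline-asm flag operand, roughly as
//   INLINEASM &"..." [sideeffect attdialect], $0:[regdef:gr64], ...
// i.e. the flag kind, an optional register-class or memory-constraint
// suffix, and any "tiedto:"/"foldable" markers.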
2109
2110TargetInstrInfo::~TargetInstrInfo() = default;
2111
2112void TargetInstrInfo::mergeOutliningCandidateAttributes(
2113 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2114 // Include target features from an arbitrary candidate for the outlined
2115 // function. This makes sure the outlined function knows what kinds of
2116 // instructions are going into it. This is fine, since all parent functions
2117 // must necessarily support the instructions that are in the outlined region.
2118 outliner::Candidate &FirstCand = Candidates.front();
2119 const Function &ParentFn = FirstCand.getMF()->getFunction();
2120 if (ParentFn.hasFnAttribute("target-features"))
2121 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2122 if (ParentFn.hasFnAttribute("target-cpu"))
2123 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2124
2125 // Set nounwind, so we don't generate eh_frame.
2126 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2127 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2128 }))
2129 F.addFnAttr(Attribute::NoUnwind);
2130}
2131
2132outliner::InstrType
2133TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2134 MachineBasicBlock::iterator &MIT,
2135 unsigned Flags) const {
2136 MachineInstr &MI = *MIT;
2137
2138 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2139 // have support for outlining those. Special-case that here.
2140 if (MI.isCFIInstruction())
2141 // Just go right to the target implementation.
2142 return getOutliningTypeImpl(MMI, MIT, Flags);
2143
2144 // Be conservative about inline assembly.
2145 if (MI.isInlineAsm())
2146 return outliner::InstrType::Illegal;
2147
2148 // Labels generally can't safely be outlined.
2149 if (MI.isLabel())
2150 return outliner::InstrType::Illegal;
2151
2152 // Don't let debug instructions impact analysis.
2153 if (MI.isDebugInstr())
2154 return outliner::InstrType::Invisible;
2155
2156 // Some other special cases.
2157 switch (MI.getOpcode()) {
2158 case TargetOpcode::IMPLICIT_DEF:
2159 case TargetOpcode::KILL:
2160 case TargetOpcode::LIFETIME_START:
2161 case TargetOpcode::LIFETIME_END:
2162 return outliner::InstrType::Invisible;
2163 default:
2164 break;
2165 }
2166
2167 // Is this a terminator for a basic block?
2168 if (MI.isTerminator()) {
2169 // If this is a branch to another block, we can't outline it.
2170 if (!MI.getParent()->succ_empty())
2171 return outliner::InstrType::Illegal;
2172
2173 // Don't outline if the branch is not unconditional.
2174 if (isPredicated(MI))
2175 return outliner::InstrType::Illegal;
2176 }
2177
2178 // Make sure none of the operands of this instruction do anything that
2179 // might break if they're moved outside their current function.
2180 // This includes MachineBasicBlock references, BlockAddresses,
2181 // Constant pool indices and jump table indices.
2182 //
2183 // A quick note on MO_TargetIndex:
2184 // This doesn't seem to be used in any of the architectures that the
2185 // MachineOutliner supports, but it was still filtered out in all of them.
2186 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2187 // As such, this check is removed both here and in the target-specific
2188 // implementations. Instead, we assert to make sure this doesn't
2189 // catch anyone off-guard somewhere down the line.
2190 for (const MachineOperand &MOP : MI.operands()) {
2191 // If you hit this assertion, please remove it and adjust
2192 // `getOutliningTypeImpl` for your target appropriately if necessary.
2193 // Adding the assertion back to other supported architectures
2194 // would be nice too :)
2195 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2196
2197 // CFI instructions should already have been filtered out at this point.
2198 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2199
2200 // PrologEpilogInserter should've already run at this point.
2201 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2202
2203 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2204 return outliner::InstrType::Illegal;
2205 }
2206
2207 // If we don't know, delegate to the target-specific hook.
2208 return getOutliningTypeImpl(MMI, MIT, Flags);
2209}
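// A minimal usage sketch (not LLVM source; names are hypothetical): the
// MachineOutliner classifies every instruction before building its suffix
// tree, conceptually as below.
static void classifyForOutlining(MachineBasicBlock &MBB,
                                 const MachineModuleInfo &MMI,
                                 const TargetInstrInfo &TII, unsigned Flags) {
  for (auto It = MBB.begin(), E = MBB.end(); It != E; ++It) {
    switch (TII.getOutliningType(MMI, It, Flags)) {
    case outliner::InstrType::Legal:           // may appear in a candidate
    case outliner::InstrType::LegalTerminator: // legal, but ends a candidate
    case outliner::InstrType::Invisible:       // skipped by the mapper
    case outliner::InstrType::Illegal:         // breaks any candidate
      break;
    }
  }
}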
2210
2211bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2212 unsigned &Flags) const {
2213 // Some instrumentations create special TargetOpcode at the start which
2214 // expands to special code sequences which must be present.
2215 auto First = MBB.getFirstNonDebugInstr();
2216 if (First == MBB.end())
2217 return true;
2218
2219 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2220 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2221 return false;
2222
2223 // Some instrumentations create special pseudo-instructions at or just before
2224 // the end that must be present.
2225 auto Last = MBB.getLastNonDebugInstr();
2226 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2227 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2228 return false;
2229
2230 if (Last != First && Last->isReturn()) {
2231 --Last;
2232 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2233 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2234 return false;
2235 }
2236 return true;
2237}
2238
2239bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2240 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2241 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2242}