//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

43 "disable-sched-hazard", cl::Hidden, cl::init(false),
44 cl::desc("Disable hazard detection during preRA scheduling"));
45
47 "acc-reassoc", cl::Hidden, cl::init(true),
48 cl::desc("Enable reassociation of accumulation chains"));
49
52 cl::desc("Minimum length of accumulator chains "
53 "required for the optimization to kick in"));
54
56 "acc-max-width", cl::Hidden, cl::init(3),
57 cl::desc("Maximum number of branches in the accumulator tree"));
58
60
const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str,
    const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
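
// Editor's note (illustration, not in the original source): with a target
// whose getMaxInstLength() is 4 and whose comment string is "#", the inline
// asm string "nop\n.space 100\n# comment" measures 4 + 100 + 0 = 104 bytes:
// one ordinary instruction, the .space payload, and a comment line that
// counts as no instruction.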

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateAdditionalCallInfo())
      MBB->getParent()->eraseAdditionalCallInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;

  // For a case like this:
  //   %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
  // we need to update the implicit-def after commuting to result in:
  //   %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
  SmallVector<unsigned> UpdateImplicitDefIdx;
  if (HasDef && MI.hasImplicitDef()) {
    const TargetRegisterInfo *TRI =
        MI.getMF()->getSubtarget().getRegisterInfo();
    for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
      Register ImplReg = MO.getReg();
      if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
          (ImplReg.isPhysical() && Reg0.isPhysical() &&
           TRI->isSubRegisterEq(ImplReg, Reg0)))
        UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
    }
  }

  // If destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
    for (unsigned Idx : UpdateImplicitDefIdx)
      CommutedMI->getOperand(Idx).setReg(Reg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in findCommutedOpIndices() method
  // called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
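
// Editor's sketch (not in the original source): a typical caller lets the
// target choose the operand pair by passing CommuteAnyOperandIndex. Assuming
// MI is a commutable instruction such as "%0 = ADD %1, %2":
//
//   unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
//   unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
//   // Commute in place (NewMI == false); returns nullptr on failure.
//   if (MachineInstr *Commuted = TII->commuteInstruction(MI, false, Idx1, Idx2))
//     ; // MI is now "%0 = ADD %2, %1" with kill/undef flags swapped.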

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
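
// Editor's example (not in the original source): resolving "any" indices.
// With the commutable pair (1, 2), a request of (CommuteAnyOperandIndex, 2)
// forces ResultIdx1 to 1 and succeeds, while a request of (3, 2) fails
// because operand 3 is not one of the commutable operands:
//
//   unsigned I1 = TargetInstrInfo::CommuteAnyOperandIndex, I2 = 2;
//   bool OK = fixCommutedOpIndices(I1, I2, 1, 2); // OK == true, I1 == 1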

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
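
// Editor's worked example (not in the original source): for a register class
// with a 16-byte spill size and a subregister index covering bits [32, 64),
// the code above yields Size == 4 and Offset == 4 on a little-endian target.
// On a big-endian target the final adjustment gives Offset == 16 - (4 + 4)
// == 8, because the slot is addressed from its most significant end.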

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

/// Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if load defines a virtual
/// register, the virtual register is used once in the same BB, and the
/// instructions in-between do not load or store, and have no side effects.
MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                                 const MachineRegisterInfo *MRI,
                                                 Register &FoldAsLoadDefReg,
                                                 MachineInstr *&DefMI) const {
  // Check whether we can move DefMI here.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(SawStore))
    return nullptr;

  // Collect information about virtual register operands of MI.
  SmallVector<unsigned, 1> SrcOperandIds;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg != FoldAsLoadDefReg)
      continue;
    // Do not fold if we have a subreg use or a def.
    if (MO.getSubReg() || MO.isDef())
      return nullptr;
    SrcOperandIds.push_back(i);
  }
  if (SrcOperandIds.empty())
    return nullptr;

  // Check whether we can fold the def into SrcOperandId.
  if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange aborts if MI is not a stackmap, patchpoint,
  // or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}
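
// Editor's note (not in the original source): each folded live value is
// rewritten into the four-operand indirect form that the StackMaps emitter
// decodes. For example, a value spilled to an 8-byte slot at FrameIndex 2
// becomes the operand sequence:
//
//   ..., StackMaps::IndirectMemRefOp, 8 /*size*/, <fi#2>, 0 /*offset*/, ...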

static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 5> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
  // is the per-target number of operands that represent the memory operand
  // excluding this one (MD). This includes MO.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update mayload/maystore metadata, and memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  if (Flags == MachineMemOperand::MOStore) {
    if (MO.isUndef()) {
      // If this is an undef copy, we do not need to bother inserting spill
      // code.
      BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
    } else {
      storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                          Register());
    }
  } else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());

  return &*--Pos;
}
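
// Editor's sketch (not in the original source): the COPY fallback above turns
// a spill or reload of a copy into a direct memory access of the other
// operand. Folding operand 0 (the def) of "%1 = COPY %0" to a frame index
// emits the equivalent of storeRegToStackSlot(MBB, Pos, %0, ..., FI, RC, TRI,
// Register()) and returns the newly inserted store instruction.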

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters previous copies defined.

    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill(),
              DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
              SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB.
// If \param CombineOpc is not provided, the OpCode check will
// be skipped.
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc = 0) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;

  if (MO.isReg() && MO.getReg().isVirtual())
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB ||
      (MI->getOpcode() != CombineOpc && CombineOpc != 0))
    return false;
  // Must only be used by the instruction we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}

// A chain of accumulation instructions will be selected IFF:
// 1. All the accumulation instructions in the chain have the same opcode,
//    besides the first that has a slightly different opcode because it does
//    not accumulate into a register.
// 2. All the instructions in the chain are combinable (have a single use
//    which itself is part of the chain).
// 3. Meets the required minimum length.
void TargetInstrInfo::getAccumulatorChain(
    MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
  // Walk up the chain of accumulation instructions and collect them in the
  // vector.
  MachineBasicBlock &MBB = *CurrentInstr->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
  std::optional<unsigned> ChainStartOpCode =
      getAccumulationStartOpcode(AccumulatorOpcode);

  if (!ChainStartOpCode.has_value())
    return;

  // Push the first accumulator result to the start of the chain.
  Chain.push_back(CurrentInstr->getOperand(0).getReg());

  // Collect the accumulator input register from all instructions in the chain.
  while (CurrentInstr &&
         canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
    CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
  }

  // Add the instruction at the top of the chain.
  if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
      canCombine(MBB, CurrentInstr->getOperand(1)))
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
}
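
// Editor's illustration (not in the original source): for a hypothetical
// accumulating opcode ACC whose chain-start form is ACC_START, the walk above
// proceeds bottom-up through operand 1. Given
//
//   %1 = ACC_START %a, %b
//   %2 = ACC %1, %c, %d
//   %3 = ACC %2, %e, %f    <- CurrentInstr on entry
//
// Chain is populated as {%3, %2, %1}.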

/// Find chains of accumulations that can be rewritten as a tree for increased
/// ILP.
bool TargetInstrInfo::getAccumulatorReassociationPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
  if (!EnableAccReassociation)
    return false;

  unsigned Opc = Root.getOpcode();
  if (!isAccumulationOpcode(Opc))
    return false;

  // Verify that this is the end of the chain.
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
    return false;

  auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
  if (User->getOpcode() == Opc)
    return false;

  // Walk up the use chain and collect the reduction chain.
  SmallVector<Register, 32> Chain;
  getAccumulatorChain(&Root, Chain);

  // Reject chains which are too short to be worth modifying.
  if (Chain.size() < MinAccumulatorDepth)
    return false;

  // Check if the MBB this instruction is a part of contains any other chains.
  // If so, don't apply it.
  SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
  for (const auto &I : MBB) {
    if (I.getOpcode() == Opc &&
        !ReductionChain.contains(I.getOperand(0).getReg()))
      return false;
  }

  Patterns.push_back(MachineCombinerPattern::ACC_CHAIN);
  return true;
}

// Reduce branches of the accumulator tree by adding them together.
void TargetInstrInfo::reduceAccumulatorTree(
    SmallVectorImpl<Register> &RegistersToReduce,
    SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
    MachineInstr &Root, MachineRegisterInfo &MRI,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
    Register ResultReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<Register, 8> NewRegs;

  // Get the opcode for the reduction instruction we will need to build.
  // If for some reason it is not defined, early exit and don't apply this.
  unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());

  for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
    auto RHS = RegistersToReduce[i - 1];
    auto LHS = RegistersToReduce[i];
    Register Dest;
    // If we are reducing 2 registers, reuse the original result register.
    if (RegistersToReduce.size() == 2)
      Dest = ResultReg;
    // Otherwise, create a new virtual register to hold the partial sum.
    else {
      auto NewVR = MRI.createVirtualRegister(
          MRI.getRegClass(Root.getOperand(0).getReg()));
      Dest = NewVR;
      NewRegs.push_back(Dest);
      InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
    }

    // Create the new reduction instruction.
    MachineInstrBuilder MIB =
        BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
            .addReg(RHS, getKillRegState(true))
            .addReg(LHS, getKillRegState(true));
    // Copy any flags needed from the original instruction.
    MIB->setFlags(Root.getFlags());
    InsInstrs.push_back(MIB);
  }

  // If the number of registers to reduce is odd, add the remaining register to
  // the vector of registers to reduce.
  if (RegistersToReduce.size() % 2 != 0)
    NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);

  RegistersToReduce = NewRegs;
}
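
// Editor's illustration (not in the original source): with three partial
// accumulators {%p0, %p1, %p2} and ADD standing in for the opcode returned
// by getReduceOpcodeForAccumulator, two rounds of the caller's while-loop
// produce:
//
//   %t0  = ADD %p0, %p1   // round 1; %p2 is carried over as the odd register
//   %res = ADD %t0, %p2   // round 2 reuses the original result register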
1130
1131// The concept of the reassociation pass is that these operations can benefit
1132// from this kind of transformation:
1133//
1134// A = ? op ?
1135// B = A op X (Prev)
1136// C = B op Y (Root)
1137// -->
1138// A = ? op ?
1139// B = X op Y
1140// C = A op B
1141//
1142// breaking the dependency between A and B, allowing them to be executed in
1143// parallel (or back-to-back in a pipeline) instead of depending on each other.
1144
1145// FIXME: This has the potential to be expensive (compile time) while not
1146// improving the code at all. Some ways to limit the overhead:
1147// 1. Track successful transforms; bail out if hit rate gets too low.
1148// 2. Only enable at -O3 or some other non-default optimization level.
1149// 3. Pre-screen pattern candidates here: if an operand of the previous
1150// instruction is known to not increase the critical path, then don't match
1151// that pattern.
1153 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1154 bool DoRegPressureReduce) const {
1155 bool Commute;
1156 if (isReassociationCandidate(Root, Commute)) {
1157 // We found a sequence of instructions that may be suitable for a
1158 // reassociation of operands to increase ILP. Specify each commutation
1159 // possibility for the Prev instruction in the sequence and let the
1160 // machine combiner decide if changing the operands is worthwhile.
1161 if (Commute) {
1164 } else {
1167 }
1168 return true;
1169 }
1170 if (getAccumulatorReassociationPatterns(Root, Patterns))
1171 return true;
1172
1173 return false;
1174}

/// Return true when a code sequence can improve loop throughput.
bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
  return false;
}

CombinerObjective
TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
  switch (Pattern) {
  case MachineCombinerPattern::ACC_CHAIN:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change operands order. In this case opcodes are
  // not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that the
  // instructions' opcodes are equal or one of them is the inversion of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

void TargetInstrInfo::getReassocOperandIndices(
    const MachineInstr &Root, unsigned Pattern,
    std::array<unsigned, 5> &OperandIndices) const {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    OperandIndices = {1, 1, 1, 2, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    OperandIndices = {2, 1, 2, 2, 1};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    OperandIndices = {1, 2, 1, 1, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    OperandIndices = {2, 2, 2, 1, 1};
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    ArrayRef<unsigned> OperandIndices,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
  MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
  MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
  MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
  unsigned RootFirstOpIdx, RootSecondOpIdx;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }

  // Basically BuildMI but doesn't add implicit operands by default.
  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
                              const MCInstrDesc &MCID, Register DestReg) {
    return MachineInstrBuilder(
               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
        .copyMIMetadata(MIMD)
        .addReg(DestReg, RegState::Define);
  };

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
  for (const auto &MO : Prev.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand we'd already added.
    if (Idx == 0)
      continue;
    if (Idx == PrevFirstOpIdx)
      MIB1.addReg(RegX, getKillRegState(KillX));
    else if (Idx == PrevSecondOpIdx)
      MIB1.addReg(RegY, getKillRegState(KillY));
    else
      MIB1.add(MO);
  }
  MIB1.copyImplicitOps(Prev);

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
  for (const auto &MO : Root.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand.
    if (Idx == 0)
      continue;
    if (Idx == RootFirstOpIdx)
      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
    else if (Idx == RootSecondOpIdx)
      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
    else
      MIB2 = MIB2.add(MO);
  }
  MIB2.copyImplicitOps(Root);

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  MachineBasicBlock &MBB = *Root.getParent();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB: {
    // Select the previous instruction in the sequence based on the input
    // pattern.
    std::array<unsigned, 5> OperandIndices;
    getReassocOperandIndices(Root, Pattern, OperandIndices);
    MachineInstr *Prev =
        MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());

    // Don't reassociate if Prev and Root are in different blocks.
    if (Prev->getParent() != Root.getParent())
      return;

    reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
                   InstIdxForVirtReg);
    break;
  }
  case MachineCombinerPattern::ACC_CHAIN: {
    SmallVector<Register, 32> ChainRegs;
    getAccumulatorChain(&Root, ChainRegs);
    unsigned int Depth = ChainRegs.size();
    assert(MaxAccumulatorWidth > 1 &&
           "Max accumulator width set to illegal value");
    unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
                                ? Log2_32(Depth)
                                : MaxAccumulatorWidth;

    // Walk down the chain and rewrite it as a tree.
    for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
      // No need to rewrite the first node, it is already perfect as it is.
      if (IndexedReg.index() == 0)
        continue;

      MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
      MachineInstrBuilder MIB;
      Register AccReg;
      if (IndexedReg.index() < MaxWidth) {
        // Now we need to create new instructions for the first row.
        AccReg = Instr->getOperand(0).getReg();
        unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());

        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      } else {
        // For the remaining cases, we need to use an output register of one of
        // the newly inserted instructions as operand 1.
        AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
                     ? MRI.createVirtualRegister(
                           MRI.getRegClass(Root.getOperand(0).getReg()))
                     : Instr->getOperand(0).getReg();
        assert(IndexedReg.index() >= MaxWidth);
        auto AccumulatorInput =
            ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
                      AccReg)
                  .addReg(AccumulatorInput, getKillRegState(true))
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      }

      MIB->setFlags(Instr->getFlags());
      InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
      InsInstrs.push_back(MIB);
      DelInstrs.push_back(Instr);
    }

    SmallVector<Register, 8> RegistersToReduce;
    for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
         ++i) {
      auto Reg = InsInstrs[i]->getOperand(0).getReg();
      RegistersToReduce.push_back(Reg);
    }

    while (RegistersToReduce.size() > 1)
      reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
                            InstIdxForVirtReg, Root.getOperand(0).getReg());

    break;
  }
  }
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
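
// Editor's note (not in the original source): the checks above admit, for
// example, a constant materialization like "%0 = MOVi32imm 42" (no loads, no
// side effects, no register uses) and reject an add that reads another
// virtual register, since rematerializing it would lengthen that operand's
// live range.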

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
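
// Editor's worked example (not in the original source): on a target whose
// stack grows down, a call frame setup instruction such as
// "ADJCALLSTACKDOWN 16, ..." yields SPAdj == +16 (after alignment), and the
// matching "ADJCALLSTACKUP 16, ..." yields -16, so the adjustments cancel
// across the call sequence.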

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  LocationSize Width = LocationSize::precise(0);
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

std::optional<unsigned>
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return std::nullopt;

  if (!DefNode->isMachineOpcode())
    return std::nullopt;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
1774
1775unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1776 SDNode *N) const {
1777 if (!ItinData || ItinData->isEmpty())
1778 return 1;
1779
1780 if (!N->isMachineOpcode())
1781 return 1;
1782
1783 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1784}
1785
1786//===----------------------------------------------------------------------===//
1787// MachineInstr latency interface.
1788//===----------------------------------------------------------------------===//
1789
1790unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1791 const MachineInstr &MI) const {
1792 if (!ItinData || ItinData->isEmpty())
1793 return 1;
1794
1795 unsigned Class = MI.getDesc().getSchedClass();
1796 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1797 if (UOps >= 0)
1798 return UOps;
1799
1800 // The # of u-ops is dynamically determined. The specific target should
1801 // override this function to return the right number.
1802 return 1;
1803}
1804
1805/// Return the default expected latency for a def based on its opcode.
1806unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1807 const MachineInstr &DefMI) const {
1808 if (DefMI.isTransient())
1809 return 0;
1810 if (DefMI.mayLoad())
1811 return SchedModel.LoadLatency;
1812 if (isHighLatencyDef(DefMI.getOpcode()))
1813 return SchedModel.HighLatency;
1814 return 1;
1815}
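
// Illustrative example (assuming the generic MCSchedModel defaults of
// LoadLatency = 4 and HighLatency = 10): a transient COPY reports 0 cycles,
// a plain load 4, an opcode the target flags via isHighLatencyDef() 10, and
// everything else 1.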
1816
1817unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &MI) const {
1818 return 0;
1819}
1820
1821unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1822 const MachineInstr &MI,
1823 unsigned *PredCost) const {
1824 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1825 // still have a MinLatency property, which getStageLatency checks.
1826 if (!ItinData)
1827 return MI.mayLoad() ? 2 : 1;
1828
1829 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1830}
1831
1832bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1833 const MachineInstr &DefMI,
1834 unsigned DefIdx) const {
1835 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1836 if (!ItinData || ItinData->isEmpty())
1837 return false;
1838
1839 unsigned DefClass = DefMI.getDesc().getSchedClass();
1840 std::optional<unsigned> DefCycle =
1841 ItinData->getOperandCycle(DefClass, DefIdx);
1842 return DefCycle && DefCycle <= 1U;
1843}
1844
1845bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1846 // TODO: We don't split functions where a section attribute has been set
1847 // since the split part may not be placed in a contiguous region. It may also
1848 // be more beneficial to augment the linker to ensure contiguous layout of
1849 // split functions within the same section as specified by the attribute.
1850 if (MF.getFunction().hasSection())
1851 return false;
1852
1853 // We don't want to proceed further for cold functions
1854 // or functions of unknown hotness. Lukewarm functions have no prefix.
1855 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1856 if (SectionPrefix &&
1857 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1858 return false;
1859 }
1860
1861 return true;
1862}
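
// Illustrative examples (IR-level, attachment syntax abbreviated):
//
//   define void @f() section ".text.custom" { ... }  ; rejected: has section
//   define void @g() { ... } ; section_prefix "unlikely" -> rejected: cold
//   define void @h() { ... }                         ; remains a candidate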
1863
1864std::optional<ParamLoadedValue>
1865TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1866 Register Reg) const {
1867 const MachineFunction *MF = MI.getMF();
1868 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1869 DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1870 int64_t Offset;
1871 bool OffsetIsScalable;
1872
1873 // To simplify the sub-register handling, verify that we only need to
1874 // consider physical registers.
1875 assert(MF->getProperties().hasNoVRegs());
1876
1877 if (auto DestSrc = isCopyInstr(MI)) {
1878 Register DestReg = DestSrc->Destination->getReg();
1879
1880 // If the copy destination is the forwarding reg, describe the forwarding
1881 // reg using the copy source as the backup location. Example:
1882 //
1883 // x0 = MOV x7
1884 // call callee(x0) ; x0 described as x7
1885 if (Reg == DestReg)
1886 return ParamLoadedValue(*DestSrc->Source, Expr);
1887
1888 // If the target's hook couldn't describe this copy, give up.
1889 return std::nullopt;
1890 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1891 Register SrcReg = RegImm->Reg;
1892 Offset = RegImm->Imm;
1893 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1894 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1895 } else if (MI.hasOneMemOperand()) {
1896 // Only describe memory which provably does not escape the function. As
1897 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1898 // callee (or by another thread).
1899 const auto &TII = MF->getSubtarget().getInstrInfo();
1900 const MachineFrameInfo &MFI = MF->getFrameInfo();
1901 const MachineMemOperand *MMO = MI.memoperands()[0];
1902 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1903
1904 // If the address points to "special" memory (e.g. a spill slot), it's
1905 // sufficient to check that it isn't aliased by any high-level IR value.
1906 if (!PSV || PSV->mayAlias(&MFI))
1907 return std::nullopt;
1908
1909 const MachineOperand *BaseOp;
1910 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1911 TRI))
1912 return std::nullopt;
1913
1914 // FIXME: Scalable offsets are not yet handled in the offset code below.
1915 if (OffsetIsScalable)
1916 return std::nullopt;
1917
1918 // TODO: Can currently only handle mem instructions with a single define.
1919 // An example from the x86 target:
1920 // ...
1921 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1922 // ...
1923 //
1924 if (MI.getNumExplicitDefs() != 1)
1925 return std::nullopt;
1926
1927 // TODO: In what way do we need to take Reg into consideration here?
1928
1929 SmallVector<uint64_t, 8> Ops;
1930 DIExpression::appendOffset(Ops, Offset);
1931 Ops.push_back(dwarf::DW_OP_deref_size);
1932 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1933 : ~UINT64_C(0));
1934 Expr = DIExpression::prependOpcodes(Expr, Ops);
1935 return ParamLoadedValue(*BaseOp, Expr);
1936 }
1937
1938 return std::nullopt;
1939}
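
// Illustrative example (AArch64-flavoured; the opcode is only for
// illustration): for `$x0 = ADDXri $x1, 16, 0` and Reg == $x0, the
// add-immediate case above describes the value as register $x1 with
// DW_OP_plus_uconst 16 applied to the expression.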
1940
1941// Get the call frame size just before MI.
1942unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1943 // Search backwards from MI for the most recent call frame instruction.
1944 MachineBasicBlock *MBB = MI.getParent();
1945 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1946 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1947 return getFrameTotalSize(AdjI);
1948 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1949 return 0;
1950 }
1951
1952 // If none was found, use the call frame size from the start of the basic
1953 // block.
1954 return MBB->getCallFrameSize();
1955}
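
// Illustrative example (pseudo-MIR): scanning backwards from MI,
//
//   ADJCALLSTACKDOWN 32, 0, ...  ; nearest frame setup -> returns 32
//   $x0 = COPY $x19
//   BL @callee                   ; <- MI
//
// A preceding frame-destroy yields 0, and with no frame instruction at all
// the block's entry call-frame size is used.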
1956
1957/// Both DefMI and UseMI must be valid. By default, call directly to the
1958/// itinerary. This may be overridden by the target.
1959std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1960 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1961 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1962 unsigned DefClass = DefMI.getDesc().getSchedClass();
1963 unsigned UseClass = UseMI.getDesc().getSchedClass();
1964 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1965}
1966
1967bool TargetInstrInfo::getRegSequenceInputs(
1968 const MachineInstr &MI, unsigned DefIdx,
1969 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1970 assert((MI.isRegSequence() ||
1971 MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1972
1973 if (!MI.isRegSequence())
1974 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1975
1976 // We are looking at:
1977 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1978 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1979 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1980 OpIdx += 2) {
1981 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1982 if (MOReg.isUndef())
1983 continue;
1984 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1985 assert(MOSubIdx.isImm() &&
1986 "One of the subindices of the reg_sequence is not an immediate");
1987 // Record Reg:SubReg, SubIdx.
1988 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1989 (unsigned)MOSubIdx.getImm()));
1990 }
1991 return true;
1992}
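
// Illustrative example: for
//   %d = REG_SEQUENCE %a, %subreg.sub0, %b, %subreg.sub1
// InputRegs receives one (reg, subreg, subidx) triple per input, here
// (%a, sub0) and (%b, sub1), with undef inputs skipped.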
1993
1994bool TargetInstrInfo::getExtractSubregInputs(
1995 const MachineInstr &MI, unsigned DefIdx,
1996 RegSubRegPairAndIdx &InputReg) const {
1997 assert((MI.isExtractSubreg() ||
1998 MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1999
2000 if (!MI.isExtractSubreg())
2001 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
2002
2003 // We are looking at:
2004 // Def = EXTRACT_SUBREG v0.sub1, sub0.
2005 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
2006 const MachineOperand &MOReg = MI.getOperand(1);
2007 if (MOReg.isUndef())
2008 return false;
2009 const MachineOperand &MOSubIdx = MI.getOperand(2);
2010 assert(MOSubIdx.isImm() &&
2011 "The subindex of the extract_subreg is not an immediate");
2012
2013 InputReg.Reg = MOReg.getReg();
2014 InputReg.SubReg = MOReg.getSubReg();
2015 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2016 return true;
2017}
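
// Illustrative example: for
//   %d = EXTRACT_SUBREG %a, %subreg.sub0
// InputReg becomes (%a, sub0); an undef source makes this return false.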
2018
2019bool TargetInstrInfo::getInsertSubregInputs(
2020 const MachineInstr &MI, unsigned DefIdx,
2021 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2022 assert((MI.isInsertSubreg() ||
2023 MI.isInsertSubregLike()) && "Instruction does not have the proper type");
2024
2025 if (!MI.isInsertSubreg())
2026 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2027
2028 // We are looking at:
2029 // Def = INSERT_SUBREG v0, v1, sub0.
2030 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2031 const MachineOperand &MOBaseReg = MI.getOperand(1);
2032 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2033 if (MOInsertedReg.isUndef())
2034 return false;
2035 const MachineOperand &MOSubIdx = MI.getOperand(3);
2036 assert(MOSubIdx.isImm() &&
2037 "The subindex of the insert_subreg is not an immediate");
2038 BaseReg.Reg = MOBaseReg.getReg();
2039 BaseReg.SubReg = MOBaseReg.getSubReg();
2040
2041 InsertedReg.Reg = MOInsertedReg.getReg();
2042 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2043 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2044 return true;
2045}
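
// Illustrative example: for
//   %d = INSERT_SUBREG %base, %ins, %subreg.sub0
// BaseReg becomes %base and InsertedReg (%ins, sub0); an undef inserted
// value makes this return false.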
2046
2047// Returns a MIRPrinter comment for this machine operand.
2048std::string TargetInstrInfo::createMIROperandComment(
2049 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2050 const TargetRegisterInfo *TRI) const {
2051
2052 if (!MI.isInlineAsm())
2053 return "";
2054
2055 std::string Flags;
2056 raw_string_ostream OS(Flags);
2057
2058 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2059 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2060 unsigned ExtraInfo = Op.getImm();
2061 bool First = true;
2062 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
2063 if (!First)
2064 OS << " ";
2065 First = false;
2066 OS << Info;
2067 }
2068
2069 return Flags;
2070 }
2071
2072 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2073 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2074 return "";
2075
2076 assert(Op.isImm() && "Expected flag operand to be an immediate");
2077 // Pretty print the inline asm operand descriptor.
2078 unsigned Flag = Op.getImm();
2079 const InlineAsm::Flag F(Flag);
2080 OS << F.getKindName();
2081
2082 unsigned RCID;
2083 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
2084 if (TRI) {
2085 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
2086 } else
2087 OS << ":RC" << RCID;
2088 }
2089
2090 if (F.isMemKind()) {
2091 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2092 OS << ":" << InlineAsm::getMemConstraintName(MCID);
2093 }
2094
2095 unsigned TiedTo;
2096 if (F.isUseOperandTiedToDef(TiedTo))
2097 OS << " tiedto:$" << TiedTo;
2098
2099 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2100 F.getRegMayBeFolded())
2101 OS << " foldable";
2102
2103 return Flags;
2104}
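
// Illustrative example (exact strings depend on the flag kind): for an
// inline-asm flag operand encoding a register use tied to def 0, the
// returned comment resembles "reguse tiedto:$0".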
2105
2106TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
2107
2108void TargetInstrInfo::mergeOutliningCandidateAttributes(
2109 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2110 // Include target features from an arbitrary candidate for the outlined
2111 // function. This makes sure the outlined function knows what kinds of
2112 // instructions are going into it. This is fine, since all parent functions
2113 // must necessarily support the instructions that are in the outlined region.
2114 outliner::Candidate &FirstCand = Candidates.front();
2115 const Function &ParentFn = FirstCand.getMF()->getFunction();
2116 if (ParentFn.hasFnAttribute("target-features"))
2117 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2118 if (ParentFn.hasFnAttribute("target-cpu"))
2119 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2120
2121 // Set nounwind, so we don't generate eh_frame.
2122 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2123 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2124 }))
2125 F.addFnAttr(Attribute::NoUnwind);
2126}
2127
2128outliner::InstrType
2129TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2130 MachineBasicBlock::iterator &MIT,
2131 unsigned Flags) const {
2132 MachineInstr &MI = *MIT;
2133
2134 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2135 // have support for outlining those. Special-case that here.
2136 if (MI.isCFIInstruction())
2137 // Just go right to the target implementation.
2138 return getOutliningTypeImpl(MMI, MIT, Flags);
2139
2140 // Be conservative about inline assembly.
2141 if (MI.isInlineAsm())
2142 return outliner::InstrType::Illegal;
2143
2144 // Labels generally can't safely be outlined.
2145 if (MI.isLabel())
2146 return outliner::InstrType::Illegal;
2147
2148 // Don't let debug instructions impact analysis.
2149 if (MI.isDebugInstr())
2150 return outliner::InstrType::Invisible;
2151
2152 // Some other special cases.
2153 switch (MI.getOpcode()) {
2154 case TargetOpcode::IMPLICIT_DEF:
2155 case TargetOpcode::KILL:
2156 case TargetOpcode::LIFETIME_START:
2157 case TargetOpcode::LIFETIME_END:
2158 return outliner::InstrType::Invisible;
2159 default:
2160 break;
2161 }
2162
2163 // Is this a terminator for a basic block?
2164 if (MI.isTerminator()) {
2165 // If this is a branch to another block, we can't outline it.
2166 if (!MI.getParent()->succ_empty())
2167 return outliner::InstrType::Illegal;
2168
2169 // Don't outline if the branch is not unconditional.
2170 if (isPredicated(MI))
2171 return outliner::InstrType::Illegal;
2172 }
2173
2174 // Make sure none of the operands of this instruction do anything that
2175 // might break if they're moved outside their current function.
2176 // This includes MachineBasicBlock references, BlockAddressses,
2177 // Constant pool indices and jump table indices.
2178 //
2179 // A quick note on MO_TargetIndex:
2180 // This doesn't seem to be used in any of the architectures that the
2181 // MachineOutliner supports, but it was still filtered out in all of them.
2182 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2183 // As such, this check is removed both here and in the target-specific
2184 // implementations. Instead, we assert to make sure this doesn't
2185 // catch anyone off-guard somewhere down the line.
2186 for (const MachineOperand &MOP : MI.operands()) {
2187 // If you hit this assertion, please remove it and adjust
2188 // `getOutliningTypeImpl` for your target appropriately if necessary.
2189 // Adding the assertion back to other supported architectures
2190 // would be nice too :)
2191 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2192
2193 // CFI instructions should already have been filtered out at this point.
2194 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2195
2196 // PrologEpilogInserter should've already run at this point.
2197 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2198
2199 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2200 return outliner::InstrType::Illegal;
2201 }
2202
2203 // If we don't know, delegate to the target-specific hook.
2204 return getOutliningTypeImpl(MMI, MIT, Flags);
2205}
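
// Illustrative sketch, not part of this file (hypothetical XYZ target): the
// target hook only has to judge what the generic filter above leaves open.
//
//   outliner::InstrType
//   XYZInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
//                                      MachineBasicBlock::iterator &MIT,
//                                      unsigned Flags) const {
//     if (MIT->isCall()) // e.g. calls would need fix-ups this target lacks
//       return outliner::InstrType::Illegal;
//     return outliner::InstrType::Legal;
//   }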
2206
2207bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2208 unsigned &Flags) const {
2209 // Some instrumentations create special TargetOpcode pseudo-instructions at
2210 // the start which expand to special code sequences that must be present.
2211 auto First = MBB.getFirstNonDebugInstr();
2212 if (First == MBB.end())
2213 return true;
2214
2215 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2216 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2217 return false;
2218
2219 // Some instrumentations create special pseudo-instructions at or just before
2220 // the end that must be present.
2221 auto Last = MBB.getLastNonDebugInstr();
2222 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2223 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2224 return false;
2225
2226 if (Last != First && Last->isReturn()) {
2227 --Last;
2228 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2229 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2230 return false;
2231 }
2232 return true;
2233}
2234
2235bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2236 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2237 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2238}