//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"

using namespace llvm;

43 "disable-sched-hazard", cl::Hidden, cl::init(false),
44 cl::desc("Disable hazard detection during preRA scheduling"));
45
47 "acc-reassoc", cl::Hidden, cl::init(true),
48 cl::desc("Enable reassociation of accumulation chains"));
49
52 cl::desc("Minimum length of accumulator chains "
53 "required for the optimization to kick in"));
54
56 "acc-max-width", cl::Hidden, cl::init(3),
57 cl::desc("Maximum number of branches in the accumulator tree"));
58
60
const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

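// For illustration (not part of the original source): with MaxInstLength = 4,
// the string "nop\n\tnop\n\t.space 16" measures 4 + 4 + 16 = 24 bytes -- two
// maximum-length instructions plus the literal byte count parsed from the
// .space directive.
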
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateAdditionalCallInfo())
      MBB->getParent()->eraseAdditionalCallInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't immediately after MBB in layout, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;

  // For a case like this:
  //   %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
  // we need to update the implicit-def after commuting to result in:
  //   %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
  SmallVector<unsigned> UpdateImplicitDefIdx;
  if (HasDef && MI.hasImplicitDef()) {
    const TargetRegisterInfo *TRI =
        MI.getMF()->getSubtarget().getRegisterInfo();
    for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
      Register ImplReg = MO.getReg();
      if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
          (ImplReg.isPhysical() && Reg0.isPhysical() &&
           TRI->isSubRegisterEq(ImplReg, Reg0)))
        UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
    }
  }

  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
    for (unsigned Idx : UpdateImplicitDefIdx)
      CommutedMI->getOperand(Idx).setReg(Reg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

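// For illustration (not part of the original source): commuting
//   %2:gr32 = ADD32rr %0:gr32(kill), %1:gr32
// over operand indices (1, 2) yields
//   %2:gr32 = ADD32rr %1:gr32, %0:gr32(kill)
// with the kill/undef/internal-read/renamable flags following the registers
// they describe.
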
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

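// For illustration (not part of the original source): with commutable
// operands (1, 2), the query ResultIdx1 = CommuteAnyOperandIndex,
// ResultIdx2 = 2 resolves to (1, 2), whereas fixed indices (3, 2) are
// rejected because 3 is not one of the commutable operands.
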
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

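// Worked example (not part of the original source): for a register class
// with an 8-byte spill size and a 32-bit subregister at bit offset 32,
// BitSize = 32 and BitOffset = 32 give Size = 4 and Offset = 4. On a
// big-endian target the offset is mirrored: Offset = 8 - (4 + 4) = 0.
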
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (!MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load if it defines a virtual register, the virtual register
/// is used once in the same BB, and the instructions in-between do not load
/// or store, and have no side effects.
MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                                 const MachineRegisterInfo *MRI,
                                                 Register &FoldAsLoadDefReg,
                                                 MachineInstr *&DefMI) const {
  // Check whether we can move DefMI here.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(SawStore))
    return nullptr;

  // Collect information about virtual register operands of MI.
  SmallVector<unsigned, 1> SrcOperandIds;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg != FoldAsLoadDefReg)
      continue;
    // Do not fold if we have a subreg use or a def.
    if (MO.getSubReg() || MO.isDef())
      return nullptr;
    SrcOperandIds.push_back(i);
  }
  if (SrcOperandIds.empty())
    return nullptr;

  // Check whether we can fold the def into SrcOperandId.
  if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap, patchpoint, or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 5> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
  // is the per-target number of operands that represent the memory operand
  // excluding this one (MD). This includes MO.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update mayload/maystore metadata, and memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy it from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // A straight COPY may fold as a load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  if (Flags == MachineMemOperand::MOStore) {
    if (MO.isUndef()) {
      // If this is an undef copy, we do not need to bother inserting spill
      // code.
      BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
    } else {
      storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                          Register());
    }
  } else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());

  return &*--Pos;
}

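// For illustration (not part of the original source): given
//   %1 = COPY %0
// folding the use (operand 1) against stack slot FI rewrites the copy as a
// reload of %1 from FI via loadRegFromStackSlot, while folding the def
// (operand 0) rewrites it as a spill of %0 to FI via storeRegToStackSlot.
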
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters that previous copies
    // defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill(),
              DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
              SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB.
// If \param CombineOpc is not provided, the OpCode check will
// be skipped.
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc = 0) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;

  if (MO.isReg() && MO.getReg().isVirtual())
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB ||
      (MI->getOpcode() != CombineOpc && CombineOpc != 0))
    return false;
  // It must only be used by the instruction we combine with.
  if (!MRI.hasOneNonDBGUse(MO.getReg()))
    return false;

  return true;
}

// A chain of accumulation instructions will be selected IFF:
// 1. All the accumulation instructions in the chain have the same opcode,
//    besides the first that has a slightly different opcode because it does
//    not accumulate into a register.
// 2. All the instructions in the chain are combinable (have a single use
//    which itself is part of the chain).
// 3. The chain meets the required minimum length.
void TargetInstrInfo::getAccumulatorChain(
    MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
  // Walk up the chain of accumulation instructions and collect them in the
  // vector.
  MachineBasicBlock &MBB = *CurrentInstr->getParent();
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
  std::optional<unsigned> ChainStartOpCode =
      getAccumulationStartOpcode(AccumulatorOpcode);

  if (!ChainStartOpCode.has_value())
    return;

  // Push the first accumulator result to the start of the chain.
  Chain.push_back(CurrentInstr->getOperand(0).getReg());

  // Collect the accumulator input register from all instructions in the chain.
  while (CurrentInstr &&
         canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
    CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
  }

  // Add the instruction at the top of the chain.
  if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
      canCombine(MBB, CurrentInstr->getOperand(1)))
    Chain.push_back(CurrentInstr->getOperand(1).getReg());
}

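// For illustration (not part of the original source), using hypothetical
// accumulation opcodes:
//   %1 = ACC_START %a, %b
//   %2 = ACC %1, %c, %d
//   %3 = ACC %2, %e, %f   <- Root
// getAccumulatorChain(&Root, Chain) walks up through operand 1 and collects
// {%3, %2, %1}.
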
/// Find chains of accumulations that can be rewritten as a tree for increased
/// ILP.
bool TargetInstrInfo::getAccumulatorReassociationPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
  if (!EnableAccReassociation)
    return false;

  unsigned Opc = Root.getOpcode();
  if (!isAccumulationOpcode(Opc))
    return false;

  // Verify that this is the end of the chain.
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
    return false;

  auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
  if (User->getOpcode() == Opc)
    return false;

  // Walk up the use chain and collect the reduction chain.
  SmallVector<Register, 32> Chain;
  getAccumulatorChain(&Root, Chain);

  // Reject chains which are too short to be worth modifying.
  if (Chain.size() < MinAccumulatorDepth)
    return false;

  // Check if the MBB this instruction is a part of contains any other chains.
  // If so, don't apply it.
  SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
  for (const auto &I : MBB) {
    if (I.getOpcode() == Opc &&
        !ReductionChain.contains(I.getOperand(0).getReg()))
      return false;
  }

  Patterns.push_back(MachineCombinerPattern::ACC_CHAIN);
  return true;
}

// Reduce branches of the accumulator tree by adding them together.
void TargetInstrInfo::reduceAccumulatorTree(
    SmallVectorImpl<Register> &RegistersToReduce,
    SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
    MachineInstr &Root, MachineRegisterInfo &MRI,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
    Register ResultReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<Register, 8> NewRegs;

  // Get the opcode for the reduction instruction we will need to build.
  // If for some reason it is not defined, early exit and don't apply this.
  unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());

  for (unsigned int i = 1; i < RegistersToReduce.size(); i += 2) {
    auto RHS = RegistersToReduce[i - 1];
    auto LHS = RegistersToReduce[i];
    Register Dest;
    // If we are reducing 2 registers, reuse the original result register.
    if (RegistersToReduce.size() == 2)
      Dest = ResultReg;
    // Otherwise, create a new virtual register to hold the partial sum.
    else {
      auto NewVR = MRI.createVirtualRegister(
          MRI.getRegClass(Root.getOperand(0).getReg()));
      Dest = NewVR;
      NewRegs.push_back(Dest);
      InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
    }

    // Create the new reduction instruction.
    MachineInstrBuilder MIB =
        BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
            .addReg(RHS, getKillRegState(true))
            .addReg(LHS, getKillRegState(true));
    // Copy any flags needed from the original instruction.
    MIB->setFlags(Root.getFlags());
    InsInstrs.push_back(MIB);
  }

  // If the number of registers to reduce is odd, add the remaining register to
  // the vector of registers to reduce.
  if (RegistersToReduce.size() % 2 != 0)
    NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);

  RegistersToReduce = NewRegs;
}

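// For illustration (not part of the original source): reducing four partial
// accumulators {%p0, %p1, %p2, %p3} takes two rounds,
//   %t0 = <reduce-op> %p0, %p1
//   %t1 = <reduce-op> %p2, %p3
//   %res = <reduce-op> %t0, %t1
// so the dependence chain shrinks from 4 sequential accumulations to 2.
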
// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }
  if (getAccumulatorReassociationPatterns(Root, Patterns))
    return true;

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
  return false;
}

CombinerObjective
TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
  switch (Pattern) {
  case MachineCombinerPattern::ACC_CHAIN:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change operands order. In this case opcodes are
  // not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inversion of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See the visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

void TargetInstrInfo::getReassociateOperandIndices(
    const MachineInstr &Root, unsigned Pattern,
    std::array<unsigned, 5> &OperandIndices) const {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    OperandIndices = {1, 1, 1, 2, 2};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    OperandIndices = {2, 1, 2, 2, 1};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    OperandIndices = {1, 2, 1, 1, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    OperandIndices = {2, 2, 2, 1, 1};
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    ArrayRef<unsigned> OperandIndices,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
  MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
  MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
  MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
  unsigned RootFirstOpIdx, RootSecondOpIdx;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }

  // Basically BuildMI but doesn't add implicit operands by default.
  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
                              const MCInstrDesc &MCID, Register DestReg) {
    return MachineInstrBuilder(
               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
        .copyMIMetadata(MIMD)
        .addReg(DestReg, RegState::Define);
  };

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
  for (const auto &MO : Prev.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand we'd already added.
    if (Idx == 0)
      continue;
    if (Idx == PrevFirstOpIdx)
      MIB1.addReg(RegX, getKillRegState(KillX));
    else if (Idx == PrevSecondOpIdx)
      MIB1.addReg(RegY, getKillRegState(KillY));
    else
      MIB1.add(MO);
  }
  MIB1.copyImplicitOps(Prev);

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
  for (const auto &MO : Root.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand.
    if (Idx == 0)
      continue;
    if (Idx == RootFirstOpIdx)
      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
    else if (Idx == RootSecondOpIdx)
      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
    else
      MIB2 = MIB2.add(MO);
  }
  MIB2.copyImplicitOps(Root);

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  MachineBasicBlock &MBB = *Root.getParent();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB: {
    // Select the previous instruction in the sequence based on the input
    // pattern.
    std::array<unsigned, 5> OperandIndices;
    getReassociateOperandIndices(Root, Pattern, OperandIndices);
    MachineInstr *Prev =
        MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());

    // Don't reassociate if Prev and Root are in different blocks.
    if (Prev->getParent() != Root.getParent())
      return;

    reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
                   InstIdxForVirtReg);
    break;
  }
  case MachineCombinerPattern::ACC_CHAIN: {
    SmallVector<Register, 32> ChainRegs;
    getAccumulatorChain(&Root, ChainRegs);
    unsigned int Depth = ChainRegs.size();
    assert(MaxAccumulatorWidth > 1 &&
           "Max accumulator width set to illegal value");
    unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
                                ? Log2_32(Depth)
                                : MaxAccumulatorWidth;

    // Walk down the chain and rewrite it as a tree.
    for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
      // No need to rewrite the first node, it is already perfect as it is.
      if (IndexedReg.index() == 0)
        continue;

      MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
      MachineInstrBuilder MIB;
      Register AccReg;
      if (IndexedReg.index() < MaxWidth) {
        // Now we need to create new instructions for the first row.
        AccReg = Instr->getOperand(0).getReg();
        unsigned OpCode = *getAccumulationStartOpcode(Root.getOpcode());

        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      } else {
        // For the remaining cases, we need to use an output register of one of
        // the newly inserted instructions as operand 1.
        AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
                     ? MRI.createVirtualRegister(
                           MRI.getRegClass(Root.getOperand(0).getReg()))
                     : Instr->getOperand(0).getReg();
        assert(IndexedReg.index() >= MaxWidth);
        auto AccumulatorInput =
            ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
                      AccReg)
                  .addReg(AccumulatorInput, getKillRegState(true))
                  .addReg(Instr->getOperand(2).getReg(),
                          getKillRegState(Instr->getOperand(2).isKill()))
                  .addReg(Instr->getOperand(3).getReg(),
                          getKillRegState(Instr->getOperand(3).isKill()));
      }

      MIB->setFlags(Instr->getFlags());
      InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
      InsInstrs.push_back(MIB);
      DelInstrs.push_back(Instr);
    }

    SmallVector<Register, 8> RegistersToReduce;
    for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
         ++i) {
      auto Reg = InsInstrs[i]->getOperand(0).getReg();
      RegistersToReduce.push_back(Reg);
    }

    while (RegistersToReduce.size() > 1)
      reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
                            InstIdxForVirtReg, Root.getOperand(0).getReg());

    break;
  }
  }
}

MachineTraceStrategy TargetInstrInfo::getTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

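// For illustration (not part of the original source): on a target whose stack
// grows down, a call-frame setup of 32 bytes yields SPAdj = +32, and the
// matching call-frame destroy yields SPAdj = -32; the signs flip on a target
// whose stack grows up.
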
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  LocationSize Width = LocationSize::precise(0);
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

1755//===----------------------------------------------------------------------===//
1756// SelectionDAG latency interface.
1757//===----------------------------------------------------------------------===//
1758
1759std::optional<unsigned>
1760TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1761 SDNode *DefNode, unsigned DefIdx,
1762 SDNode *UseNode, unsigned UseIdx) const {
1763 if (!ItinData || ItinData->isEmpty())
1764 return std::nullopt;
1765
1766 if (!DefNode->isMachineOpcode())
1767 return std::nullopt;
1768
1769 unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1770 if (!UseNode->isMachineOpcode())
1771 return ItinData->getOperandCycle(DefClass, DefIdx);
1772 unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1773 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1774}
1775
1776unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1777 SDNode *N) const {
1778 if (!ItinData || ItinData->isEmpty())
1779 return 1;
1780
1781 if (!N->isMachineOpcode())
1782 return 1;
1783
1784 return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1785}
1786
1787//===----------------------------------------------------------------------===//
1788// MachineInstr latency interface.
1789//===----------------------------------------------------------------------===//
1790
1791unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1792 const MachineInstr &MI) const {
1793 if (!ItinData || ItinData->isEmpty())
1794 return 1;
1795
1796 unsigned Class = MI.getDesc().getSchedClass();
1797 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1798 if (UOps >= 0)
1799 return UOps;
1800
1801 // The # of u-ops is dynamically determined. The specific target should
1802 // override this function to return the right number.
1803 return 1;
1804}
1805
1806/// Return the default expected latency for a def based on its opcode.
1807unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1808 const MachineInstr &DefMI) const {
1809 if (DefMI.isTransient())
1810 return 0;
1811 if (DefMI.mayLoad())
1812 return SchedModel.LoadLatency;
1813 if (isHighLatencyDef(DefMI.getOpcode()))
1814 return SchedModel.HighLatency;
1815 return 1;
1816}
1817
1818unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &MI) const {
1819 return 0;
1820}
1821
1822unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1823 const MachineInstr &MI,
1824 unsigned *PredCost) const {
1825 // Default to one cycle for no itinerary. However, an "empty" itinerary may
1826 // still have a MinLatency property, which getStageLatency checks.
1827 if (!ItinData)
1828 return MI.mayLoad() ? 2 : 1;
1829
1830 return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1831}
1832
1833bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1834 const MachineInstr &DefMI,
1835 unsigned DefIdx) const {
1836 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1837 if (!ItinData || ItinData->isEmpty())
1838 return false;
1839
1840 unsigned DefClass = DefMI.getDesc().getSchedClass();
1841 std::optional<unsigned> DefCycle =
1842 ItinData->getOperandCycle(DefClass, DefIdx);
1843 return DefCycle && DefCycle <= 1U;
1844}
1845
1846bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1847 // TODO: We don't split functions where a section attribute has been set
1848 // since the split part may not be placed in a contiguous region. It may also
1849 // be more beneficial to augment the linker to ensure contiguous layout of
1850 // split functions within the same section as specified by the attribute.
1851 if (MF.getFunction().hasSection())
1852 return false;
1853
1854 // We don't want to proceed further for cold functions
1855 // or functions of unknown hotness. Lukewarm functions have no prefix.
1856 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1857 if (SectionPrefix &&
1858 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1859 return false;
1860 }
1861
1862 return true;
1863}
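// Example: a function placed in an explicit section is never split, while a
// profile-known cold function carries the "unlikely" section prefix and is
// rejected by the check above.
//
//   __attribute__((section(".custom_text"))) void f(); // never split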
1864
1865std::optional<ParamLoadedValue>
1866TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1867 Register Reg) const {
1868 const MachineFunction *MF = MI.getMF();
1869 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1870 DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1871 int64_t Offset;
1872 bool OffsetIsScalable;
1873
1874 // To simplify the sub-register handling, verify that we only need to
1875 // consider physical registers.
1876 assert(MF->getProperties().hasNoVRegs());
1877
1878 if (auto DestSrc = isCopyInstr(MI)) {
1879 Register DestReg = DestSrc->Destination->getReg();
1880
1881 // If the copy destination is the forwarding reg, describe the forwarding
1882 // reg using the copy source as the backup location. Example:
1883 //
1884 // x0 = MOV x7
1885 // call callee(x0) ; x0 described as x7
1886 if (Reg == DestReg)
1887 return ParamLoadedValue(*DestSrc->Source, Expr);
1888
1889 // If the target's hook couldn't describe this copy, give up.
1890 return std::nullopt;
1891 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1892 Register SrcReg = RegImm->Reg;
1893 Offset = RegImm->Imm;
1894 Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
1895 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
1896 } else if (MI.hasOneMemOperand()) {
1897 // Only describe memory which provably does not escape the function. As
1898 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1899 // callee (or by another thread).
1900 const auto &TII = MF->getSubtarget().getInstrInfo();
1901 const MachineFrameInfo &MFI = MF->getFrameInfo();
1902 const MachineMemOperand *MMO = MI.memoperands()[0];
1903 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1904
1905 // If the address points to "special" memory (e.g. a spill slot), it's
1906 // sufficient to check that it isn't aliased by any high-level IR value.
1907 if (!PSV || PSV->mayAlias(&MFI))
1908 return std::nullopt;
1909
1910 const MachineOperand *BaseOp;
1911 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1912 TRI))
1913 return std::nullopt;
1914
1915 // FIXME: Scalable offsets are not yet handled in the offset code below.
1916 if (OffsetIsScalable)
1917 return std::nullopt;
1918
1919 // TODO: Can currently only handle mem instructions with a single define.
1920 // An example from the x86 target:
1921 // ...
1922 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1923 // ...
1924 //
1925 if (MI.getNumExplicitDefs() != 1)
1926 return std::nullopt;
1927
1928 // TODO: In what way do we need to take Reg into consideration here?
1929
1930 SmallVector<uint64_t, 8> Ops;
1931 DIExpression::appendOffset(Ops, Offset);
1932 Ops.push_back(dwarf::DW_OP_deref_size);
1933 Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
1934 : ~UINT64_C(0));
1935 Expr = DIExpression::prependOpcodes(Expr, Ops);
1936 return ParamLoadedValue(*BaseOp, Expr);
1937 }
1938
1939 return std::nullopt;
1940}
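// Example for the memory case (a sketch in x86-style MIR): for an 8-byte
// parameter-forwarding load such as
//   $rdi = MOV64rm $rsp, 1, $noreg, 16, $noreg
// the loaded value is described by the base operand $rsp with
// DW_OP_plus_uconst 16, DW_OP_deref_size 8 prepended to the expression.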
1941
1942// Get the call frame size just before MI.
1943unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1944 // Search backwards from MI for the most recent call frame instruction.
1945 MachineBasicBlock *MBB = MI.getParent();
1946 for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
1947 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1948 return getFrameTotalSize(AdjI);
1949 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1950 return 0;
1951 }
1952
1953 // If none was found, use the call frame size from the start of the basic
1954 // block.
1955 return MBB->getCallFrameSize();
1956}
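// Example (sketch, using common target pseudo names): scanning backwards
// from MI,
//   ADJCALLSTACKDOWN 32, 0, ...   ; most recent frame setup
//   ...
//   MI                            ; -> the setup's total frame size (32 here)
// while an intervening ADJCALLSTACKUP means the call frame was already
// closed, so the size is 0.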
1957
1958/// Both DefMI and UseMI must be valid. By default, call directly to the
1959/// itinerary. This may be overridden by the target.
1960std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1961 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1962 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1963 unsigned DefClass = DefMI.getDesc().getSchedClass();
1964 unsigned UseClass = UseMI.getDesc().getSchedClass();
1965 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1966}
1967
1968bool TargetInstrInfo::getRegSequenceInputs(
1969 const MachineInstr &MI, unsigned DefIdx,
1970 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1971 assert((MI.isRegSequence() ||
1972 MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1973
1974 if (!MI.isRegSequence())
1975 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1976
1977 // We are looking at:
1978 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1979 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1980 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1981 OpIdx += 2) {
1982 const MachineOperand &MOReg = MI.getOperand(OpIdx);
1983 if (MOReg.isUndef())
1984 continue;
1985 const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1986 assert(MOSubIdx.isImm() &&
1987 "One of the subindices of the reg_sequence is not an immediate");
1988 // Record Reg:SubReg, SubIdx.
1989 InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1990 (unsigned)MOSubIdx.getImm()));
1991 }
1992 return true;
1993}
1994
1995bool TargetInstrInfo::getExtractSubregInputs(
1996 const MachineInstr &MI, unsigned DefIdx,
1997 RegSubRegPairAndIdx &InputReg) const {
1998 assert((MI.isExtractSubreg() ||
1999 MI.isExtractSubregLike()) && "Instruction does not have the proper type");
2000
2001 if (!MI.isExtractSubreg())
2002 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
2003
2004 // We are looking at:
2005 // Def = EXTRACT_SUBREG v0.sub1, sub0.
2006 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
2007 const MachineOperand &MOReg = MI.getOperand(1);
2008 if (MOReg.isUndef())
2009 return false;
2010 const MachineOperand &MOSubIdx = MI.getOperand(2);
2011 assert(MOSubIdx.isImm() &&
2012 "The subindex of the extract_subreg is not an immediate");
2013
2014 InputReg.Reg = MOReg.getReg();
2015 InputReg.SubReg = MOReg.getSubReg();
2016 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2017 return true;
2018}
2019
2020bool TargetInstrInfo::getInsertSubregInputs(
2021 const MachineInstr &MI, unsigned DefIdx,
2022 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2023 assert((MI.isInsertSubreg() ||
2024 MI.isInsertSubregLike()) && "Instruction does not have the proper type");
2025
2026 if (!MI.isInsertSubreg())
2027 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2028
2029 // We are looking at:
2030 // Def = INSERT_SUBREG v0, v1, sub0.
2031 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2032 const MachineOperand &MOBaseReg = MI.getOperand(1);
2033 const MachineOperand &MOInsertedReg = MI.getOperand(2);
2034 if (MOInsertedReg.isUndef())
2035 return false;
2036 const MachineOperand &MOSubIdx = MI.getOperand(3);
2037 assert(MOSubIdx.isImm() &&
2038 "The subindex of the insert_subreg is not an immediate");
2039 BaseReg.Reg = MOBaseReg.getReg();
2040 BaseReg.SubReg = MOBaseReg.getSubReg();
2041
2042 InsertedReg.Reg = MOInsertedReg.getReg();
2043 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2044 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2045 return true;
2046}
2047
2048// Returns a MIRPrinter comment for this machine operand.
2049std::string TargetInstrInfo::createMIROperandComment(
2050 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2051 const TargetRegisterInfo *TRI) const {
2052
2053 if (!MI.isInlineAsm())
2054 return "";
2055
2056 std::string Flags;
2057 raw_string_ostream OS(Flags);
2058
2059 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2060 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2061 unsigned ExtraInfo = Op.getImm();
2062 bool First = true;
2063 for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
2064 if (!First)
2065 OS << " ";
2066 First = false;
2067 OS << Info;
2068 }
2069
2070 return Flags;
2071 }
2072
2073 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2074 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2075 return "";
2076
2077 assert(Op.isImm() && "Expected flag operand to be an immediate");
2078 // Pretty print the inline asm operand descriptor.
2079 unsigned Flag = Op.getImm();
2080 const InlineAsm::Flag F(Flag);
2081 OS << F.getKindName();
2082
2083 unsigned RCID;
2084 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
2085 if (TRI) {
2086 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
2087 } else
2088 OS << ":RC" << RCID;
2089 }
2090
2091 if (F.isMemKind()) {
2092 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2093 OS << ":" << InlineAsm::getMemConstraintName(MCID);
2094 }
2095
2096 unsigned TiedTo;
2097 if (F.isUseOperandTiedToDef(TiedTo))
2098 OS << " tiedto:$" << TiedTo;
2099
2100 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2101 F.getRegMayBeFolded())
2102 OS << " foldable";
2103
2104 return Flags;
2105}
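// Example output (sketch): for a register-use operand constrained to a
// target register class and tied to operand 0, the produced comment
// resembles
//   reguse:GR64 tiedto:$0
// while the extra-info operand of the INLINEASM instruction itself yields
// strings such as "sideeffect mayload".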
2106
2108
2109void TargetInstrInfo::mergeOutliningCandidateAttributes(
2110 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2111 // Include target features from an arbitrary candidate for the outlined
2112 // function. This makes sure the outlined function knows what kinds of
2113 // instructions are going into it. This is fine, since all parent functions
2114 // must necessarily support the instructions that are in the outlined region.
2115 outliner::Candidate &FirstCand = Candidates.front();
2116 const Function &ParentFn = FirstCand.getMF()->getFunction();
2117 if (ParentFn.hasFnAttribute("target-features"))
2118 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
2119 if (ParentFn.hasFnAttribute("target-cpu"))
2120 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
2121
2122 // Set nounwind, so we don't generate eh_frame.
2123 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
2124 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
2125 }))
2126 F.addFnAttr(Attribute::NoUnwind);
2127}
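// Effect (sketch): if every candidate's parent function is nounwind, the
// outlined function ends up with IR attributes along the lines of
//   attributes #0 = { nounwind "target-cpu"="..." "target-features"="..." }
// where the cpu/features strings are copied from the first candidate.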
2128
2129outliner::InstrType
2130TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2131 MachineBasicBlock::iterator &MIT,
2132 unsigned Flags) const {
2133 MachineInstr &MI = *MIT;
2134
2135 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2136 // have support for outlining those. Special-case that here.
2137 if (MI.isCFIInstruction())
2138 // Just go right to the target implementation.
2139 return getOutliningTypeImpl(MMI, MIT, Flags);
2140
2141 // Be conservative about inline assembly.
2142 if (MI.isInlineAsm())
2143 return outliner::InstrType::Illegal;
2144
2145 // Labels generally can't safely be outlined.
2146 if (MI.isLabel())
2147 return outliner::InstrType::Illegal;
2148
2149 // Don't let debug instructions impact analysis.
2150 if (MI.isDebugInstr())
2151 return outliner::InstrType::Invisible;
2152
2153 // Some other special cases.
2154 switch (MI.getOpcode()) {
2155 case TargetOpcode::IMPLICIT_DEF:
2156 case TargetOpcode::KILL:
2157 case TargetOpcode::LIFETIME_START:
2158 case TargetOpcode::LIFETIME_END:
2159 return outliner::InstrType::Invisible;
2160 default:
2161 break;
2162 }
2163
2164 // Is this a terminator for a basic block?
2165 if (MI.isTerminator()) {
2166 // If this is a branch to another block, we can't outline it.
2167 if (!MI.getParent()->succ_empty())
2168 return outliner::InstrType::Illegal;
2169
2170 // Don't outline if the branch is not unconditional.
2171 if (isPredicated(MI))
2172 return outliner::InstrType::Illegal;
2173 }
2174
2175 // Make sure none of the operands of this instruction do anything that
2176 // might break if they're moved outside their current function.
2177 // This includes MachineBasicBlock references, BlockAddresses,
2178 // Constant pool indices and jump table indices.
2179 //
2180 // A quick note on MO_TargetIndex:
2181 // This doesn't seem to be used in any of the architectures that the
2182 // MachineOutliner supports, but it was still filtered out in all of them.
2183 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2184 // As such, this check is removed both here and in the target-specific
2185 // implementations. Instead, we assert to make sure this doesn't
2186 // catch anyone off-guard somewhere down the line.
2187 for (const MachineOperand &MOP : MI.operands()) {
2188 // If you hit this assertion, please remove it and adjust
2189 // `getOutliningTypeImpl` for your target appropriately if necessary.
2190 // Adding the assertion back to other supported architectures
2191 // would be nice too :)
2192 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2193
2194 // CFI instructions should already have been filtered out at this point.
2195 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2196
2197 // PrologEpilogInserter should've already run at this point.
2198 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2199
2200 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2201 return outliner::InstrType::Illegal;
2202 }
2203
2204 // If we don't know, delegate to the target-specific hook.
2205 return getOutliningTypeImpl(MMI, MIT, Flags);
2206}
2207
2208bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2209 unsigned &Flags) const {
2210 // Some instrumentations create special TargetOpcodes at the start which
2211 // expand to special code sequences which must be present.
2212 auto First = MBB.getFirstNonDebugInstr();
2213 if (First == MBB.end())
2214 return true;
2215
2216 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2217 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2218 return false;
2219
2220 // Some instrumentations create special pseudo-instructions at or just before
2221 // the end that must be present.
2222 auto Last = MBB.getLastNonDebugInstr();
2223 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2224 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2225 return false;
2226
2227 if (Last != First && Last->isReturn()) {
2228 --Last;
2229 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2230 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2231 return false;
2232 }
2233 return true;
2234}
2235
2236bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2237 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2238 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2239}