LLVM 22.0.0git
TargetInstrInfo.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the target machine instruction set to the code generator.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
14#define LLVM_CODEGEN_TARGETINSTRINFO_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Uniformity.h"
31#include "llvm/MC/MCInstrInfo.h"
36#include <array>
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <utility>
41#include <vector>
42
43namespace llvm {
44
45class DFAPacketizer;
46class InstrItineraryData;
47class LiveIntervals;
48class LiveVariables;
49class MachineLoop;
50class MachineMemOperand;
51class MachineModuleInfo;
52class MachineRegisterInfo;
53class MCAsmInfo;
54class MCInst;
55struct MCSchedModel;
56class Module;
57class ScheduleDAG;
58class ScheduleDAGMI;
59class ScheduleHazardRecognizer;
60class SDNode;
61class SelectionDAG;
62class SMSchedule;
63class SwingSchedulerDAG;
64class RegScavenger;
65class TargetRegisterClass;
66class TargetRegisterInfo;
67class TargetSchedModel;
68class TargetSubtargetInfo;
69enum class MachineTraceStrategy;
70
71template <class T> class SmallVectorImpl;
72
73using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;
74
78
80 : Destination(&Dest), Source(&Src) {}
81};
82
83/// Used to describe a register and immediate addition.
84struct RegImmPair {
86 int64_t Imm;
87
88 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
89};
90
91/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
92/// It holds the register values, the scale value and the displacement.
93/// It also holds a descriptor for the expression used to calculate the address
94/// from the operands.
96 enum class Formula {
97 Basic = 0, // BaseReg + ScaledReg * Scale + Displacement
98 SExtScaledReg = 1, // BaseReg + sext(ScaledReg) * Scale + Displacement
99 ZExtScaledReg = 2 // BaseReg + zext(ScaledReg) * Scale + Displacement
100 };
101
104 int64_t Scale = 0;
105 int64_t Displacement = 0;
107 ExtAddrMode() = default;
108};
109
110//---------------------------------------------------------------------------
111///
112/// TargetInstrInfo - Interface to description of machine instruction set
113///
115public:
116 TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
117 unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
118 : CallFrameSetupOpcode(CFSetupOpcode),
119 CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
120 ReturnOpcode(ReturnOpcode) {}
124
125 static bool isGenericOpcode(unsigned Opc) {
126 return Opc <= TargetOpcode::GENERIC_OP_END;
127 }
128
129 static bool isGenericAtomicRMWOpcode(unsigned Opc) {
130 return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
131 Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
132 }
133
134 /// Given a machine instruction descriptor, returns the register
135 /// class constraint for OpNum, or NULL.
136 virtual
137 const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
138 const TargetRegisterInfo *TRI,
139 const MachineFunction &MF) const;
140
141 /// Returns true if MI is an instruction we are unable to reason about
142 /// (like a call or something with unmodeled side effects).
143 virtual bool isGlobalMemoryObject(const MachineInstr *MI) const;
144
145 /// Return true if the instruction is trivially rematerializable, meaning it
146 /// has no side effects and requires no operands that aren't always available.
147 /// This means the only allowed uses are constants and unallocatable physical
148 /// registers so that the instructions result is independent of the place
149 /// in the function.
151 return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF &&
152 MI.getNumOperands() == 1) ||
153 (MI.getDesc().isRematerializable() &&
154 isReallyTriviallyReMaterializable(MI));
155 }
156
157 /// Given \p MO is a PhysReg use return if it can be ignored for the purpose
158 /// of instruction rematerialization or sinking.
159 virtual bool isIgnorableUse(const MachineOperand &MO) const {
160 return false;
161 }
162
163 virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
164 MachineCycleInfo *CI) const {
165 return true;
166 }
167
168 /// For a "cheap" instruction which doesn't enable additional sinking,
169 /// should MachineSink break a critical edge to sink it anyways?
171 return false;
172 }
173
174protected:
175 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
176 /// set, this hook lets the target specify whether the instruction is actually
177 /// trivially rematerializable, taking into consideration its operands. This
178 /// predicate must return false if the instruction has any side effects other
179 /// than producing a value, or if it requres any address registers that are
180 /// not always available.
181 virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const;
182
183 /// This method commutes the operands of the given machine instruction MI.
184 /// The operands to be commuted are specified by their indices OpIdx1 and
185 /// OpIdx2.
186 ///
187 /// If a target has any instructions that are commutable but require
188 /// converting to different instructions or making non-trivial changes
189 /// to commute them, this method can be overloaded to do that.
190 /// The default implementation simply swaps the commutable operands.
191 ///
192 /// If NewMI is false, MI is modified in place and returned; otherwise, a
193 /// new machine instruction is created and returned.
194 ///
195 /// Do not call this method for a non-commutable instruction.
196 /// Even though the instruction is commutable, the method may still
197 /// fail to commute the operands, null pointer is returned in such cases.
198 virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
199 unsigned OpIdx1,
200 unsigned OpIdx2) const;
201
202 /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
203 /// operand indices to (ResultIdx1, ResultIdx2).
204 /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
205 /// predefined to some indices or be undefined (designated by the special
206 /// value 'CommuteAnyOperandIndex').
207 /// The predefined result indices cannot be re-defined.
208 /// The function returns true iff after the result pair redefinition
209 /// the fixed result pair is equal to or equivalent to the source pair of
210 /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
211 /// the pairs (x,y) and (y,x) are equivalent.
212 static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
213 unsigned CommutableOpIdx1,
214 unsigned CommutableOpIdx2);
215
216public:
217 /// These methods return the opcode of the frame setup/destroy instructions
218 /// if they exist (-1 otherwise). Some targets use pseudo instructions in
219 /// order to abstract away the difference between operating with a frame
220 /// pointer and operating without, through the use of these two instructions.
221 /// A FrameSetup MI in MF implies MFI::AdjustsStack.
222 ///
223 unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
224 unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
225
226 /// Returns true if the argument is a frame pseudo instruction.
227 bool isFrameInstr(const MachineInstr &I) const {
228 return I.getOpcode() == getCallFrameSetupOpcode() ||
229 I.getOpcode() == getCallFrameDestroyOpcode();
230 }
231
232 /// Returns true if the argument is a frame setup pseudo instruction.
233 bool isFrameSetup(const MachineInstr &I) const {
234 return I.getOpcode() == getCallFrameSetupOpcode();
235 }
236
237 /// Returns size of the frame associated with the given frame instruction.
238 /// For frame setup instruction this is frame that is set up space set up
239 /// after the instruction. For frame destroy instruction this is the frame
240 /// freed by the caller.
241 /// Note, in some cases a call frame (or a part of it) may be prepared prior
242 /// to the frame setup instruction. It occurs in the calls that involve
243 /// inalloca arguments. This function reports only the size of the frame part
244 /// that is set up between the frame setup and destroy pseudo instructions.
245 int64_t getFrameSize(const MachineInstr &I) const {
246 assert(isFrameInstr(I) && "Not a frame instruction");
247 assert(I.getOperand(0).getImm() >= 0);
248 return I.getOperand(0).getImm();
249 }
250
251 /// Returns the total frame size, which is made up of the space set up inside
252 /// the pair of frame start-stop instructions and the space that is set up
253 /// prior to the pair.
254 int64_t getFrameTotalSize(const MachineInstr &I) const {
255 if (isFrameSetup(I)) {
256 assert(I.getOperand(1).getImm() >= 0 &&
257 "Frame size must not be negative");
258 return getFrameSize(I) + I.getOperand(1).getImm();
259 }
260 return getFrameSize(I);
261 }
262
263 unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
264 unsigned getReturnOpcode() const { return ReturnOpcode; }
265
266 /// Returns the actual stack pointer adjustment made by an instruction
267 /// as part of a call sequence. By default, only call frame setup/destroy
268 /// instructions adjust the stack, but targets may want to override this
269 /// to enable more fine-grained adjustment, or adjust by a different value.
270 virtual int getSPAdjust(const MachineInstr &MI) const;
271
272 /// Return true if the instruction is a "coalescable" extension instruction.
273 /// That is, it's like a copy where it's legal for the source to overlap the
274 /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
275 /// expected the pre-extension value is available as a subreg of the result
276 /// register. This also returns the sub-register index in SubIdx.
277 virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
278 Register &DstReg, unsigned &SubIdx) const {
279 return false;
280 }
281
282 /// If the specified machine instruction is a direct
283 /// load from a stack slot, return the virtual or physical register number of
284 /// the destination along with the FrameIndex of the loaded stack slot. If
285 /// not, return 0. This predicate must return 0 if the instruction has
286 /// any side effects other than loading from the stack slot.
288 int &FrameIndex) const {
289 return 0;
290 }
291
292 /// Optional extension of isLoadFromStackSlot that returns the number of
293 /// bytes loaded from the stack. This must be implemented if a backend
294 /// supports partial stack slot spills/loads to further disambiguate
295 /// what the load does.
297 int &FrameIndex,
298 TypeSize &MemBytes) const {
299 MemBytes = TypeSize::getZero();
300 return isLoadFromStackSlot(MI, FrameIndex);
301 }
302
303 /// Check for post-frame ptr elimination stack locations as well.
304 /// This uses a heuristic so it isn't reliable for correctness.
306 int &FrameIndex) const {
307 return 0;
308 }
309
310 /// If the specified machine instruction has a load from a stack slot,
311 /// return true along with the FrameIndices of the loaded stack slot and the
312 /// machine mem operands containing the reference.
313 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
314 /// any instructions that loads from the stack. This is just a hint, as some
315 /// cases may be missed.
316 virtual bool hasLoadFromStackSlot(
317 const MachineInstr &MI,
319
320 /// If the specified machine instruction is a direct
321 /// store to a stack slot, return the virtual or physical register number of
322 /// the source reg along with the FrameIndex of the loaded stack slot. If
323 /// not, return 0. This predicate must return 0 if the instruction has
324 /// any side effects other than storing to the stack slot.
326 int &FrameIndex) const {
327 return 0;
328 }
329
330 /// Optional extension of isStoreToStackSlot that returns the number of
331 /// bytes stored to the stack. This must be implemented if a backend
332 /// supports partial stack slot spills/loads to further disambiguate
333 /// what the store does.
335 int &FrameIndex,
336 TypeSize &MemBytes) const {
337 MemBytes = TypeSize::getZero();
338 return isStoreToStackSlot(MI, FrameIndex);
339 }
340
341 /// Check for post-frame ptr elimination stack locations as well.
342 /// This uses a heuristic, so it isn't reliable for correctness.
344 int &FrameIndex) const {
345 return 0;
346 }
347
348 /// If the specified machine instruction has a store to a stack slot,
349 /// return true along with the FrameIndices of the loaded stack slot and the
350 /// machine mem operands containing the reference.
351 /// If not, return false. Unlike isStoreToStackSlot,
352 /// this returns true for any instructions that stores to the
353 /// stack. This is just a hint, as some cases may be missed.
354 virtual bool hasStoreToStackSlot(
355 const MachineInstr &MI,
357
358 /// Return true if the specified machine instruction
359 /// is a copy of one stack slot to another and has no other effect.
360 /// Provide the identity of the two frame indices.
361 virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
362 int &SrcFrameIndex) const {
363 return false;
364 }
365
366 /// Compute the size in bytes and offset within a stack slot of a spilled
367 /// register or subregister.
368 ///
369 /// \param [out] Size in bytes of the spilled value.
370 /// \param [out] Offset in bytes within the stack slot.
371 /// \returns true if both Size and Offset are successfully computed.
372 ///
373 /// Not all subregisters have computable spill slots. For example,
374 /// subregisters registers may not be byte-sized, and a pair of discontiguous
375 /// subregisters has no single offset.
376 ///
377 /// Targets with nontrivial bigendian implementations may need to override
378 /// this, particularly to support spilled vector registers.
379 virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
380 unsigned &Size, unsigned &Offset,
381 const MachineFunction &MF) const;
382
383 /// Return true if the given instruction is terminator that is unspillable,
384 /// according to isUnspillableTerminatorImpl.
386 return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
387 }
388
389 /// Returns the size in bytes of the specified MachineInstr, or ~0U
390 /// when this function is not implemented by a target.
391 virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
392 return ~0U;
393 }
394
395 /// Return true if the instruction is as cheap as a move instruction.
396 ///
397 /// Targets for different archs need to override this, and different
398 /// micro-architectures can also be finely tuned inside.
399 virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
400 return MI.isAsCheapAsAMove();
401 }
402
403 /// Return true if the instruction should be sunk by MachineSink.
404 ///
405 /// MachineSink determines on its own whether the instruction is safe to sink;
406 /// this gives the target a hook to override the default behavior with regards
407 /// to which instructions should be sunk.
408 virtual bool shouldSink(const MachineInstr &MI) const { return true; }
409
410 /// Return false if the instruction should not be hoisted by MachineLICM.
411 ///
412 /// MachineLICM determines on its own whether the instruction is safe to
413 /// hoist; this gives the target a hook to extend this assessment and prevent
414 /// an instruction being hoisted from a given loop for target specific
415 /// reasons.
416 virtual bool shouldHoist(const MachineInstr &MI,
417 const MachineLoop *FromLoop) const {
418 return true;
419 }
420
421 /// Re-issue the specified 'original' instruction at the
422 /// specific location targeting a new destination register.
423 /// The register in Orig->getOperand(0).getReg() will be substituted by
424 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
425 /// SubIdx.
426 virtual void reMaterialize(MachineBasicBlock &MBB,
428 unsigned SubIdx, const MachineInstr &Orig,
429 const TargetRegisterInfo &TRI) const;
430
431 /// Clones instruction or the whole instruction bundle \p Orig and
432 /// insert into \p MBB before \p InsertBefore. The target may update operands
433 /// that are required to be unique.
434 ///
435 /// \p Orig must not return true for MachineInstr::isNotDuplicable().
436 virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
437 MachineBasicBlock::iterator InsertBefore,
438 const MachineInstr &Orig) const;
439
440 /// This method must be implemented by targets that
441 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
442 /// may be able to convert a two-address instruction into one or more true
443 /// three-address instructions on demand. This allows the X86 target (for
444 /// example) to convert ADD and SHL instructions into LEA instructions if they
445 /// would require register copies due to two-addressness.
446 ///
447 /// This method returns a null pointer if the transformation cannot be
448 /// performed, otherwise it returns the last new instruction.
449 ///
450 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
451 /// replacing \p MI with new instructions, even though this function does not
452 /// remove MI.
454 LiveVariables *LV,
455 LiveIntervals *LIS) const {
456 return nullptr;
457 }
458
459 // This constant can be used as an input value of operand index passed to
460 // the method findCommutedOpIndices() to tell the method that the
461 // corresponding operand index is not pre-defined and that the method
462 // can pick any commutable operand.
463 static const unsigned CommuteAnyOperandIndex = ~0U;
464
465 /// This method commutes the operands of the given machine instruction MI.
466 ///
467 /// The operands to be commuted are specified by their indices OpIdx1 and
468 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
469 /// 'CommuteAnyOperandIndex', which means that the method is free to choose
470 /// any arbitrarily chosen commutable operand. If both arguments are set to
471 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
472 /// operands; then commutes them if such operands could be found.
473 ///
474 /// If NewMI is false, MI is modified in place and returned; otherwise, a
475 /// new machine instruction is created and returned.
476 ///
477 /// Do not call this method for a non-commutable instruction or
478 /// for non-commuable operands.
479 /// Even though the instruction is commutable, the method may still
480 /// fail to commute the operands, null pointer is returned in such cases.
482 commuteInstruction(MachineInstr &MI, bool NewMI = false,
483 unsigned OpIdx1 = CommuteAnyOperandIndex,
484 unsigned OpIdx2 = CommuteAnyOperandIndex) const;
485
486 /// Returns true iff the routine could find two commutable operands in the
487 /// given machine instruction.
488 /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
489 /// If any of the INPUT values is set to the special value
490 /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
491 /// operand, then returns its index in the corresponding argument.
492 /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
493 /// looks for 2 commutable operands.
494 /// If INPUT values refer to some operands of MI, then the method simply
495 /// returns true if the corresponding operands are commutable and returns
496 /// false otherwise.
497 ///
498 /// For example, calling this method this way:
499 /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
500 /// findCommutedOpIndices(MI, Op1, Op2);
501 /// can be interpreted as a query asking to find an operand that would be
502 /// commutable with the operand#1.
503 virtual bool findCommutedOpIndices(const MachineInstr &MI,
504 unsigned &SrcOpIdx1,
505 unsigned &SrcOpIdx2) const;
506
507 /// Returns true if the target has a preference on the operands order of
508 /// the given machine instruction. And specify if \p Commute is required to
509 /// get the desired operands order.
510 virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
511 return false;
512 }
513
514 /// If possible, converts the instruction to a simplified/canonical form.
515 /// Returns true if the instruction was modified.
516 ///
517 /// This function is only called after register allocation. The MI will be
518 /// modified in place. This is called by passes such as
519 /// MachineCopyPropagation, where their mutation of the MI operands may
520 /// expose opportunities to convert the instruction to a simpler form (e.g.
521 /// a load of 0).
522 virtual bool simplifyInstruction(MachineInstr &MI) const { return false; }
523
524 /// A pair composed of a register and a sub-register index.
525 /// Used to give some type checking when modeling Reg:SubReg.
528 unsigned SubReg;
529
531 : Reg(Reg), SubReg(SubReg) {}
532
533 bool operator==(const RegSubRegPair& P) const {
534 return Reg == P.Reg && SubReg == P.SubReg;
535 }
536 bool operator!=(const RegSubRegPair& P) const {
537 return !(*this == P);
538 }
539 };
540
541 /// A pair composed of a pair of a register and a sub-register index,
542 /// and another sub-register index.
543 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
545 unsigned SubIdx;
546
548 unsigned SubIdx = 0)
549 : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
550 };
551
552 /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
553 /// and \p DefIdx.
554 /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
555 /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
556 /// flag are not added to this list.
557 /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
558 /// two elements:
559 /// - %1:sub1, sub0
560 /// - %2<:0>, sub1
561 ///
562 /// \returns true if it is possible to build such an input sequence
563 /// with the pair \p MI, \p DefIdx. False otherwise.
564 ///
565 /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
566 ///
567 /// \note The generic implementation does not provide any support for
568 /// MI.isRegSequenceLike(). In other words, one has to override
569 /// getRegSequenceLikeInputs for target specific instructions.
570 bool
571 getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
572 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
573
574 /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
575 /// and \p DefIdx.
576 /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
577 /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
578 /// - %1:sub1, sub0
579 ///
580 /// \returns true if it is possible to build such an input sequence
581 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
582 /// False otherwise.
583 ///
584 /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
585 ///
586 /// \note The generic implementation does not provide any support for
587 /// MI.isExtractSubregLike(). In other words, one has to override
588 /// getExtractSubregLikeInputs for target specific instructions.
589 bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
590 RegSubRegPairAndIdx &InputReg) const;
591
592 /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
593 /// and \p DefIdx.
594 /// \p [out] BaseReg and \p [out] InsertedReg contain
595 /// the equivalent inputs of INSERT_SUBREG.
596 /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
597 /// - BaseReg: %0:sub0
598 /// - InsertedReg: %1:sub1, sub3
599 ///
600 /// \returns true if it is possible to build such an input sequence
601 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
602 /// False otherwise.
603 ///
604 /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
605 ///
606 /// \note The generic implementation does not provide any support for
607 /// MI.isInsertSubregLike(). In other words, one has to override
608 /// getInsertSubregLikeInputs for target specific instructions.
609 bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
610 RegSubRegPair &BaseReg,
611 RegSubRegPairAndIdx &InsertedReg) const;
612
613 /// Return true if two machine instructions would produce identical values.
614 /// By default, this is only true when the two instructions
615 /// are deemed identical except for defs. If this function is called when the
616 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
617 /// aggressive checks.
618 virtual bool produceSameValue(const MachineInstr &MI0,
619 const MachineInstr &MI1,
620 const MachineRegisterInfo *MRI = nullptr) const;
621
622 /// \returns true if a branch from an instruction with opcode \p BranchOpc
623 /// bytes is capable of jumping to a position \p BrOffset bytes away.
624 virtual bool isBranchOffsetInRange(unsigned BranchOpc,
625 int64_t BrOffset) const {
626 llvm_unreachable("target did not implement");
627 }
628
629 /// \returns The block that branch instruction \p MI jumps to.
631 llvm_unreachable("target did not implement");
632 }
633
634 /// Insert an unconditional indirect branch at the end of \p MBB to \p
635 /// NewDestBB. Optionally, insert the clobbered register restoring in \p
636 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
637 /// the offset of the position to insert the new branch.
639 MachineBasicBlock &NewDestBB,
640 MachineBasicBlock &RestoreBB,
641 const DebugLoc &DL, int64_t BrOffset = 0,
642 RegScavenger *RS = nullptr) const {
643 llvm_unreachable("target did not implement");
644 }
645
646 /// Analyze the branching code at the end of MBB, returning
647 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
648 /// implemented for a target). Upon success, this returns false and returns
649 /// with the following information in various cases:
650 ///
651 /// 1. If this block ends with no branches (it just falls through to its succ)
652 /// just return false, leaving TBB/FBB null.
653 /// 2. If this block ends with only an unconditional branch, it sets TBB to be
654 /// the destination block.
655 /// 3. If this block ends with a conditional branch and it falls through to a
656 /// successor block, it sets TBB to be the branch destination block and a
657 /// list of operands that evaluate the condition. These operands can be
658 /// passed to other TargetInstrInfo methods to create new branches.
659 /// 4. If this block ends with a conditional branch followed by an
660 /// unconditional branch, it returns the 'true' destination in TBB, the
661 /// 'false' destination in FBB, and a list of operands that evaluate the
662 /// condition. These operands can be passed to other TargetInstrInfo
663 /// methods to create new branches.
664 ///
665 /// Note that removeBranch and insertBranch must be implemented to support
666 /// cases where this method returns success.
667 ///
668 /// If AllowModify is true, then this routine is allowed to modify the basic
669 /// block (e.g. delete instructions after the unconditional branch).
670 ///
671 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
672 /// before calling this function.
674 MachineBasicBlock *&FBB,
676 bool AllowModify = false) const {
677 return true;
678 }
679
680 /// Represents a predicate at the MachineFunction level. The control flow a
681 /// MachineBranchPredicate represents is:
682 ///
683 /// Reg = LHS `Predicate` RHS == ConditionDef
684 /// if Reg then goto TrueDest else goto FalseDest
685 ///
688 PRED_EQ, // True if two values are equal
689 PRED_NE, // True if two values are not equal
690 PRED_INVALID // Sentinel value
691 };
692
694 MachineOperand LHS = MachineOperand::CreateImm(0);
695 MachineOperand RHS = MachineOperand::CreateImm(0);
696 MachineBasicBlock *TrueDest = nullptr;
697 MachineBasicBlock *FalseDest = nullptr;
698 MachineInstr *ConditionDef = nullptr;
699
700 /// SingleUseCondition is true if ConditionDef is dead except for the
701 /// branch(es) at the end of the basic block.
702 ///
703 bool SingleUseCondition = false;
704
705 explicit MachineBranchPredicate() = default;
706 };
707
708 /// Analyze the branching code at the end of MBB and parse it into the
709 /// MachineBranchPredicate structure if possible. Returns false on success
710 /// and true on failure.
711 ///
712 /// If AllowModify is true, then this routine is allowed to modify the basic
713 /// block (e.g. delete instructions after the unconditional branch).
714 ///
717 bool AllowModify = false) const {
718 return true;
719 }
720
721 /// Remove the branching code at the end of the specific MBB.
722 /// This is only invoked in cases where analyzeBranch returns success. It
723 /// returns the number of instructions that were removed.
724 /// If \p BytesRemoved is non-null, report the change in code size from the
725 /// removed instructions.
727 int *BytesRemoved = nullptr) const {
728 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
729 }
730
731 /// Insert branch code into the end of the specified MachineBasicBlock. The
732 /// operands to this method are the same as those returned by analyzeBranch.
733 /// This is only invoked in cases where analyzeBranch returns success. It
734 /// returns the number of instructions inserted. If \p BytesAdded is non-null,
735 /// report the change in code size from the added instructions.
736 ///
737 /// It is also invoked by tail merging to add unconditional branches in
738 /// cases where analyzeBranch doesn't apply because there was no original
739 /// branch to analyze. At least this much must be implemented, else tail
740 /// merging needs to be disabled.
741 ///
742 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
743 /// before calling this function.
747 const DebugLoc &DL,
748 int *BytesAdded = nullptr) const {
749 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
750 }
751
753 MachineBasicBlock *DestBB,
754 const DebugLoc &DL,
755 int *BytesAdded = nullptr) const {
756 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
757 BytesAdded);
758 }
759
760 /// Object returned by analyzeLoopForPipelining. Allows software pipelining
761 /// implementations to query attributes of the loop being pipelined and to
762 /// apply target-specific updates to the loop once pipelining is complete.
764 public:
766 /// Return true if the given instruction should not be pipelined and should
767 /// be ignored. An example could be a loop comparison, or induction variable
768 /// update with no users being pipelined.
769 virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
770
771 /// Return true if the proposed schedule should used. Otherwise return
772 /// false to not pipeline the loop. This function should be used to ensure
773 /// that pipelined loops meet target-specific quality heuristics.
775 return true;
776 }
777
778 /// Create a condition to determine if the trip count of the loop is greater
779 /// than TC, where TC is always one more than for the previous prologue or
780 /// 0 if this is being called for the outermost prologue.
781 ///
782 /// If the trip count is statically known to be greater than TC, return
783 /// true. If the trip count is statically known to be not greater than TC,
784 /// return false. Otherwise return nullopt and fill out Cond with the test
785 /// condition.
786 ///
787 /// Note: This hook is guaranteed to be called from the innermost to the
788 /// outermost prologue of the loop being software pipelined.
789 virtual std::optional<bool>
792
793 /// Create a condition to determine if the remaining trip count for a phase
794 /// is greater than TC. Some instructions such as comparisons may be
795 /// inserted at the bottom of MBB. All instructions expanded for the
796 /// phase must be inserted in MBB before calling this function.
797 /// LastStage0Insts is the map from the original instructions scheduled at
798 /// stage#0 to the expanded instructions for the last iteration of the
799 /// kernel. LastStage0Insts is intended to obtain the instruction that
 800 /// refers to the latest loop counter value.
801 ///
802 /// MBB can also be a predecessor of the prologue block. Then
803 /// LastStage0Insts must be empty and the compared value is the initial
804 /// value of the trip count.
809 "Target didn't implement "
810 "PipelinerLoopInfo::createRemainingIterationsGreaterCondition!");
811 }
812
813 /// Modify the loop such that the trip count is
814 /// OriginalTC + TripCountAdjust.
815 virtual void adjustTripCount(int TripCountAdjust) = 0;
816
817 /// Called when the loop's preheader has been modified to NewPreheader.
818 virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
819
    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    ///
    /// The default implementation does nothing; the optional \p LIS parameter
    /// lets an override keep live-interval analysis up to date when it erases
    /// instructions.
    virtual void disposed(LiveIntervals *LIS = nullptr) {}
826
    /// Return true if the target can expand pipelined schedule with modulo
    /// variable expansion.
    ///
    /// The default implementation reports no MVE support.
    virtual bool isMVEExpanderSupported() { return false; }
830 };
831
832 /// Analyze loop L, which must be a single-basic-block loop, and if the
833 /// conditions can be understood enough produce a PipelinerLoopInfo object.
834 virtual std::unique_ptr<PipelinerLoopInfo>
836 return nullptr;
837 }
838
  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  ///
  /// The default implementation always returns true ("cannot analyze");
  /// targets wanting loop analysis must override this hook.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }
846
847 /// Generate code to reduce the loop iteration by one and check if the loop
848 /// is finished. Return the value/register of the new loop count. We need
849 /// this function when peeling off one or more iterations of a loop. This
850 /// function assumes the nth iteration is peeled first.
852 MachineBasicBlock &PreHeader,
853 MachineInstr *IndVar, MachineInstr &Cmp,
856 unsigned Iter, unsigned MaxIter) const {
857 llvm_unreachable("Target didn't implement ReduceLoopCount");
858 }
859
860 /// Delete the instruction OldInst and everything after it, replacing it with
861 /// an unconditional branch to NewDest. This is used by the tail merging pass.
862 virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
863 MachineBasicBlock *NewDest) const;
864
865 /// Return true if it's legal to split the given basic
866 /// block at the specified instruction (i.e. instruction would be the start
867 /// of a new basic block).
870 return true;
871 }
872
  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  ///
  /// The default implementation returns false, so if-conversion is never
  /// considered profitable unless a target overrides this hook.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    return false;
  }
883
  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of a if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  ///
  /// The default implementation returns false (diamond if-conversion is not
  /// considered profitable unless the target overrides this hook).
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }
897
898 /// Return true if it's profitable for if-converter to duplicate instructions
899 /// of specified accumulated instruction latencies in the specified MBB to
900 /// enable if-conversion.
901 /// The probability of the instructions being executed is given by
902 /// Probability, and Confidence is a measure of our confidence that it
903 /// will be properly predicted.
905 unsigned NumCycles,
906 BranchProbability Probability) const {
907 return false;
908 }
909
910 /// Return the increase in code size needed to predicate a contiguous run of
911 /// NumInsts instructions.
913 unsigned NumInsts) const {
914 return 0;
915 }
916
  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  ///
  /// The default estimate is the full encoded size of the branch as reported
  /// by getInstSizeInBytes.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    return getInstSizeInBytes(MI);
  }
922
923 /// Return true if it's profitable to unpredicate
924 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
925 /// exclusive predicates.
926 /// e.g.
927 /// subeq r0, r1, #1
928 /// addne r0, r1, #1
929 /// =>
930 /// sub r0, r1, #1
931 /// addne r0, r1, #1
932 ///
 933 /// This may be profitable if conditional instructions are always executed.
935 MachineBasicBlock &FMBB) const {
936 return false;
937 }
938
939 /// Return true if it is possible to insert a select
940 /// instruction that chooses between TrueReg and FalseReg based on the
941 /// condition code in Cond.
942 ///
943 /// When successful, also return the latency in cycles from TrueReg,
944 /// FalseReg, and Cond to the destination register. In most cases, a select
945 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
946 ///
947 /// Some x86 implementations have 2-cycle cmov instructions.
948 ///
949 /// @param MBB Block where select instruction would be inserted.
950 /// @param Cond Condition returned by analyzeBranch.
951 /// @param DstReg Virtual dest register that the result should write to.
952 /// @param TrueReg Virtual register to select when Cond is true.
953 /// @param FalseReg Virtual register to select when Cond is false.
954 /// @param CondCycles Latency from Cond+Branch to select output.
955 /// @param TrueCycles Latency from TrueReg to select output.
956 /// @param FalseCycles Latency from FalseReg to select output.
959 Register TrueReg, Register FalseReg,
960 int &CondCycles, int &TrueCycles,
961 int &FalseCycles) const {
962 return false;
963 }
964
965 /// Insert a select instruction into MBB before I that will copy TrueReg to
966 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
967 ///
968 /// This function can only be called after canInsertSelect() returned true.
969 /// The condition in Cond comes from analyzeBranch, and it can be assumed
970 /// that the same flags or registers required by Cond are available at the
971 /// insertion point.
972 ///
973 /// @param MBB Block where select instruction should be inserted.
974 /// @param I Insertion point.
975 /// @param DL Source location for debugging.
976 /// @param DstReg Virtual register to be defined by select instruction.
977 /// @param Cond Condition as computed by analyzeBranch.
978 /// @param TrueReg Virtual register to copy when Cond is true.
 979 /// @param FalseReg Virtual register to copy when Cond is false.
983 Register TrueReg, Register FalseReg) const {
984 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
985 }
986
987 /// Analyze the given select instruction, returning true if
988 /// it cannot be understood. It is assumed that MI->isSelect() is true.
989 ///
990 /// When successful, return the controlling condition and the operands that
991 /// determine the true and false result values.
992 ///
993 /// Result = SELECT Cond, TrueOp, FalseOp
994 ///
995 /// Some targets can optimize select instructions, for example by predicating
996 /// the instruction defining one of the operands. Such targets should set
997 /// Optimizable.
998 ///
999 /// @param MI Select instruction to analyze.
1000 /// @param Cond Condition controlling the select.
1001 /// @param TrueOp Operand number of the value selected when Cond is true.
1002 /// @param FalseOp Operand number of the value selected when Cond is false.
1003 /// @param Optimizable Returned as true if MI is optimizable.
1004 /// @returns False on success.
1005 virtual bool analyzeSelect(const MachineInstr &MI,
1007 unsigned &TrueOp, unsigned &FalseOp,
1008 bool &Optimizable) const {
1009 assert(MI.getDesc().isSelect() && "MI must be a select instruction");
1010 return true;
1011 }
1012
1013 /// Given a select instruction that was understood by
1014 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
1015 /// merging it with one of its operands. Returns NULL on failure.
1016 ///
1017 /// When successful, returns the new select instruction. The client is
1018 /// responsible for deleting MI.
1019 ///
1020 /// If both sides of the select can be optimized, PreferFalse is used to pick
1021 /// a side.
1022 ///
1023 /// @param MI Optimizable select instruction.
1024 /// @param NewMIs Set that record all MIs in the basic block up to \p
1025 /// MI. Has to be updated with any newly created MI or deleted ones.
1026 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
1027 /// @returns Optimized instruction or NULL.
1030 bool PreferFalse = false) const {
1031 // This function must be implemented if Optimizable is ever set.
1032 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
1033 }
1034
1035 /// Emit instructions to copy a pair of physical registers.
1036 ///
1037 /// This function should support copies within any legal register class as
1038 /// well as any cross-class copies created during instruction selection.
1039 ///
1040 /// The source and destination registers may overlap, which may require a
1041 /// careful implementation when multiple copy instructions are required for
1042 /// large registers. See for example the ARM target.
1043 ///
1044 /// If RenamableDest is true, the copy instruction's destination operand is
1045 /// marked renamable.
1046 /// If RenamableSrc is true, the copy instruction's source operand is
1047 /// marked renamable.
1050 Register DestReg, Register SrcReg, bool KillSrc,
1051 bool RenamableDest = false,
1052 bool RenamableSrc = false) const {
1053 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
1054 }
1055
  /// Allow targets to tell MachineVerifier whether a specific register
  /// MachineOperand can be used as part of PC-relative addressing.
  /// PC-relative addressing modes in many CISC architectures contain
  /// (non-PC) registers as offsets or scaling values, which inherently
  /// tags the corresponding MachineOperand with OPERAND_PCREL.
  ///
  /// @param MO The MachineOperand in question. MO.isReg() should always
  /// be true.
  /// @return Whether this operand is allowed to be used PC-relatively.
  /// The default implementation conservatively answers false.
  virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
    return false;
  }
1068
  /// Return an index for MachineJumpTableInfo if \p MI is an indirect jump
  /// using a jump table, otherwise -1.
  /// The default implementation recognizes no jump-table jumps.
  virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }
1072
1073protected:
1074 /// Target-dependent implementation for IsCopyInstr.
1075 /// If the specific machine instruction is an instruction that moves/copies
1076 /// value from one register to another register return destination and source
1077 /// registers as machine operands.
1078 virtual std::optional<DestSourcePair>
1080 return std::nullopt;
1081 }
1082
1083 virtual std::optional<DestSourcePair>
1085 return std::nullopt;
1086 }
1087
  /// Return true if the given terminator MI is not expected to spill. This
  /// sets the live interval as not spillable and adjusts phi node lowering to
  /// not introduce copies after the terminator. Use with care, these are
  /// currently used for hardware loop intrinsics in very controlled situations,
  /// created prior to register allocation in loops that only have single phi
  /// users for the terminators value. They may run out of registers if not used
  /// carefully.
  ///
  /// The default implementation returns false (normal spilling rules apply).
  virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
    return false;
  }
1098
1099public:
1100 /// If the specific machine instruction is a instruction that moves/copies
1101 /// value from one register to another register return destination and source
1102 /// registers as machine operands.
1103 /// For COPY-instruction the method naturally returns destination and source
1104 /// registers as machine operands, for all other instructions the method calls
1105 /// target-dependent implementation.
1106 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
1107 if (MI.isCopy()) {
1108 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1109 }
1110 return isCopyInstrImpl(MI);
1111 }
1112
1113 // Similar to `isCopyInstr`, but adds non-copy semantics on MIR, but
1114 // ultimately generates a copy instruction.
1115 std::optional<DestSourcePair> isCopyLikeInstr(const MachineInstr &MI) const {
1116 if (auto IsCopyInstr = isCopyInstr(MI))
1117 return IsCopyInstr;
1118 return isCopyLikeInstrImpl(MI);
1119 }
1120
1121 bool isFullCopyInstr(const MachineInstr &MI) const {
1122 auto DestSrc = isCopyInstr(MI);
1123 if (!DestSrc)
1124 return false;
1125
1126 const MachineOperand *DestRegOp = DestSrc->Destination;
1127 const MachineOperand *SrcRegOp = DestSrc->Source;
1128 return !DestRegOp->getSubReg() && !SrcRegOp->getSubReg();
1129 }
1130
  /// If the specific machine instruction is an instruction that adds an
  /// immediate value and a register, and stores the result in the given
  /// register \c Reg, return a pair of the source register and the offset
  /// which has been added.
  ///
  /// The default implementation recognizes no such instruction and returns
  /// std::nullopt.
  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                                   Register Reg) const {
    return std::nullopt;
  }
1139
1140 /// Returns true if MI is an instruction that defines Reg to have a constant
1141 /// value and the value is recorded in ImmVal. The ImmVal is a result that
1142 /// should be interpreted as modulo size of Reg.
1144 const Register Reg,
1145 int64_t &ImmVal) const {
1146 return false;
1147 }
1148
1149 /// Store the specified register of the given register class to the specified
1150 /// stack frame index. The store instruction is to be added to the given
1151 /// machine basic block before the specified machine instruction. If isKill
1152 /// is true, the register operand is the last use and must be marked kill. If
1153 /// \p SrcReg is being directly spilled as part of assigning a virtual
1154 /// register, \p VReg is the register being assigned. This additional register
1155 /// argument is needed for certain targets when invoked from RegAllocFast to
1156 /// map the spilled physical register to its virtual register. A null register
1157 /// can be passed elsewhere. The \p Flags is used to set appropriate machine
1158 /// flags on the spill instruction e.g. FrameSetup flag on a callee saved
1159 /// register spill instruction, part of prologue, during the frame lowering.
1162 bool isKill, int FrameIndex, const TargetRegisterClass *RC,
1163 const TargetRegisterInfo *TRI, Register VReg,
1164 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
1165 llvm_unreachable("Target didn't implement "
1166 "TargetInstrInfo::storeRegToStackSlot!");
1167 }
1168
1169 /// Load the specified register of the given register class from the specified
1170 /// stack frame index. The load instruction is to be added to the given
1171 /// machine basic block before the specified machine instruction. If \p
1172 /// DestReg is being directly reloaded as part of assigning a virtual
1173 /// register, \p VReg is the register being assigned. This additional register
1174 /// argument is needed for certain targets when invoked from RegAllocFast to
1175 /// map the loaded physical register to its virtual register. A null register
1176 /// can be passed elsewhere. The \p Flags is used to set appropriate machine
1177 /// flags on the spill instruction e.g. FrameDestroy flag on a callee saved
1178 /// register reload instruction, part of epilogue, during the frame lowering.
1181 int FrameIndex, const TargetRegisterClass *RC,
1182 const TargetRegisterInfo *TRI, Register VReg,
1183 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
1184 llvm_unreachable("Target didn't implement "
1185 "TargetInstrInfo::loadRegFromStackSlot!");
1186 }
1187
  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  ///
  /// The default implementation expands nothing and returns false.
  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
1195
  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  /// For example, X86 may want to return true if it can fold
  /// movl (%esp), %eax
  /// subb, %al, ...
  /// Into:
  /// subb (%esp), ...
  ///
  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
  /// reject subregs - but since this behavior used to be enforced in the
  /// target-independent code, moving this responsibility to the targets
  /// has the potential of causing nasty silent breakage in out-of-tree targets.
  ///
  /// The default answer is false: subreg operands are not foldable.
  virtual bool isSubregFoldable() const { return false; }
1209
1210 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1211 /// operands which can't be folded into stack references. Operands outside
1212 /// of the range are most likely foldable but it is not guaranteed.
1213 /// These instructions are unique in that stack references for some operands
1214 /// have the same execution cost (e.g. none) as the unfolded register forms.
1215 /// The ranged return is guaranteed to include all operands which can't be
1216 /// folded at zero cost.
1217 virtual std::pair<unsigned, unsigned>
1218 getPatchpointUnfoldableRange(const MachineInstr &MI) const;
1219
1220 /// Attempt to fold a load or store of the specified stack
1221 /// slot into the specified machine instruction for the specified operand(s).
1222 /// If this is possible, a new instruction is returned with the specified
1223 /// operand folded, otherwise NULL is returned.
1224 /// The new instruction is inserted before MI, and the client is responsible
1225 /// for removing the old instruction.
1226 /// If VRM is passed, the assigned physregs can be inspected by target to
1227 /// decide on using an opcode (note that those assignments can still change).
1228 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1229 int FI,
1230 LiveIntervals *LIS = nullptr,
1231 VirtRegMap *VRM = nullptr) const;
1232
1233 /// Same as the previous version except it allows folding of any load and
1234 /// store from / to any address, not just from a specific stack slot.
1235 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1236 MachineInstr &LoadMI,
1237 LiveIntervals *LIS = nullptr) const;
1238
1239 /// This function defines the logic to lower COPY instruction to
1240 /// target specific instruction(s).
1241 void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;
1242
1243 /// Return true when there is potentially a faster code sequence
1244 /// for an instruction chain ending in \p Root. All potential patterns are
1245 /// returned in the \p Pattern vector. Pattern should be sorted in priority
1246 /// order since the pattern evaluator stops checking as soon as it finds a
1247 /// faster sequence.
1248 /// \param Root - Instruction that could be combined with one of its operands
1249 /// \param Patterns - Vector of possible combination patterns
1250 virtual bool getMachineCombinerPatterns(MachineInstr &Root,
1251 SmallVectorImpl<unsigned> &Patterns,
1252 bool DoRegPressureReduce) const;
1253
1254 /// Return true if target supports reassociation of instructions in machine
1255 /// combiner pass to reduce register pressure for a given BB.
1256 virtual bool
1258 const RegisterClassInfo *RegClassInfo) const {
1259 return false;
1260 }
1261
1262 /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1263 virtual void
1265 SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1266
1267 /// Return true when a code sequence can improve throughput. It
1268 /// should be called only for instructions in loops.
1269 /// \param Pattern - combiner pattern
1270 virtual bool isThroughputPattern(unsigned Pattern) const;
1271
1272 /// Return the objective of a combiner pattern.
1273 /// \param Pattern - combiner pattern
1274 virtual CombinerObjective getCombinerObjective(unsigned Pattern) const;
1275
1276 /// Return true if the input \P Inst is part of a chain of dependent ops
1277 /// that are suitable for reassociation, otherwise return false.
1278 /// If the instruction's operands must be commuted to have a previous
1279 /// instruction of the same type define the first source operand, \P Commuted
1280 /// will be set to true.
1281 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1282
1283 /// Return true when \P Inst is both associative and commutative. If \P Invert
1284 /// is true, then the inverse of \P Inst operation must be tested.
1286 bool Invert = false) const {
1287 return false;
1288 }
1289
1290 /// Find chains of accumulations that can be rewritten as a tree for increased
1291 /// ILP.
1292 bool getAccumulatorReassociationPatterns(
1293 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const;
1294
1295 /// Find the chain of accumulator instructions in \P MBB and return them in
1296 /// \P Chain.
1297 void getAccumulatorChain(MachineInstr *CurrentInstr,
1298 SmallVectorImpl<Register> &Chain) const;
1299
  /// Return true when \p Opcode is an instruction which performs
  /// accumulation into one of its operand registers.
  /// The default implementation recognizes no accumulation opcodes.
  virtual bool isAccumulationOpcode(unsigned Opcode) const { return false; }
1303
  /// Returns an opcode which defines the accumulator used by \p Opcode.
  /// The default implementation aborts; targets that report accumulation
  /// opcodes via isAccumulationOpcode are expected to override this hook.
  virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const {
    llvm_unreachable("Function not implemented for target!");
    return 0;
  }
1309
  /// Returns the opcode that should be used to reduce accumulation registers.
  /// The default implementation aborts; targets supporting accumulator
  /// reassociation must override this hook.
  virtual unsigned
  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const {
    llvm_unreachable("Function not implemented for target!");
    return 0;
  }
1316
1317 /// Reduces branches of the accumulator tree into a single register.
1318 void reduceAccumulatorTree(SmallVectorImpl<Register> &RegistersToReduce,
1320 MachineFunction &MF, MachineInstr &Root,
1322 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1323 Register ResultReg) const;
1324
  /// Return the inverse operation opcode if it exists for \p Opcode (e.g. add
  /// for sub and vice versa).
  /// The default implementation knows no inverses and returns std::nullopt.
  virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
    return std::nullopt;
  }
1330
1331 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1332 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1333
1334 /// Return true when \P Inst has reassociable operands in the same \P MBB.
1335 virtual bool hasReassociableOperands(const MachineInstr &Inst,
1336 const MachineBasicBlock *MBB) const;
1337
1338 /// Return true when \P Inst has reassociable sibling.
1339 virtual bool hasReassociableSibling(const MachineInstr &Inst,
1340 bool &Commuted) const;
1341
1342 /// When getMachineCombinerPatterns() finds patterns, this function generates
1343 /// the instructions that could replace the original code sequence. The client
1344 /// has to decide whether the actual replacement is beneficial or not.
1345 /// \param Root - Instruction that could be combined with one of its operands
1346 /// \param Pattern - Combination pattern for Root
1347 /// \param InsInstrs - Vector of new instructions that implement P
1348 /// \param DelInstrs - Old instructions, including Root, that could be
1349 /// replaced by InsInstr
1350 /// \param InstIdxForVirtReg - map of virtual register to instruction in
1351 /// InsInstr that defines it
1352 virtual void genAlternativeCodeSequence(
1353 MachineInstr &Root, unsigned Pattern,
1356 DenseMap<Register, unsigned> &InstIdxForVirtReg) const;
1357
1358 /// When calculate the latency of the root instruction, accumulate the
1359 /// latency of the sequence to the root latency.
1360 /// \param Root - Instruction that could be combined with one of its operands
1362 return true;
1363 }
1364
1365 /// The returned array encodes the operand index for each parameter because
1366 /// the operands may be commuted; the operand indices for associative
1367 /// operations might also be target-specific. Each element specifies the index
1368 /// of {Prev, A, B, X, Y}.
1369 virtual void
1370 getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern,
1371 std::array<unsigned, 5> &OperandIndices) const;
1372
1373 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1374 /// reduce critical path length.
1375 void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1379 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const;
1380
1381 /// Reassociation of some instructions requires inverse operations (e.g.
1382 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1383 /// (new root opcode, new prev opcode) that must be used to reassociate \P
1384 /// Root and \P Prev according to \P Pattern.
1385 std::pair<unsigned, unsigned>
1386 getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root,
1387 const MachineInstr &Prev) const;
1388
  /// The limit on resource length extension we accept in MachineCombiner Pass.
  /// The default limit is 0, i.e. no extension is accepted.
  virtual int getExtendResourceLenLimit() const { return 0; }
1391
  /// This is an architecture-specific helper function of reassociateOps.
  /// Set special operand attributes for new instructions after reassociation.
  /// The default implementation sets nothing.
  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                                     MachineInstr &NewMI1,
                                     MachineInstr &NewMI2) const {}
1397
  /// Return true when a target supports MachineCombiner.
  /// The default is false, which disables the MachineCombiner pass for the
  /// target.
  virtual bool useMachineCombiner() const { return false; }
1400
1401 /// Return a strategy that MachineCombiner must use when creating traces.
1402 virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;
1403
  /// Return true if the given SDNode can be copied during scheduling
  /// even if it has glue.
  /// The default implementation conservatively forbids copying glued nodes.
  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1407
1408protected:
1409 /// Target-dependent implementation for foldMemoryOperand.
1410 /// Target-independent code in foldMemoryOperand will
1411 /// take care of adding a MachineMemOperand to the newly created instruction.
1412 /// The instruction and any auxiliary instructions necessary will be inserted
1413 /// at InsertPt.
1414 virtual MachineInstr *
1417 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1418 LiveIntervals *LIS = nullptr,
1419 VirtRegMap *VRM = nullptr) const {
1420 return nullptr;
1421 }
1422
1423 /// Target-dependent implementation for foldMemoryOperand.
1424 /// Target-independent code in foldMemoryOperand will
1425 /// take care of adding a MachineMemOperand to the newly created instruction.
1426 /// The instruction and any auxiliary instructions necessary will be inserted
1427 /// at InsertPt.
1430 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1431 LiveIntervals *LIS = nullptr) const {
1432 return nullptr;
1433 }
1434
1435 /// Target-dependent implementation of getRegSequenceInputs.
1436 ///
1437 /// \returns true if it is possible to build the equivalent
1438 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1439 ///
1440 /// \pre MI.isRegSequenceLike().
1441 ///
1442 /// \see TargetInstrInfo::getRegSequenceInputs.
1444 const MachineInstr &MI, unsigned DefIdx,
1445 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1446 return false;
1447 }
1448
1449 /// Target-dependent implementation of getExtractSubregInputs.
1450 ///
1451 /// \returns true if it is possible to build the equivalent
1452 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1453 ///
1454 /// \pre MI.isExtractSubregLike().
1455 ///
1456 /// \see TargetInstrInfo::getExtractSubregInputs.
1458 unsigned DefIdx,
1459 RegSubRegPairAndIdx &InputReg) const {
1460 return false;
1461 }
1462
1463 /// Target-dependent implementation of getInsertSubregInputs.
1464 ///
1465 /// \returns true if it is possible to build the equivalent
1466 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1467 ///
1468 /// \pre MI.isInsertSubregLike().
1469 ///
1470 /// \see TargetInstrInfo::getInsertSubregInputs.
1471 virtual bool
1473 RegSubRegPair &BaseReg,
1474 RegSubRegPairAndIdx &InsertedReg) const {
1475 return false;
1476 }
1477
1478public:
1479 /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1480 /// a store or a load and a store into two or more instruction. If this is
1481 /// possible, returns true as well as the new instructions by reference.
1482 virtual bool
1484 bool UnfoldLoad, bool UnfoldStore,
1485 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1486 return false;
1487 }
1488
1490 SmallVectorImpl<SDNode *> &NewNodes) const {
1491 return false;
1492 }
1493
  /// Returns the opcode of the would be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  ///
  /// The default implementation supports no unfolding and always returns 0.
  virtual unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const {
    return 0;
  }
1505
  /// This is used by the pre-regalloc scheduler to determine if two loads are
  /// loading from the same base address. It should only return true if the base
  /// pointers are the same and the only differences between the two addresses
  /// are the offset. It also returns the offsets by reference.
  ///
  /// The default implementation recognizes no such pairs and returns false.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const {
    return false;
  }
1515
  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  ///
  /// The default implementation expresses no clustering preference.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }
1529
1530 /// Get the base operand and byte offset of an instruction that reads/writes
1531 /// memory. This is a convenience function for callers that are only prepared
1532 /// to handle a single base operand.
1533 /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1534 /// abstraction that supports negative offsets.
1535 bool getMemOperandWithOffset(const MachineInstr &MI,
1536 const MachineOperand *&BaseOp, int64_t &Offset,
1537 bool &OffsetIsScalable,
1538 const TargetRegisterInfo *TRI) const;
1539
1540 /// Get zero or more base operands and the byte offset of an instruction that
1541 /// reads/writes memory. Note that there may be zero base operands if the
1542 /// instruction accesses a constant address.
1543 /// It returns false if MI does not read/write memory.
1544 /// It returns false if base operands and offset could not be determined.
1545 /// It is not guaranteed to always recognize base operands and offsets in all
1546 /// cases.
1547 /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1548 /// abstraction that supports negative offsets.
1551 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
1552 const TargetRegisterInfo *TRI) const {
1553 return false;
1554 }
1555
1556 /// Return true if the instruction contains a base register and offset. If
1557 /// true, the function also sets the operand position in the instruction
1558 /// for the base register and offset.
1560 unsigned &BasePos,
1561 unsigned &OffsetPos) const {
1562 return false;
1563 }
1564
1565 /// Target dependent implementation to get the values constituting the address
1566 /// MachineInstr that is accessing memory. These values are returned as a
1567 /// struct ExtAddrMode which contains all relevant information to make up the
1568 /// address.
1569 virtual std::optional<ExtAddrMode>
1571 const TargetRegisterInfo *TRI) const {
1572 return std::nullopt;
1573 }
1574
1575 /// Check if it's possible and beneficial to fold the addressing computation
1576 /// `AddrI` into the addressing mode of the load/store instruction `MemI`. The
1577 /// memory instruction is a user of the virtual register `Reg`, which in turn
1578 /// is the ultimate destination of zero or more COPY instructions from the
1579 /// output register of `AddrI`.
  /// Return the addressing mode after folding in `AM`.
1582 const MachineInstr &AddrI,
1583 ExtAddrMode &AM) const {
1584 return false;
1585 }
1586
1587 /// Emit a load/store instruction with the same value register as `MemI`, but
1588 /// using the address from `AM`. The addressing mode must have been obtained
1589 /// from `canFoldIntoAddr` for the same memory instruction.
1591 const ExtAddrMode &AM) const {
1592 llvm_unreachable("target did not implement emitLdStWithAddr()");
1593 }
1594
1595 /// Returns true if MI's Def is NullValueReg, and the MI
1596 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
1597 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
1598 /// function can return true even if becomes zero. Specifically cases such as
1599 /// NullValueReg = shl NullValueReg, 63.
1601 const Register NullValueReg,
1602 const TargetRegisterInfo *TRI) const {
1603 return false;
1604 }
1605
1606 /// If the instruction is an increment of a constant value, return the amount.
1607 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1608 return false;
1609 }
1610
1611 /// Returns true if the two given memory operations should be scheduled
1612 /// adjacent. Note that you have to add:
1613 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1614 /// or
1615 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1616 /// to TargetMachine::createMachineScheduler() to have an effect.
1617 ///
1618 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1619 /// \p Offset1 and \p Offset2 are the byte offsets for the memory
1620 /// operations.
1621 /// \p OffsetIsScalable1 and \p OffsetIsScalable2 indicate if the offset is
1622 /// scaled by a runtime quantity.
1623 /// \p ClusterSize is the number of operations in the resulting load/store
1624 /// cluster if this hook returns true.
1625 /// \p NumBytes is the number of bytes that will be loaded from all the
1626 /// clustered loads if this hook returns true.
1628 int64_t Offset1, bool OffsetIsScalable1,
1630 int64_t Offset2, bool OffsetIsScalable2,
1631 unsigned ClusterSize,
1632 unsigned NumBytes) const {
1633 llvm_unreachable("target did not implement shouldClusterMemOps()");
1634 }
1635
1636 /// Reverses the branch condition of the specified condition list,
1637 /// returning false on success and true if it cannot be reversed.
1638 virtual bool
1640 return true;
1641 }
1642
1643 /// Insert a noop into the instruction stream at the specified point.
1644 virtual void insertNoop(MachineBasicBlock &MBB,
1646
1647 /// Insert noops into the instruction stream at the specified point.
1648 virtual void insertNoops(MachineBasicBlock &MBB,
1650 unsigned Quantity) const;
1651
1652 /// Return the noop instruction to use for a noop.
1653 virtual MCInst getNop() const;
1654
  /// Return true for post-incremented instructions.
  /// Default: no instruction is treated as post-incrementing.
  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1657
  /// Returns true if the instruction is already predicated.
  /// Default: treat every instruction as unpredicated.
  virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1660
  /// Assumes the instruction is already predicated and returns true if the
  /// instruction can be predicated again.
  virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
    assert(isPredicated(MI) && "Instruction is not predicated");
    // Default: an already-predicated instruction cannot accept a second
    // predicate.
    return false;
  }
1667
1668 // Returns a MIRPrinter comment for this machine operand.
1669 virtual std::string
1670 createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
1671 unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1672
1673 /// Returns true if the instruction is a
1674 /// terminator instruction that has not been predicated.
1675 bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1676
1677 /// Returns true if MI is an unconditional tail call.
1678 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1679 return false;
1680 }
1681
1682 /// Returns true if the tail call can be made conditional on BranchCond.
1684 const MachineInstr &TailCall) const {
1685 return false;
1686 }
1687
1688 /// Replace the conditional branch in MBB with a conditional tail call.
1691 const MachineInstr &TailCall) const {
1692 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1693 }
1694
1695 /// Convert the instruction into a predicated instruction.
1696 /// It returns true if the operation was successful.
1697 virtual bool PredicateInstruction(MachineInstr &MI,
1698 ArrayRef<MachineOperand> Pred) const;
1699
1700 /// Returns true if the first specified predicate
1701 /// subsumes the second, e.g. GE subsumes GT.
1703 ArrayRef<MachineOperand> Pred2) const {
1704 return false;
1705 }
1706
1707 /// If the specified instruction defines any predicate
1708 /// or condition code register(s) used for predication, returns true as well
1709 /// as the definition predicate(s) by reference.
1710 /// SkipDead should be set to false at any point that dead
1711 /// predicate instructions should be considered as being defined.
1712 /// A dead predicate instruction is one that is guaranteed to be removed
1713 /// after a call to PredicateInstruction.
1715 std::vector<MachineOperand> &Pred,
1716 bool SkipDead) const {
1717 return false;
1718 }
1719
1720 /// Return true if the specified instruction can be predicated.
1721 /// By default, this returns true for every instruction with a
1722 /// PredicateOperand.
1723 virtual bool isPredicable(const MachineInstr &MI) const {
1724 return MI.getDesc().isPredicable();
1725 }
1726
  /// Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    // Default: defs of any register class may be moved.
    return true;
  }
1732
1733 /// Test if the given instruction should be considered a scheduling boundary.
1734 /// This primarily includes labels and terminators.
1735 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1736 const MachineBasicBlock *MBB,
1737 const MachineFunction &MF) const;
1738
1739 /// Measure the specified inline asm to determine an approximation of its
1740 /// length.
1741 virtual unsigned getInlineAsmLength(
1742 const char *Str, const MCAsmInfo &MAI,
1743 const TargetSubtargetInfo *STI = nullptr) const;
1744
1745 /// Allocate and return a hazard recognizer to use for this target when
1746 /// scheduling the machine instructions before register allocation.
1747 virtual ScheduleHazardRecognizer *
1748 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1749 const ScheduleDAG *DAG) const;
1750
1751 /// Allocate and return a hazard recognizer to use for this target when
1752 /// scheduling the machine instructions before register allocation.
1753 virtual ScheduleHazardRecognizer *
1754 CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1755 const ScheduleDAGMI *DAG) const;
1756
1757 /// Allocate and return a hazard recognizer to use for this target when
1758 /// scheduling the machine instructions after register allocation.
1759 virtual ScheduleHazardRecognizer *
1760 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1761 const ScheduleDAG *DAG) const;
1762
1763 /// Allocate and return a hazard recognizer to use for by non-scheduling
1764 /// passes.
1765 virtual ScheduleHazardRecognizer *
1767 return nullptr;
1768 }
1769
1770 /// Provide a global flag for disabling the PreRA hazard recognizer that
1771 /// targets may choose to honor.
1772 bool usePreRAHazardRecognizer() const;
1773
  /// For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                              Register &SrcReg2, int64_t &Mask,
                              int64_t &Value) const {
    // Default: comparison instructions are opaque to generic code.
    return false;
  }
1783
  /// See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                    Register SrcReg2, int64_t Mask,
                                    int64_t Value,
                                    const MachineRegisterInfo *MRI) const {
    // Default: no target-specific compare optimization is performed.
    return false;
  }
  /// Target hook to optimize a conditional branch; the default implementation
  /// performs no transformation and reports no change.
  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1794
1795 /// Try to remove the load by folding it to a register operand at the use.
1796 /// We fold the load instructions if and only if the
1797 /// def and use are in the same BB. We only look at one load and see
1798 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1799 /// defined by the load we are trying to fold. DefMI returns the machine
1800 /// instruction that defines FoldAsLoadDefReg, and the function returns
1801 /// the machine instruction generated due to folding.
1802 virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
1803 const MachineRegisterInfo *MRI,
1804 Register &FoldAsLoadDefReg,
1805 MachineInstr *&DefMI) const;
1806
1807 /// 'Reg' is known to be defined by a move immediate instruction,
1808 /// try to fold the immediate into the use instruction.
1809 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1810 /// then the caller may assume that DefMI has been erased from its parent
1811 /// block. The caller may assume that it will not be erased by this
1812 /// function otherwise.
1815 return false;
1816 }
1817
1818 /// Return the number of u-operations the given machine
1819 /// instruction will be decoded to on the target cpu. The itinerary's
1820 /// IssueWidth is the number of microops that can be dispatched each
1821 /// cycle. An instruction with zero microops takes no dispatch resources.
1822 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1823 const MachineInstr &MI) const;
1824
  /// Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    // All generic opcodes up to and including COPY are modeled as free.
    return Opcode <= TargetOpcode::COPY;
  }
1832
1833 virtual std::optional<unsigned>
1834 getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode,
1835 unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const;
1836
1837 /// Compute and return the use operand latency of a given pair of def and use.
1838 /// In most cases, the static scheduling itinerary was enough to determine the
1839 /// operand latency. But it may not be possible for instructions with variable
1840 /// number of defs / uses.
1841 ///
1842 /// This is a raw interface to the itinerary that may be directly overridden
1843 /// by a target. Use computeOperandLatency to get the best estimate of
1844 /// latency.
1845 virtual std::optional<unsigned>
1846 getOperandLatency(const InstrItineraryData *ItinData,
1847 const MachineInstr &DefMI, unsigned DefIdx,
1848 const MachineInstr &UseMI, unsigned UseIdx) const;
1849
1850 /// Compute the instruction latency of a given instruction.
1851 /// If the instruction has higher cost when predicated, it's returned via
1852 /// PredCost.
1853 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1854 const MachineInstr &MI,
1855 unsigned *PredCost = nullptr) const;
1856
1857 virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1858
1859 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1860 SDNode *Node) const;
1861
1862 /// Return the default expected latency for a def based on its opcode.
1863 unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1864 const MachineInstr &DefMI) const;
1865
1866 /// Return true if this opcode has high latency to its result.
1867 virtual bool isHighLatencyDef(int opc) const { return false; }
1868
  /// Compute operand latency between a def of 'Reg'
  /// and a use in the current loop. Return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                     const MachineRegisterInfo *MRI,
                                     const MachineInstr &DefMI, unsigned DefIdx,
                                     const MachineInstr &UseMI,
                                     unsigned UseIdx) const {
    // Default: never classify an operand latency as high.
    return false;
  }
1881
1882 /// Compute operand latency of a def of 'Reg'. Return true
1883 /// if the target considered it 'low'.
1884 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1885 const MachineInstr &DefMI,
1886 unsigned DefIdx) const;
1887
  /// Perform target-specific instruction verification.
  /// Default: accept every instruction without populating ErrInfo.
  virtual bool verifyInstruction(const MachineInstr &MI,
                                 StringRef &ErrInfo) const {
    return true;
  }
1893
1894 /// Return the current execution domain and bit mask of
1895 /// possible domains for instruction.
1896 ///
1897 /// Some micro-architectures have multiple execution domains, and multiple
1898 /// opcodes that perform the same operation in different domains. For
1899 /// example, the x86 architecture provides the por, orps, and orpd
1900 /// instructions that all do the same thing. There is a latency penalty if a
1901 /// register is written in one domain and read in another.
1902 ///
1903 /// This function returns a pair (domain, mask) containing the execution
1904 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1905 /// function can be used to change the opcode to one of the domains in the
1906 /// bit mask. Instructions whose execution domain can't be changed should
1907 /// return a 0 mask.
1908 ///
1909 /// The execution domain numbers don't have any special meaning except domain
1910 /// 0 is used for instructions that are not associated with any interesting
1911 /// execution domain.
1912 ///
1913 virtual std::pair<uint16_t, uint16_t>
1915 return std::make_pair(0, 0);
1916 }
1917
  /// Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  /// The default implementation is a no-op (no alternative domains exist).
  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1923
1924 /// Returns the preferred minimum clearance
1925 /// before an instruction with an unwanted partial register update.
1926 ///
1927 /// Some instructions only write part of a register, and implicitly need to
1928 /// read the other parts of the register. This may cause unwanted stalls
1929 /// preventing otherwise unrelated instructions from executing in parallel in
1930 /// an out-of-order CPU.
1931 ///
1932 /// For example, the x86 instruction cvtsi2ss writes its result to bits
1933 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1934 /// the instruction needs to wait for the old value of the register to become
1935 /// available:
1936 ///
1937 /// addps %xmm1, %xmm0
1938 /// movaps %xmm0, (%rax)
1939 /// cvtsi2ss %rbx, %xmm0
1940 ///
1941 /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1942 /// instruction before it can issue, even though the high bits of %xmm0
1943 /// probably aren't needed.
1944 ///
1945 /// This hook returns the preferred clearance before MI, measured in
1946 /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1947 /// instructions before MI. It should only return a positive value for
1948 /// unwanted dependencies. If the old bits of the defined register have
1949 /// useful values, or if MI is determined to otherwise read the dependency,
1950 /// the hook should return 0.
1951 ///
1952 /// The unwanted dependency may be handled by:
1953 ///
1954 /// 1. Allocating the same register for an MI def and use. That makes the
1955 /// unwanted dependency identical to a required dependency.
1956 ///
1957 /// 2. Allocating a register for the def that has no defs in the previous N
1958 /// instructions.
1959 ///
1960 /// 3. Calling breakPartialRegDependency() with the same arguments. This
1961 /// allows the target to insert a dependency breaking instruction.
1962 ///
1963 virtual unsigned
1965 const TargetRegisterInfo *TRI) const {
1966 // The default implementation returns 0 for no partial register dependency.
1967 return 0;
1968 }
1969
1970 /// Return the minimum clearance before an instruction that reads an
1971 /// unused register.
1972 ///
1973 /// For example, AVX instructions may copy part of a register operand into
1974 /// the unused high bits of the destination register.
1975 ///
1976 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1977 ///
1978 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1979 /// false dependence on any previous write to %xmm0.
1980 ///
1981 /// This hook works similarly to getPartialRegUpdateClearance, except that it
1982 /// does not take an operand index. Instead sets \p OpNum to the index of the
1983 /// unused register.
1984 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
1985 const TargetRegisterInfo *TRI) const {
1986 // The default implementation returns 0 for no undef register dependency.
1987 return 0;
1988 }
1989
1990 /// Insert a dependency-breaking instruction
1991 /// before MI to eliminate an unwanted dependency on OpNum.
1992 ///
1993 /// If it wasn't possible to avoid a def in the last N instructions before MI
1994 /// (see getPartialRegUpdateClearance), this hook will be called to break the
1995 /// unwanted dependency.
1996 ///
1997 /// On x86, an xorps instruction can be used as a dependency breaker:
1998 ///
1999 /// addps %xmm1, %xmm0
2000 /// movaps %xmm0, (%rax)
2001 /// xorps %xmm0, %xmm0
2002 /// cvtsi2ss %rbx, %xmm0
2003 ///
2004 /// An <imp-kill> operand should be added to MI if an instruction was
2005 /// inserted. This ties the instructions together in the post-ra scheduler.
2006 ///
  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const {
    // Default: insert nothing; targets override this to emit a
    // dependency-breaking instruction (e.g. xorps on x86, see above).
  }
2009
2010 /// Create machine specific model for scheduling.
2011 virtual DFAPacketizer *
2013 return nullptr;
2014 }
2015
2016 /// Sometimes, it is possible for the target
2017 /// to tell, even without aliasing information, that two MIs access different
2018 /// memory addresses. This function returns true if two MIs access different
2019 /// memory addresses and false otherwise.
2020 ///
2021 /// Assumes any physical registers used to compute addresses have the same
2022 /// value for both instructions. (This is the most useful assumption for
2023 /// post-RA scheduling.)
2024 ///
2025 /// See also MachineInstr::mayAlias, which is implemented on top of this
2026 /// function.
2027 virtual bool
2029 const MachineInstr &MIb) const {
2030 assert(MIa.mayLoadOrStore() &&
2031 "MIa must load from or modify a memory location");
2032 assert(MIb.mayLoadOrStore() &&
2033 "MIb must load from or modify a memory location");
2034 return false;
2035 }
2036
2037 /// Return the value to use for the MachineCSE's LookAheadLimit,
2038 /// which is a heuristic used for CSE'ing phys reg defs.
2039 virtual unsigned getMachineCSELookAheadLimit() const {
2040 // The default lookahead is small to prevent unprofitable quadratic
2041 // behavior.
2042 return 5;
2043 }
2044
  /// Return the maximal number of alias checks on memory operands. For
  /// instructions with more than one memory operands, the alias check on a
  /// single MachineInstr pair has quadratic overhead and results in
  /// unacceptable performance in the worst case. This limit clamps the
  /// maximum number of checks performed. Usually, that's the product of memory
  /// operand numbers from that pair of MachineInstr to be checked. For
  /// instance, with two MachineInstrs with 4 and 5 memory operands
  /// correspondingly, a total of 20 checks are required. With this limit set to
  /// 16, their alias check is skipped. We choose to limit the product instead
  /// of the individual instruction as targets may have special MachineInstrs
  /// with a considerably high number of memory operands, such as `ldm` in ARM.
  /// Setting this limit per MachineInstr would result in either too high
  /// overhead or too rigid restriction.
  virtual unsigned getMemOperandAACheckLimit() const { return 16; }
2059
2060 /// Return an array that contains the ids of the target indices (used for the
2061 /// TargetIndex machine operand) and their names.
2062 ///
2063 /// MIR Serialization is able to serialize only the target indices that are
2064 /// defined by this method.
2067 return {};
2068 }
2069
2070 /// Decompose the machine operand's target flags into two values - the direct
2071 /// target flag value and any of bit flags that are applied.
2072 virtual std::pair<unsigned, unsigned>
2074 return std::make_pair(0u, 0u);
2075 }
2076
2077 /// Return an array that contains the direct target flag values and their
2078 /// names.
2079 ///
2080 /// MIR Serialization is able to serialize only the target flags that are
2081 /// defined by this method.
2084 return {};
2085 }
2086
2087 /// Return an array that contains the bitmask target flag values and their
2088 /// names.
2089 ///
2090 /// MIR Serialization is able to serialize only the target flags that are
2091 /// defined by this method.
2094 return {};
2095 }
2096
2097 /// Return an array that contains the MMO target flag values and their
2098 /// names.
2099 ///
2100 /// MIR Serialization is able to serialize only the MMO target flags that are
2101 /// defined by this method.
2104 return {};
2105 }
2106
2107 /// Determines whether \p Inst is a tail call instruction. Override this
2108 /// method on targets that do not properly set MCID::Return and MCID::Call on
2109 /// tail call instructions."
2110 virtual bool isTailCall(const MachineInstr &Inst) const {
2111 return Inst.isReturn() && Inst.isCall();
2112 }
2113
2114 /// True if the instruction is bound to the top of its basic block and no
2115 /// other instructions shall be inserted before it. This can be implemented
2116 /// to prevent register allocator to insert spills for \p Reg before such
2117 /// instructions.
2119 Register Reg = Register()) const {
2120 return false;
2121 }
2122
  /// Allows targets to use appropriate copy instruction while splitting live
2124 /// range of a register in register allocation.
2126 const MachineFunction &MF) const {
2127 return TargetOpcode::COPY;
2128 }
2129
  /// During PHI elimination, lets the target make necessary checks and
2131 /// insert the copy to the PHI destination register in a target specific
2132 /// manner.
2135 const DebugLoc &DL, Register Src, Register Dst) const {
2136 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2137 .addReg(Src);
2138 }
2139
  /// During PHI elimination, lets the target make necessary checks and
2141 /// insert the copy to the PHI destination register in a target specific
2142 /// manner.
2145 const DebugLoc &DL, Register Src,
2146 unsigned SrcSubReg,
2147 Register Dst) const {
2148 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2149 .addReg(Src, 0, SrcSubReg);
2150 }
2151
2152 /// Returns a \p outliner::OutlinedFunction struct containing target-specific
2153 /// information for a set of outlining candidates. Returns std::nullopt if the
2154 /// candidates are not suitable for outlining. \p MinRepeats is the minimum
2155 /// number of times the instruction sequence must be repeated.
2156 virtual std::optional<std::unique_ptr<outliner::OutlinedFunction>>
2158 const MachineModuleInfo &MMI,
2159 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
2160 unsigned MinRepeats) const {
2162 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
2163 }
2164
2165 /// Optional target hook to create the LLVM IR attributes for the outlined
2166 /// function. If overridden, the overriding function must call the default
2167 /// implementation.
2168 virtual void mergeOutliningCandidateAttributes(
2169 Function &F, std::vector<outliner::Candidate> &Candidates) const;
2170
2171protected:
2172 /// Target-dependent implementation for getOutliningTypeImpl.
2173 virtual outliner::InstrType
2175 MachineBasicBlock::iterator &MIT, unsigned Flags) const {
2177 "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
2178 }
2179
2180public:
2181 /// Returns how or if \p MIT should be outlined. \p Flags is the
2182 /// target-specific information returned by isMBBSafeToOutlineFrom.
2183 outliner::InstrType getOutliningType(const MachineModuleInfo &MMI,
2185 unsigned Flags) const;
2186
2187 /// Optional target hook that returns true if \p MBB is safe to outline from,
2188 /// and returns any target-specific information in \p Flags.
2189 virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2190 unsigned &Flags) const;
2191
2192 /// Optional target hook which partitions \p MBB into outlinable ranges for
2193 /// instruction mapping purposes. Each range is defined by two iterators:
2194 /// [start, end).
2195 ///
2196 /// Ranges are expected to be ordered top-down. That is, ranges closer to the
2197 /// top of the block should come before ranges closer to the end of the block.
2198 ///
2199 /// Ranges cannot overlap.
2200 ///
2201 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2202 ///
2203 /// All instructions not present in an outlinable range are considered
2204 /// illegal.
2205 virtual SmallVector<
2206 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2207 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
2208 return {std::make_pair(MBB.begin(), MBB.end())};
2209 }
2210
2211 /// Insert a custom frame for outlined functions.
2213 const outliner::OutlinedFunction &OF) const {
2215 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2216 }
2217
2218 /// Insert a call to an outlined function into the program.
2219 /// Returns an iterator to the spot where we inserted the call. This must be
2220 /// implemented by the target.
2224 outliner::Candidate &C) const {
2226 "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2227 }
2228
2229 /// Insert an architecture-specific instruction to clear a register. If you
2230 /// need to avoid sideeffects (e.g. avoid XOR on x86, which sets EFLAGS), set
2231 /// \p AllowSideEffects to \p false.
2234 DebugLoc &DL,
2235 bool AllowSideEffects = true) const {
2236#if 0
2237 // FIXME: This should exist once all platforms that use stack protectors
2238 // implements it.
2240 "Target didn't implement TargetInstrInfo::buildClearRegister!");
2241#endif
2242 }
2243
2244 /// Return true if the function can safely be outlined from.
2245 /// A function \p MF is considered safe for outlining if an outlined function
2246 /// produced from instructions in F will produce a program which produces the
2247 /// same output for any set of given inputs.
2249 bool OutlineFromLinkOnceODRs) const {
2250 llvm_unreachable("Target didn't implement "
2251 "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2252 }
2253
2254 /// Return true if the function should be outlined from by default.
2256 return false;
2257 }
2258
2259 /// Return true if the function is a viable candidate for machine function
2260 /// splitting. The criteria for if a function can be split may vary by target.
2261 virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;
2262
  /// Return true if the MachineBasicBlock can safely be split to the cold
  /// section. On AArch64, certain instructions may cause a block to be unsafe
  /// to split to the cold section.
  virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
    // Default: every block may be moved to the cold section.
    return true;
  }
2269
2270 /// Produce the expression describing the \p MI loading a value into
2271 /// the physical register \p Reg. This hook should only be used with
2272 /// \p MIs belonging to VReg-less functions.
2273 virtual std::optional<ParamLoadedValue>
2274 describeLoadedValue(const MachineInstr &MI, Register Reg) const;
2275
2276 /// Given the generic extension instruction \p ExtMI, returns true if this
2277 /// extension is a likely candidate for being folded into an another
2278 /// instruction.
2280 MachineRegisterInfo &MRI) const {
2281 return false;
2282 }
2283
2284 /// Return MIR formatter to format/parse MIR operands. Target can override
2285 /// this virtual function and return target specific MIR formatter.
2286 virtual const MIRFormatter *getMIRFormatter() const {
2287 if (!Formatter)
2288 Formatter = std::make_unique<MIRFormatter>();
2289 return Formatter.get();
2290 }
2291
2292 /// Returns the target-specific default value for tail duplication.
2293 /// This value will be used if the tail-dup-placement-threshold argument is
2294 /// not provided.
2295 virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
2296 return OptLevel >= CodeGenOptLevel::Aggressive ? 4 : 2;
2297 }
2298
  /// Returns the target-specific default value for tail merging.
  /// This value will be used if the tail-merge-size argument is not provided.
  virtual unsigned getTailMergeSize(const MachineFunction &MF) const {
    // Default threshold of 3 instructions.
    return 3;
  }
2304
  /// Returns the callee operand from the given \p MI.
  virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
    // By default assume the callee is operand 0; targets with a different
    // call-instruction layout must override this.
    return MI.getOperand(0);
  }
2309
2310 /// Return the uniformity behavior of the given instruction.
2311 virtual InstructionUniformity
2313 return InstructionUniformity::Default;
2314 }
2315
2316 /// Returns true if the given \p MI defines a TargetIndex operand that can be
2317 /// tracked by their offset, can have values, and can have debug info
2318 /// associated with it. If so, sets \p Index and \p Offset of the target index
2319 /// operand.
2321 int64_t &Offset) const {
2322 return false;
2323 }
2324
2325 // Get the call frame size just before MI.
2326 unsigned getCallFrameSizeAt(MachineInstr &MI) const;
2327
2328 /// Fills in the necessary MachineOperands to refer to a frame index.
2329 /// The best way to understand this is to print `asm(""::"m"(x));` after
2330 /// finalize-isel. Example:
2331 /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
2332 /// we would add placeholders for: ^ ^ ^ ^
2334 int FI) const {
2335 llvm_unreachable("unknown number of operands necessary");
2336 }
2337
2338private:
2339 mutable std::unique_ptr<MIRFormatter> Formatter;
2340 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2341 unsigned CatchRetOpcode;
2342 unsigned ReturnOpcode;
2343};
2344
2345/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2349
2351 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2352 SubRegInfo::getEmptyKey());
2353 }
2354
2356 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2357 SubRegInfo::getTombstoneKey());
2358 }
2359
  /// Reuse getHashValue implementation from
  /// std::pair<unsigned, unsigned>.
  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
    // Hash (Reg, SubReg) exactly as a std::pair of the two fields would be.
    return DenseMapInfo<std::pair<Register, unsigned>>::getHashValue(
        std::make_pair(Val.Reg, Val.SubReg));
  }
2366
2369 return LHS == RHS;
2370 }
2371};
2372
2373} // end namespace llvm
2374
2375#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
#define LLVM_ABI
Definition: Compiler.h:213
DXIL Forward Handle Accesses
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
uint32_t Index
uint64_t Size
uint64_t Offset
Definition: ELF_riscv.cpp:478
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Check Debug Module
Contains all data structures shared between the outliner implemented in MachineOutliner....
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition: Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getInstSizeInBytes(const MachineInstr &MI, const SystemZInstrInfo *TII)
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:666
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:124
Itinerary data supplied by a subtarget to be used by a target.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:64
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:188
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:27
MIRFormater - Interface to format MIR operand based on target.
Definition: MIRFormatter.h:33
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:72
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:938
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:948
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Represents one node in the SelectionDAG.
This class represents the scheduled code.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:229
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:380
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
virtual bool isMVEExpanderSupported()
Return true if the target can expand pipelined schedule with modulo variable expansion.
virtual void createRemainingIterationsGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, DenseMap< MachineInstr *, MachineInstr * > &LastStage0Insts)
Create a condition to determine if the remaining trip count for a phase is greater than TC.
virtual void adjustTripCount(int TripCountAdjust)=0
Modify the loop such that the trip count is OriginalTC + TripCountAdjust.
virtual void disposed(LiveIntervals *LIS=nullptr)
Called when the loop is being removed.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const =0
Return true if the given instruction should not be pipelined and should be ignored.
virtual void setPreheader(MachineBasicBlock *NewPreheader)=0
Called when the loop's preheader has been modified to NewPreheader.
virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
Return true if the proposed schedule should used.
virtual std::optional< bool > createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond)=0
Create a condition to determine if the trip count of the loop is greater than TC, where TC is always ...
TargetInstrInfo - Interface to description of machine instruction set.
virtual SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook which partitions MBB into outlinable ranges for instruction mapping purposes.
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
virtual bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
virtual std::unique_ptr< PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
virtual bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const
Assumes the instruction is already predicated and returns true if the instruction can be predicated a...
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don't consume any machine resources in their current form.
virtual void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const
Insert an architecture-specific instruction to clear a register.
virtual void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const
Fills in the necessary MachineOperands to refer to a frame index.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const
virtual std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr * > &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
virtual void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true,...
virtual bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const
Check if it's possible and beneficial to fold the addressing computation AddrI into the addressing mo...
virtual const MIRFormatter * getMIRFormatter() const
Return MIR formatter to format/parse MIR operands.
virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const
Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
bool isFullCopyInstr(const MachineInstr &MI) const
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register.
virtual bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const
Returns true if MI's Def is NullValueReg, and the MI does not change the Zero value.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
virtual void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const
Fix up the placeholder we may add in genAlternativeCodeSequence().
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of 'Reg' and a use in the current loop.
bool isUnspillableTerminator(const MachineInstr *MI) const
Return true if the given instruction is terminator that is unspillable, according to isUnspillableTer...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it's profitable to unpredicate one side of a 'diamond', i.e.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
virtual int getExtendResourceLenLimit() const
The limit on resource length extension we accept in MachineCombiner Pass.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true,...
virtual bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const
For a "cheap" instruction which doesn't enable additional sinking, should MachineSink break a critica...
virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const
Sometimes, it is possible for the target to tell, even without aliasing information,...
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
unsigned getReturnOpcode() const
virtual bool isIgnorableUse(const MachineOperand &MO) const
Given MO is a PhysReg use return if it can be ignored for the purpose of instruction rematerializatio...
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be use to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
virtual bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const
Returns true if the two given memory operations should be scheduled adjacent.
virtual unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const
Allows targets to use appropriate copy instruction while spilitting live range of a register in regis...
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient.
virtual unsigned getMemOperandAACheckLimit() const
Return the maximal number of alias checks on memory operands.
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const
Return true if the MachineBasicBlock can safely be split to the cold section.
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
virtual bool simplifyInstruction(MachineInstr &MI) const
If possible, converts the instruction to a simplified/canonical form.
virtual std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const
Target dependent implementation to get the values constituting the address MachineInstr that is acces...
virtual std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const
Target-dependent implementation for IsCopyInstr.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI eleimination lets target to make necessary checks and insert the copy to the PHI destinati...
virtual bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const
Returns true if MI is an instruction that defines Reg to have a constant value and the value is recor...
static bool isGenericOpcode(unsigned Opc)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
std::optional< DestSourcePair > isCopyLikeInstr(const MachineInstr &MI) const
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
virtual Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understood.
unsigned getCatchReturnOpcode() const
virtual unsigned getTailMergeSize(const MachineFunction &MF) const
Returns the target-specific default value for tail merging.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
virtual InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const
Return the uniformity behavior of the given instruction.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
virtual bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const
'Reg' is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual int getJumpTableIndex(const MachineInstr &MI) const
Return an index for MachineJumpTableInfo if insn is an indirect jump using a jump table,...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, int64_t &Offset) const
Returns true if the given MI defines a TargetIndex operand that can be tracked by their offset,...
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const
Allow targets to tell MachineVerifier whether a specific register MachineOperand can be used as part ...
virtual std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const
Returns a outliner::OutlinedFunction struct containing target-specific information for a set of outli...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI eleimination lets target to make necessary checks and insert the copy to the PHI destinati...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
TargetInstrInfo(unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u)
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
TargetInstrInfo(const TargetInstrInfo &)=delete
virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const
Return an estimate for the code size reduction (in bytes) which will be caused by removing the given ...
virtual ~TargetInstrInfo()
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const
Return the increase in code size needed to predicate a contiguous run of NumInsts instructions.
virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const
When calculate the latency of the root instruction, accumulate the latency of the sequence to the roo...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it's safe to move a machine instruction that defines the specified register class.
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const
Return true if the given terminator MI is not expected to spill.
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
static bool isGenericAtomicRMWOpcode(unsigned Opc)
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const
Returns true if the target has a preference on the operands order of the given machine instruction.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
virtual MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const
Emit a load/store instruction with the same value register as MemI, but using the address from AM.
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual bool shouldHoist(const MachineInstr &MI, const MachineLoop *FromLoop) const
Return false if the instruction should not be hoisted by MachineLICM.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const
Returns the target-specific default value for tail duplication.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base address.
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode * > &NewNodes) const
virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two register operands, and the value it compares against in Value.
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE's LookAheadLimit, which is a heuristic used for CSE'ing phi nodes.
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it's legal to split the given basic block at the specified instruction (i.e. instruction would be the start of a new basic block).
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
LLVM Value Representation.
Definition: Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MachineTraceStrategy
Strategies for selecting traces.
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:82
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition: Uniformity.h:18
#define N
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
static TargetInstrInfo::RegSubRegPair getEmptyKey()
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
An information struct used to provide DenseMap with the various necessary components for a given value type T.
Definition: DenseMapInfo.h:54
const MachineOperand * Source
DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
const MachineOperand * Destination
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
ExtAddrMode()=default
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:258
Used to describe a register and immediate addition.
RegImmPair(Register Reg, int64_t Imm)
Represents a predicate at the MachineFunction level.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
RegSubRegPairAndIdx(Register Reg=Register(), unsigned SubReg=0, unsigned SubIdx=0)
A pair composed of a register and a sub-register index.
bool operator==(const RegSubRegPair &P) const
RegSubRegPair(Register Reg=Register(), unsigned SubReg=0)
bool operator!=(const RegSubRegPair &P) const
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.