#define DEBUG_TYPE "ppc-instr-info"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"

STATISTIC(NumStoreSPILLVSRRCAsVec,
          "Number of spillvsrrc spilled to stack as vec");
STATISTIC(NumStoreSPILLVSRRCAsGpr,
          "Number of spillvsrrc spilled to stack as gpr");
STATISTIC(NumGPRtoVSRSpill, "Number of gpr spills to spillvsrrc");
STATISTIC(CmpIselsConverted,
          "Number of ISELs that depend on comparison of constants converted");
STATISTIC(MissedConvertibleImmediateInstrs,
          "Number of compare-immediate instructions fed by constants");
STATISTIC(NumRcRotatesConvertedToRcAnd,
          "Number of record-form rotates converted to record-form andi");
    cl::desc("Disable analysis for CTR loops"));

    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),

    cl::desc("Use the old (incorrect) instruction latency calculation"));

    cl::desc("register pressure factor for the transformations."));

    cl::desc("enable register pressure reduce in machine combiner pass."));
void PPCInstrInfo::anchor() {}

                      STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}

      static_cast<const PPCSubtarget *>(STI)->getCPUDirective();

      static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
                                         unsigned *PredCost) const {
    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);

  unsigned DefClass = MI.getDesc().getSchedClass();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {

  std::optional<unsigned> Latency = PPCGenInstrInfo::getOperandLatency(

  if (!DefMI.getParent())

  if (Reg.isVirtual()) {
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);

    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);

  if (UseMI.isBranch() && IsRegCR) {
#define InfoArrayIdxFMAInst 0
#define InfoArrayIdxFAddInst 1
#define InfoArrayIdxFMULInst 2
#define InfoArrayIdxAddOpIdx 3
#define InfoArrayIdxMULOpIdx 4
#define InfoArrayIdxFSubInst 5
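// Each row below follows the InfoArrayIdx* layout above: the FMA opcode, its
// matching add and mul opcodes, the operand index of the addend, the operand
// index of the first multiplicand, and the matching sub opcode.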
    {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
    {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
    {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
    {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
    {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
    {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {

                                       bool DoRegPressureReduce) const {
  auto IsAllOpsVirtualReg = [](const MachineInstr &Instr) {
    for (const auto &MO : Instr.explicit_operands())
      if (!(MO.isReg() && MO.getReg().isVirtual()))

  auto IsReassociableAddOrSub = [&](const MachineInstr &Instr,
    if (Instr.getOpcode() !=

    if (!IsAllOpsVirtualReg(Instr))

        !MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))

  auto IsReassociableFMA = [&](const MachineInstr &Instr, int16_t &AddOpIdx,
                               int16_t &MulOpIdx, bool IsLeaf) {
    int16_t Idx = getFMAOpIdxInfo(Instr.getOpcode());

    if (!IsAllOpsVirtualReg(Instr))

  int16_t AddOpIdx = -1;
  int16_t MulOpIdx = -1;

  bool IsUsedOnceL = false;
  bool IsUsedOnceR = false;

  auto IsRPReductionCandidate = [&]() {

    if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)

    if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx, true)) {
      assert((MulOpIdx >= 0) && "mul operand index not right!");
      Register MULRegL = TRI->lookThruSingleUseCopyChain(

      Register MULRegR = TRI->lookThruSingleUseCopyChain(

      if (!MULRegL && !MULRegR)

      if (MULRegL && !MULRegR) {
      } else if (!MULRegL && MULRegR) {

      MULInstrL = MRI->getVRegDef(MULRegL);
      MULInstrR = MRI->getVRegDef(MULRegR);
  if (DoRegPressureReduce && IsRPReductionCandidate()) {
    assert((MULInstrL && MULInstrR) &&
           "wrong register pressure reduction candidate!");

  if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx, false))

  assert((AddOpIdx >= 0) && "add operand index not right!");

  if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx, false))

  assert((AddOpIdx >= 0) && "add operand index not right!");

  if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx, true)) {

  assert(!InsInstrs.empty() &&
         "Instructions set to be inserted is empty!");
  assert(isa<llvm::ConstantFP>(C) && "not a valid constant!");

  APFloat F1((dyn_cast<ConstantFP>(C))->getValueAPF());

  Constant *NegC = ConstantFP::get(dyn_cast<ConstantFP>(C)->getContext(), F1);

  for (auto *Inst : InsInstrs) {

      assert(Operand.isReg() && "Invalid instruction in InsInstrs!");
      if (Operand.getReg() == PPC::ZERO8) {
        Placeholder = &Operand;

  assert(Placeholder && "Placeholder does not exist!");

      generateLoadForNewConst(ConstPoolIdx, &Root, C->getType(), InsInstrs);

  Placeholder->setReg(LoadNewConst);
  if (!(Subtarget.isPPC64() && Subtarget.hasP9Vector() &&

  auto GetMBBPressure =

        if (MI.isDebugValue() || MI.isDebugLabel())

        RPTracker.recede(RegOpers);

  unsigned VSSRCLimit =

  return GetMBBPressure(MBB)[PPC::RegisterPressureSets::VSSRC] >

  if (!I->hasOneMemOperand())

  return Op->isLoad() && Op->getPseudoValue() &&
Register PPCInstrInfo::generateLoadForNewConst(

         "Target not supported!\n");

  Register VReg1 = MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);

  BuildMI(*MF, MI->getDebugLoc(), get(PPC::ADDIStocHA8), VReg1)

         "Only float and double are supported!");

    LoadOpcode = PPC::DFLOADf32;

    LoadOpcode = PPC::DFLOADf64;
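// The new constant is materialized from the constant pool via the TOC: an
// ADDIStocHA8 forms the high part of the address, and the load opcode is then
// chosen by element type (DFLOADf32 for float, DFLOADf64 for double).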
  assert(I->mayLoad() && "Should be a load instruction.\n");

  for (auto MO : I->uses()) {

    if (Reg == 0 || !Reg.isVirtual())

  return (MCP->getConstants())[MO2.getIndex()].Val.ConstVal;

                                 bool DoRegPressureReduce) const {

                                      DoRegPressureReduce);

    reassociateFMA(Root, Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);

                   DelInstrs, InstrIdxForVirtReg);
void PPCInstrInfo::reassociateFMA(

  MRI.constrainRegClass(RegC, RC);

  int16_t Idx = getFMAOpIdxInfo(FmaOp);
  assert(Idx >= 0 && "Root must be a FMA instruction");

  bool IsILPReassociate =

    Leaf = MRI.getVRegDef(MULReg);

    Leaf = MRI.getVRegDef(MULReg);

  if (IsILPReassociate)

    MRI.constrainRegClass(Reg, RC);
    KillFlag = Operand.isKill();

                             bool &MulOp1KillFlag, bool &MulOp2KillFlag,
                             bool &AddOpKillFlag) {
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
    GetOperandInfo(Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
  Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,

  bool KillX = false, KillY = false, KillM11 = false, KillM12 = false,
       KillM21 = false, KillM22 = false, KillM31 = false, KillM32 = false,
       KillA11 = false, KillA21 = false, KillB = false;

  GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);

  if (IsILPReassociate)
    GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);

    GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
    GetOperandInfo(Leaf->getOperand(AddOpIdx), RegX, KillX);

    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);

    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);

  InstrIdxForVirtReg.insert(std::make_pair(NewVRA, 0));
  if (IsILPReassociate) {
    NewVRB = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRB, 1));

    NewVRD = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRD, 2));

                               Register RegMul2, bool KillRegMul2) {
    MI->getOperand(AddOpIdx).setReg(RegAdd);
    MI->getOperand(AddOpIdx).setIsKill(KillAdd);
    MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
    MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
    MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
    MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);

      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);

    assert(NewVRD && "new FMA register not created!");

      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewD, NewVRA, true, RegM31, KillM31, RegM32,

    bool KillVarReg = false;

      KillVarReg = KillM31;

      KillVarReg = KillM32;

  if (!IsILPReassociate) {

         "Insertion instructions set should not be empty!");

  if (IsILPReassociate)
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: return false;

  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;

                                           int &FrameIndex) const {

    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
  switch (MI.getOpcode()) {

  case PPC::ADDIStocHA:
  case PPC::ADDIStocHA8:

  case PPC::ADDItocL8:
  case PPC::LOAD_STACK_GUARD:
  case PPC::PPCLdFixedAddr:

  case PPC::XXLXORspz:
  case PPC::XXLXORdpz:
  case PPC::XXLEQVOnes:
  case PPC::XXSPLTI32DX:

  case PPC::XXSPLTIDP:

  case PPC::V_SETALLONESB:
  case PPC::V_SETALLONESH:
  case PPC::V_SETALLONES:

  case PPC::XXSETACCZ:
  case PPC::DMXXSETACCZ:

                                          int &FrameIndex) const {

    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
                                                   unsigned OpIdx2) const {

  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)

  if (MI.getOperand(3).getImm() != 0)

  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");

  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;

         "Expecting a two-address instruction!");
  assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");

  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();

  if (MB == 0 && ME == 31)

    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())

  MI.getOperand(0).setReg(Reg2);
  MI.getOperand(0).setSubReg(SubReg2);

  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
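// Swapping the rotate source with the inserted-into operand requires the
// complementary mask, so the new mask bounds become (ME + 1) and (MB - 1),
// each taken modulo 32.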
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {

  return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

  default: Opcode = PPC::NOP; break;
                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();

  if (!isUnpredicatedTerminator(*I))

  if (I->getOpcode() == PPC::B &&

      I->eraseFromParent();

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {

  } else if (LastInst.getOpcode() == PPC::BCC) {

  } else if (LastInst.getOpcode() == PPC::BC) {

  } else if (LastInst.getOpcode() == PPC::BCn) {

  } else if (LastInst.getOpcode() == PPC::BDNZ8 ||

  } else if (LastInst.getOpcode() == PPC::BDZ8 ||

  if (I != MBB.begin() && isUnpredicatedTerminator(*--I))

  if (SecondLastInst.getOpcode() == PPC::BCC &&

  } else if (SecondLastInst.getOpcode() == PPC::BC &&

  } else if (SecondLastInst.getOpcode() == PPC::BCn &&

  } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
              SecondLastInst.getOpcode() == PPC::BDNZ) &&

  } else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
              SecondLastInst.getOpcode() == PPC::BDZ) &&

    I->eraseFromParent();
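// analyzeBranch classifies the terminators it recognizes: an unconditional B,
// the conditional forms BCC/BC/BCn, and the CTR-decrementing loop branches
// BDNZ(8)/BDZ(8), both as the last and as the second-to-last terminator. With
// AllowModify set, redundant trailing branches can be erased.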
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)

  I->eraseFromParent();

  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)

  I->eraseFromParent();
                                    int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

         "PPC branch conditions have two components!");
  assert(!BytesAdded && "code size not handled");

  bool isPPC64 = Subtarget.isPPC64();

                          (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                          (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);

                        (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                        (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {
  if (!Subtarget.hasISEL())

  if (Cond.size() != 2)

      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));

  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))

         "PPC branch conditions have two components!");

      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  assert(RC && "TrueReg and FalseReg must have overlapping register classes");

  bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
                 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);

          PPC::GPRCRegClass.hasSubClassEq(RC) ||
          PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
         "isel is for regular integer GPRs only");

  unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;

  unsigned SubIdx = 0;
  bool SwapOps = false;
  switch (SelectPred) {

    SubIdx = PPC::sub_eq; SwapOps = false; break;

    SubIdx = PPC::sub_eq; SwapOps = true; break;

    SubIdx = PPC::sub_lt; SwapOps = false; break;

    SubIdx = PPC::sub_lt; SwapOps = true; break;

    SubIdx = PPC::sub_gt; SwapOps = false; break;

    SubIdx = PPC::sub_gt; SwapOps = true; break;

    SubIdx = PPC::sub_un; SwapOps = false; break;

    SubIdx = PPC::sub_un; SwapOps = true; break;

  Register FirstReg = SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg : FalseReg;

  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {

        MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
        &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;

    FirstReg = MRI.createVirtualRegister(FirstRC);
  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)

  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)

  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)

  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)

  assert(Ret != 4 && "Invalid CR bit register");
                               bool RenamableDest, bool RenamableSrc) const {

  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);

  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);

  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {

  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             (PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg))) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
    unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
    unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
    unsigned CRNum = TRI->getEncodingValue(SrcReg);

  } else if (PPC::G8RCRegClass.contains(SrcReg) &&
             PPC::VSFRCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");

  } else if (PPC::VSFRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");

  } else if (PPC::SPERCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {

  } else if (PPC::GPRCRegClass.contains(SrcReg) &&
             PPC::SPERCRegClass.contains(DestReg)) {

  } else if ((PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg)) &&
             SrcReg == PPC::CARRY) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);

  } else if ((PPC::G8RCRegClass.contains(SrcReg) ||
              PPC::GPRCRegClass.contains(SrcReg)) &&
             DestReg == PPC::CARRY) {
    bool Is64Bit = PPC::G8RCRegClass.contains(SrcReg);

  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))

  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))

  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))

  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))

  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))

  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))

  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
    Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
  else if (Subtarget.pairedVectorMemops() &&
           PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
    if (SrcReg > PPC::VSRp15)
      SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;

      SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
    if (DestReg > PPC::VSRp15)
      DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;

      DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;

  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))

  else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))

  else if ((PPC::ACCRCRegClass.contains(DestReg) ||
            PPC::UACCRCRegClass.contains(DestReg)) &&
           (PPC::ACCRCRegClass.contains(SrcReg) ||
            PPC::UACCRCRegClass.contains(SrcReg))) {

    bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
    bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
        PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
        PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;

    if (SrcPrimed && !KillSrc)

  } else if (PPC::G8pRCRegClass.contains(DestReg) &&
             PPC::G8pRCRegClass.contains(SrcReg)) {

    unsigned DestRegIdx = DestReg - PPC::G8p0;
    MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
    MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
    unsigned SrcRegIdx = SrcReg - PPC::G8p0;
    MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
    MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;

  } else if ((PPC::WACCRCRegClass.contains(DestReg) ||
              PPC::WACC_HIRCRegClass.contains(DestReg)) &&
             (PPC::WACCRCRegClass.contains(SrcReg) ||
              PPC::WACC_HIRCRegClass.contains(SrcReg))) {

    Opc = PPC::WACCRCRegClass.contains(SrcReg) ? PPC::DMXXEXTFDMR512
                                               : PPC::DMXXEXTFDMR512_HI;

    Opc = PPC::WACCRCRegClass.contains(DestReg) ? PPC::DMXXINSTDMR512
                                                : PPC::DMXXINSTDMR512_HI;

  } else if (PPC::DMRRCRegClass.contains(DestReg) &&
             PPC::DMRRCRegClass.contains(SrcReg)) {
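// copyPhysReg dispatches on the source/destination register classes: same-class
// copies pick a single move opcode, while cross-class cases get special
// sequences (FPR<->VSR via sub_64 super-registers, CR->GPR via MFOCRF plus
// RLWINM, GPR<->VSR direct moves, and paired VSR, ACC/UACC, WACC and DMR
// copies built from their underlying VSL registers).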
  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {

  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {

  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");

  } else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");

  } else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");

  } else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");

  } else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::DMRROWRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::DMRROWpRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::DMRpRCRegClass.hasSubClassEq(RC)) {

  } else if (PPC::DMRRCRegClass.hasSubClassEq(RC)) {

  return OpcodesForSpill[getSpillIndex(RC)];

  return OpcodesForSpill[getSpillIndex(RC)];
void PPCInstrInfo::StoreRegToStackSlot(

  if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
      PPC::CRBITRCRegClass.hasSubClassEq(RC))

  StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);

  NewMIs.back()->addMemOperand(MF, MMO);

                                        unsigned DestReg, int FrameIdx,

  LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);

  NewMIs.back()->addMemOperand(MF, MMO);

  assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)

  if (!DefMI.getOperand(1).isImm())

  if (DefMI.getOperand(1).getImm() != 0)

  for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&

  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");

  if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
      UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)

    bool isPPC64 = Subtarget.isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;

    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;

  UseMI.getOperand(UseIdx).setReg(ZeroReg);

  if (MRI->use_nodbg_empty(Reg))
    DefMI.eraseFromParent();
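// Only LI/LI8 of 0 is folded here: a use in an operand whose register class is
// GPRC_NOR0/G8RC_NOX0 can take ZERO/ZERO8 directly, and the defining LI is
// erased once it has no remaining non-debug uses.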
  if (MI.definesRegister(PPC::CTR, nullptr) ||
      MI.definesRegister(PPC::CTR8, nullptr))

                                 unsigned NumT, unsigned ExtraT,
                                 unsigned NumF, unsigned ExtraF,

  switch (MI.getOpcode()) {

  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));

      MI.setDesc(get(PPC::BCLR));

      MI.setDesc(get(PPC::BCLRn));

      MI.setDesc(get(PPC::BCCLR));

  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BC));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCn));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCC));

  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
             OpC == PPC::BCTRL8_RM) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)

    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
                 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
    bool isPPC64 = Subtarget.isPPC64();

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                             : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));

    if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)

  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)

  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
                                     std::vector<MachineOperand> &Pred,
                                     bool SkipDead) const {

      { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
        &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {

      if (MO.isDef() && RC->contains(MO.getReg())) {

      } else if (MO.isRegMask()) {

          if (MO.clobbersPhysReg(R)) {

                                  int64_t &Value) const {
  unsigned Opc = MI.getOpcode();

  default: return false;

    SrcReg = MI.getOperand(1).getReg();

    Value = MI.getOperand(2).getImm();

    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();

  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)

  bool isPPC64 = Subtarget.isPPC64();
  bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
  if (!MI)
    return false;

  bool equalityOnly = false;

  if (is32BitSignedCompare) {

  } else if (is32BitUnsignedCompare) {

    equalityOnly = true;

    equalityOnly = is64BitUnsignedCompare;

    equalityOnly = is32BitUnsignedCompare;

       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();

      if (SubIdx != PPC::sub_eq)

      bool FoundUse = false;

           J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();

  else if (Value != 0) {

    if (equalityOnly || !MRI->hasOneUse(CRReg))

    int16_t Immed = (int16_t)Value;

  for (; I != E && !noSub; --I) {

    unsigned IOpC = Instr.getOpcode();

    if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
                             Instr.readsRegister(PPC::CR0, TRI)))
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {

  int MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
      MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)

    NewOpC = PPC::getRecordFormOpcode(MIOpC);

  if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&

  bool ShouldSwap = false;

    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
                 Sub->getOperand(2).getReg() == SrcReg;

    ShouldSwap = !ShouldSwap;

       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();

             "Invalid predicate for equality-only optimization");

      assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
             "Invalid CR bit for equality-only optimization");

      if (NewSubReg == PPC::sub_lt)
        NewSubReg = PPC::sub_gt;
      else if (NewSubReg == PPC::sub_gt)
        NewSubReg = PPC::sub_lt;

         "Non-zero immediate support and ShouldSwap "
         "may conflict in updating predicate");
  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)

  MI->clearRegisterDeads(PPC::CR0);

  if (MIOpC != NewOpC) {

    if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
      Register GPRRes = MI->getOperand(0).getReg();
      int64_t SH = MI->getOperand(2).getImm();
      int64_t MB = MI->getOperand(3).getImm();
      int64_t ME = MI->getOperand(4).getImm();

      bool MBInLoHWord = MB >= 16;
      bool MEInLoHWord = ME >= 16;

      if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
        Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);

        Mask >>= MBInLoHWord ? 0 : 16;
        NewOpC = MIOpC == PPC::RLWINM
                     ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
                     : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
      } else if (MRI->use_empty(GPRRes) && (ME == 31) &&
                 (ME - MB + 1 == SH) && (MB >= 16)) {

        Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);

        NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;

      if (Mask != ~0LLU) {
        MI->removeOperand(4);
        MI->removeOperand(3);
        MI->getOperand(2).setImm(Mask);
        NumRcRotatesConvertedToRcAnd++;

    } else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
      int64_t MB = MI->getOperand(3).getImm();

      uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
      NewOpC = PPC::ANDI8_rec;
      MI->removeOperand(3);
      MI->getOperand(2).setImm(Mask);
      NumRcRotatesConvertedToRcAnd++;
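// When the compared value comes from a rotate whose mask can be expressed as a
// 16-bit AND mask, the rotate is rewritten as a record-form ANDI/ANDIS so it
// sets CR0 itself; NumRcRotatesConvertedToRcAnd counts these rewrites.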
    MI->setDesc(NewDesc);

      if (!MI->definesRegister(ImpDef, nullptr)) {
        MI->addOperand(*MI->getParent()->getParent(),

      if (!MI->readsRegister(ImpUse, nullptr)) {
        MI->addOperand(*MI->getParent()->getParent(),

  assert(MI->definesRegister(PPC::CR0, nullptr) &&
         "Record-form instruction does not define cr0?");

  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);

  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
  int64_t CmpMask, CmpValue;

  if (CmpValue || !CmpMask || SrcReg2)

  if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)

  if (Subtarget.isPPC64() && Opc == PPC::CMPWI)

  bool SrcRegHasOtherUse = false;

  if (CRReg != PPC::CR0)

  bool SeenUseOfCRReg = false;
  bool IsCRRegKilled = false;
  if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI, false, IsCRRegKilled,

  int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);

         "Record-form instruction does not define cr0?");
  OffsetIsScalable = false;

  case PPC::DFSTOREf64:
    return FirstOpc == SecondOpc;

    return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;

    int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {

         "Only base registers and frame indices are supported.");

  if (ClusterSize > 2)

  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();

  int64_t Offset1 = 0, Offset2 = 0;

  assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
         "getMemOperandWithOffsetWidth return incorrect base op");

  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + (int64_t)Width1.getValue() == Offset2;
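// Memory operations are only clustered in pairs, and only when the second
// access starts exactly where the first one ends (Offset1 + Width1 == Offset2).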
  unsigned Opcode = MI.getOpcode();

  if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {

    const char *AsmStr = MI.getOperand(0).getSymbolName();

  } else if (Opcode == TargetOpcode::STACKMAP) {

  } else if (Opcode == TargetOpcode::PATCHPOINT) {

  return get(Opcode).getSize();

std::pair<unsigned, unsigned>

  return std::make_pair(TF, 0u);
  using namespace PPCII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PLT, "ppc-plt"},
      {MO_PIC_FLAG, "ppc-pic"},
      {MO_PCREL_FLAG, "ppc-pcrel"},
      {MO_GOT_FLAG, "ppc-got"},
      {MO_PCREL_OPT_FLAG, "ppc-opt-pcrel"},
      {MO_TLSGD_FLAG, "ppc-tlsgd"},
      {MO_TPREL_FLAG, "ppc-tprel"},
      {MO_TLSLDM_FLAG, "ppc-tlsldm"},
      {MO_TLSLD_FLAG, "ppc-tlsld"},
      {MO_TLSGDM_FLAG, "ppc-tlsgdm"},
      {MO_GOT_TLSGD_PCREL_FLAG, "ppc-got-tlsgd-pcrel"},
      {MO_GOT_TLSLD_PCREL_FLAG, "ppc-got-tlsld-pcrel"},
      {MO_GOT_TPREL_PCREL_FLAG, "ppc-got-tprel-pcrel"},

      {MO_TPREL_LO, "ppc-tprel-lo"},
      {MO_TPREL_HA, "ppc-tprel-ha"},
      {MO_DTPREL_LO, "ppc-dtprel-lo"},
      {MO_TLSLD_LO, "ppc-tlsld-lo"},
      {MO_TOC_LO, "ppc-toc-lo"},
      {MO_TLS, "ppc-tls"},
      {MO_PIC_HA_FLAG, "ppc-ha-pic"},
      {MO_PIC_LO_FLAG, "ppc-lo-pic"},
      {MO_TPREL_PCREL_FLAG, "ppc-tprel-pcrel"},
      {MO_TLS_PCREL_FLAG, "ppc-tls-pcrel"},
      {MO_GOT_PCREL_FLAG, "ppc-got-pcrel"},
  unsigned UpperOpcode, LowerOpcode;
  switch (MI.getOpcode()) {
  case PPC::DFLOADf32:
    UpperOpcode = PPC::LXSSP;
    LowerOpcode = PPC::LFS;

  case PPC::DFLOADf64:
    UpperOpcode = PPC::LXSD;
    LowerOpcode = PPC::LFD;

  case PPC::DFSTOREf32:
    UpperOpcode = PPC::STXSSP;
    LowerOpcode = PPC::STFS;

  case PPC::DFSTOREf64:
    UpperOpcode = PPC::STXSD;
    LowerOpcode = PPC::STFD;

  case PPC::XFLOADf32:
    UpperOpcode = PPC::LXSSPX;
    LowerOpcode = PPC::LFSX;

  case PPC::XFLOADf64:
    UpperOpcode = PPC::LXSDX;
    LowerOpcode = PPC::LFDX;

  case PPC::XFSTOREf32:
    UpperOpcode = PPC::STXSSPX;
    LowerOpcode = PPC::STFSX;

  case PPC::XFSTOREf64:
    UpperOpcode = PPC::STXSDX;
    LowerOpcode = PPC::STFDX;

    UpperOpcode = PPC::LXSIWAX;
    LowerOpcode = PPC::LFIWAX;

    UpperOpcode = PPC::LXSIWZX;
    LowerOpcode = PPC::LFIWZX;

    UpperOpcode = PPC::STXSIWX;
    LowerOpcode = PPC::STFIWX;

  Register TargetReg = MI.getOperand(0).getReg();

  if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
      (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
    Opcode = LowerOpcode;

    Opcode = UpperOpcode;
  MI.setDesc(get(Opcode));
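// The pseudo is lowered to the classic FP ("lower") opcode when register
// allocation picked a register in the lower half of the VSX bank
// (F0-F31 / VSL0-VSL31), and to the VSX ("upper") opcode otherwise.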
  auto &MBB = *MI.getParent();
  auto DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case PPC::BUILD_UACC: {

    if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
      MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
      MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;

      for (int VecNo = 0; VecNo < 4; VecNo++)
            .addReg(SrcVSR + VecNo)

  case PPC::KILL_PAIR: {
    MI.setDesc(get(PPC::UNENCODED_NOP));
    MI.removeOperand(1);
    MI.removeOperand(0);

  case TargetOpcode::LOAD_STACK_GUARD: {

        (Subtarget.isTargetLinux() || M->getStackProtectorGuard() == "tls") &&
        "Only Linux target or tls mode are expected to contain "
        "LOAD_STACK_GUARD");

    if (M->getStackProtectorGuard() == "tls")
      Offset = M->getStackProtectorGuardOffset();

    const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
    MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));

  case PPC::PPCLdFixedAddr: {

           "Only targets with Glibc expected to contain PPCLdFixedAddr");

    const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
    MI.setDesc(get(PPC::LWZ));

#undef PPC_LNX_FEATURE
#define PPC_LNX_DEFINE_OFFSETS
#include "llvm/TargetParser/PPCTargetParser.def"

    bool Is64 = Subtarget.isPPC64();
    if (FAType == PPC_FAWORD_HWCAP) {

        Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;

        Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
    } else if (FAType == PPC_FAWORD_HWCAP2) {

        Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;

        Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
    } else if (FAType == PPC_FAWORD_CPUID) {

        Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;

        Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;

    assert(Offset && "Do not know the offset for this fixed addr load");
    MI.removeOperand(1);

#define PPC_TGT_PARSER_UNDEF_MACROS
#include "llvm/TargetParser/PPCTargetParser.def"
#undef PPC_TGT_PARSER_UNDEF_MACROS
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64: {
    assert(Subtarget.hasP9Vector() &&
           "Invalid D-Form Pseudo-ops on Pre-P9 target.");

           "D-form op must have register and immediate operands");

  case PPC::XFLOADf32:
  case PPC::XFSTOREf32:

    assert(Subtarget.hasP8Vector() &&
           "Invalid X-Form Pseudo-ops on Pre-P8 target.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");

  case PPC::XFLOADf64:
  case PPC::XFSTOREf64: {
    assert(Subtarget.hasVSX() &&
           "Invalid X-Form Pseudo-ops on target that has no VSX.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");

  case PPC::SPILLTOVSR_LD: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg)) {
      MI.setDesc(get(PPC::DFLOADf64));

      MI.setDesc(get(PPC::LD));

  case PPC::SPILLTOVSR_ST: {

    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::DFSTOREf64));

      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STD));

  case PPC::SPILLTOVSR_LDX: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg))
      MI.setDesc(get(PPC::LXSDX));

      MI.setDesc(get(PPC::LDX));

  case PPC::SPILLTOVSR_STX: {

    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::STXSDX));

      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STDX));
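// SPILLTOVSR_* pseudos are resolved after register allocation: if the value
// landed in a VSX floating-point register the spill/reload uses a VSX memory
// opcode, otherwise the 64-bit GPR form; the two statistics above record how
// each store pseudo was expanded.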
  case PPC::CFENCE8: {
    auto Val = MI.getOperand(0).getReg();
    unsigned CmpOp = Subtarget.isPPC64() ? PPC::CMPD : PPC::CMPW;

    MI.setDesc(get(PPC::ISYNC));
    MI.removeOperand(0);

static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
                          unsigned TrueReg, unsigned FalseReg,
                          unsigned CRSubReg) {

  if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {

      return Imm1 < Imm2 ? TrueReg : FalseReg;

      return Imm1 > Imm2 ? TrueReg : FalseReg;

      return Imm1 == Imm2 ? TrueReg : FalseReg;

  } else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {

      return Imm1 == Imm2 ? TrueReg : FalseReg;

  return PPC::NoRegister;
                                              int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");

  Register InUseReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);

  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, TRI, false);
  if (UseOpIdx >= 0) {

    MI.removeOperand(UseOpIdx);

  int OperandToKeep = LII.SetCR ? 1 : 0;
  for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
    MI.removeOperand(i);

    MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
                                            bool &SeenIntermediateUse) const {
  assert(!MI.getParent()->getParent()->getRegInfo().isSSA() &&
         "Should be called after register allocation.");

  SeenIntermediateUse = false;
  for (; It != E; ++It) {
    if (It->modifiesRegister(Reg, TRI))

    if (It->readsRegister(Reg, TRI))
      SeenIntermediateUse = true;

                                         int64_t Imm) const {

         "Register should be in non-SSA form after RA");
  bool isPPC64 = Subtarget.isPPC64();

  if (isInt<16>(Imm)) {

  } else if (isInt<32>(Imm)) {

    assert(isPPC64 && "Materializing 64-bit immediate to single register is "
                      "only supported in PPC64");

    if ((Imm >> 32) & 0xFFFF)

          .addImm((Imm >> 32) & 0xFFFF);

          .addImm((Imm >> 16) & 0xFFFF);
                                               unsigned &OpNoForForwarding,
                                               bool &SeenIntermediateUse) const {
  OpNoForForwarding = ~0U;

    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      if (!MI.getOperand(i).isReg())

      if (!Reg.isVirtual())

      if (DefMIForTrueReg->getOpcode() == PPC::LI ||
          DefMIForTrueReg->getOpcode() == PPC::LI8 ||
          DefMIForTrueReg->getOpcode() == PPC::ADDI ||
          DefMIForTrueReg->getOpcode() == PPC::ADDI8) {
        OpNoForForwarding = i;
        DefMI = DefMIForTrueReg;

    unsigned Opc = MI.getOpcode();
    bool ConvertibleImmForm =
        Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
        Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
        Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
        Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
        Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
        Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
        Opc == PPC::RLWINM8_rec;
    bool IsVFReg = (MI.getNumOperands() && MI.getOperand(0).isReg())

    if ((Opc == PPC::OR || Opc == PPC::OR8) &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())

    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {

      SeenIntermediateUse = false;

        case PPC::ADDItocL8:

          OpNoForForwarding = i;

  return OpNoForForwarding == ~0U ? nullptr : DefMI;

unsigned PPCInstrInfo::getSpillTarget() const {

  bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();

  return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
         2 : Subtarget.hasP9Vector() ?
  bool PostRA = !MRI->isSSA();

  unsigned ToBeDeletedReg = 0;
  int64_t OffsetImm = 0;
  unsigned XFormOpcode = 0;

  bool OtherIntermediateUse = false;

  if (OtherIntermediateUse || !ADDMI)

  unsigned ScaleRegIdx = 0;
  int64_t OffsetAddi = 0;

  assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");

  for (auto It = ++Start; It != End; It++)

      (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))

  if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))

  MI.setDesc(get(XFormOpcode));

      .ChangeToRegister(ScaleReg, false, false,

      .ChangeToRegister(ToBeChangedReg, false, false, true);
                                                  int64_t &Imm) const {

  if (Opc != PPC::ADDI && Opc != PPC::ADDI8)

  return Opc == PPC::ADD4 || Opc == PPC::ADD8;

                                                unsigned &ToBeDeletedReg,
                                                unsigned &XFormOpcode,

  if (!MI.mayLoadOrStore())

  unsigned Opc = MI.getOpcode();

  if (XFormOpcode == PPC::INSTRUCTION_LIST_END)

  if (!ImmOperand.isImm())

  assert(RegOperand.isReg() && "Instruction format is not right");

  if (!RegOperand.isKill())

  ToBeDeletedReg = RegOperand.getReg();
  OffsetImm = ImmOperand.getImm();

                                           int64_t &OffsetAddi,
                                           int64_t OffsetImm) const {
  assert((Index == 1 || Index == 2) && "Invalid operand index for add.");

  bool OtherIntermediateUse = false;

  if (OtherIntermediateUse || !ADDIMI)

  if (isInt<16>(OffsetAddi + OffsetImm))
  bool PostRA = !MRI->isSSA();
  bool SeenIntermediateUse = true;
  unsigned ForwardingOperand = ~0U;
                                             SeenIntermediateUse);

  assert(ForwardingOperand < MI.getNumOperands() &&
         "The forwarding operand needs to be valid at this point");
  bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill();
  bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
  if (KilledDef && KillFwdDefMI)

          PPC::INSTRUCTION_LIST_END &&
      transformToNewImmFormFedByAdd(MI, *DefMI, ForwardingOperand))

  bool IsVFReg = MI.getOperand(0).isReg()

        transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI,

        transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI))

  if (!HasImmForm && simplifyToLI(MI, *DefMI, ForwardingOperand, KilledDef))
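// Once a forwardable LI/ADDI feeder is found, the conversions are attempted in
// order: the new reg+imm form fed by an add, the classic imm form fed by an
// add, the imm form fed by an LI, and finally a plain simplification to LI.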
  Register FoldingReg = MI.getOperand(1).getReg();

  if (SrcMI->getOpcode() != PPC::RLWINM &&
      SrcMI->getOpcode() != PPC::RLWINM_rec &&

  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&

         "Invalid PPC::RLWINM Instruction!");

  assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
         "Invalid PPC::RLWINM Instruction!");

  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);

  if ((MBMI > MEMI) && !SrcMaskFull)

  APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
  APInt FinalMask = RotatedSrcMask & MaskMI;

  bool Simplified = false;

  if (FinalMask.isZero()) {

        (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);

    if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM8) {

      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.removeOperand(2);
      MI.getOperand(1).ChangeToImmediate(0);
      MI.setDesc(get(Is64Bit ? PPC::LI8 : PPC::LI));

      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));

      MI.getOperand(1).setIsKill(true);

      MI.getOperand(1).setIsKill(false);

    uint16_t NewSH = (SHSrc + SHMI) % 32;
    MI.getOperand(2).setImm(NewSH);

    MI.getOperand(3).setImm(NewMB);
    MI.getOperand(4).setImm(NewME);

      MI.getOperand(1).setIsKill(true);

      MI.getOperand(1).setIsKill(false);

  if (Simplified & MRI->use_nodbg_empty(FoldingReg) &&
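// combineRLWINM folds a rotate-and-mask whose input is itself an RLWINM: when
// the composed mask is empty the instruction degenerates to LI 0 (or a
// zero-mask ANDI_rec for the record forms); otherwise the shifts are summed
// modulo 32 and a single RLWINM with the combined mask remains. The feeding
// RLWINM can then be erased once it has no other non-debug uses.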
  default: return false;

    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;

    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;

    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;

    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;

    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;

  case PPC::OR: III.ImmOpcode = PPC::ORI; break;
  case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
  case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
  case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;

  case PPC::RLWNM_rec:
  case PPC::RLWNM8_rec:

    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
        Opc == PPC::RLWNM8_rec)

    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNM_rec:

    case PPC::RLWNM8_rec:

    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;

    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;

  case PPC::RLDCL_rec:

  case PPC::RLDCR_rec:

    if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
        Opc == PPC::RLDCR_rec)

    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCL_rec:

    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCR_rec:

    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;

    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
  case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
  case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
  case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
  case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
  case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
  case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
  case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
  case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;

  case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
  case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
  case PPC::STBX: III.ImmOpcode = PPC::STB; break;
  case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
  case PPC::STHX: III.ImmOpcode = PPC::STH; break;
  case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
  case PPC::STWX: III.ImmOpcode = PPC::STW; break;
  case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;

  case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
  case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;

  case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
  case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
  case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
  case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
  case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
  case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
  case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
  case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;

  case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
  case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
  case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
  case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
  case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
  case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
  case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
  case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;

  case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
  case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())

    case PPC::XFLOADf32:

    case PPC::XFLOADf64:

    case PPC::XFSTOREf32:

    case PPC::XFSTOREf64:
  assert(Op1 != Op2 && "Cannot swap operand with itself.");

  unsigned MaxOp = std::max(Op1, Op2);
  unsigned MinOp = std::min(Op1, Op2);

  MI.removeOperand(std::max(Op1, Op2));
  MI.removeOperand(std::min(Op1, Op2));

  if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
    MI.addOperand(MOp2);
    MI.addOperand(MOp1);

    unsigned TotalOps = MI.getNumOperands() + 2;
    for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {

      MI.removeOperand(i);

    MI.addOperand(MOp2);

    for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {

        MI.addOperand(MOp1);

        MI.addOperand(MOps.back());
                                                  unsigned OpNoForForwarding

  if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)

  if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())

         "Add inst must have at least three operands");
  RegMO = &DefMI.getOperand(1);
  ImmMO = &DefMI.getOperand(2);

  if (!RegMO->isReg())

bool PPCInstrInfo::isRegElgibleForForwarding(

    bool &IsFwdFeederRegKilled, bool &SeenIntermediateUse) const {

  for (; It != E; ++It) {

      IsFwdFeederRegKilled = true;

      SeenIntermediateUse = true;

    if ((&*It) == &DefMI)

bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,

                                             int64_t BaseImm) const {

  if (DefMI.getOpcode() == PPC::ADDItocL8) {

  if (ImmMO.isImm()) {

    APInt ActualValue(64, ImmMO.getImm() + BaseImm, true);

    Imm = SignExtend64<16>(ImmMO.getImm() + BaseImm);
                                 unsigned OpNoForForwarding,

  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())

  int64_t Immediate = DefMI.getOperand(1).getImm();

  int64_t SExtImm = SignExtend64<16>(Immediate);

  bool ReplaceWithLI = false;
  bool Is64BitLI = false;

  unsigned Opc = MI.getOpcode();

    bool Changed = false;

    int64_t Comparand = MI.getOperand(2).getImm();
    int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0
                                ? (Comparand | 0xFFFFFFFFFFFF0000)
                                : Comparand;

    for (auto &CompareUseMI : MRI->use_instructions(DefReg)) {
      unsigned UseOpc = CompareUseMI.getOpcode();
      if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)

      unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
      Register TrueReg = CompareUseMI.getOperand(1).getReg();
      Register FalseReg = CompareUseMI.getOperand(2).getReg();
      unsigned RegToCopy =
          selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
      if (RegToCopy == PPC::NoRegister)

      if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
        CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));

        CompareUseMI.removeOperand(3);
        CompareUseMI.removeOperand(2);

          dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");

      CompareUseMI.setDesc(get(PPC::COPY));
      CompareUseMI.removeOperand(3);
      CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
      CmpIselsConverted++;

    MissedConvertibleImmediateInstrs++;
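// LI -> CMPI -> ISEL: when both constants of the compare are known, the ISEL's
// outcome is decided at compile time, so it is rewritten either as an LI of
// zero (when the selected operand is the zero register) or as a COPY of the
// chosen operand; CmpIselsConverted and MissedConvertibleImmediateInstrs track
// the converted and skipped cases.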
    int64_t Addend = MI.getOperand(2).getImm();
    if (isInt<16>(Addend + SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ADDI8;
      NewImm = Addend + SExtImm;

  case PPC::SUBFIC8: {

    if (MI.getNumOperands() > 3 && !MI.getOperand(3).isDead())

    int64_t Minuend = MI.getOperand(2).getImm();
    if (isInt<16>(Minuend - SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::SUBFIC8;
      NewImm = Minuend - SExtImm;

  case PPC::RLDICL_rec:
  case PPC::RLDICL_32:
  case PPC::RLDICL_32_64: {

    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,

    InVal = InVal.rotl(SH);

    if (isUInt<15>(InVal.getSExtValue()) ||
        (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
      ReplaceWithLI = true;
      Is64BitLI = Opc != PPC::RLDICL_32;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLDICL_rec;
  case PPC::RLWINM_rec:
  case PPC::RLWINM8_rec: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    int64_t ME = MI.getOperand(4).getImm();
    APInt InVal(32, SExtImm, true);
    InVal = InVal.rotl(SH);

    bool ValueFits = isUInt<15>(InVal.getSExtValue());
    ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
                  isUInt<16>(InVal.getSExtValue()));

      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;

    int64_t LogicalImm = MI.getOperand(2).getImm();

    if (Opc == PPC::ORI || Opc == PPC::ORI8)
      Result = LogicalImm | SExtImm;

      Result = LogicalImm ^ SExtImm;
    if (isInt<16>(Result)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;

  if (ReplaceWithLI) {

    bool ImmChanged = (SExtImm & NewImm) != NewImm;
    if (PostRA && ImmChanged)

      DefMI.getOperand(1).setImm(NewImm);

    else if (MRI->use_empty(MI.getOperand(0).getReg())) {

      assert(Immediate && "Transformation converted zero to non-zero?");

    } else if (ImmChanged)

  if (KilledDef && SetCR)
    *KilledDef = nullptr;
bool PPCInstrInfo::transformToNewImmFormFedByAdd(

  if (!MI.mayLoadOrStore())

  assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
         "MI must have x-form opcode");

  bool IsVFReg = MI.getOperand(0).isReg()

  if (!ImmOperandMI.isImm())

  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))

  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  int64_t ImmBase = ImmOperandMI.getImm();

  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm, ImmBase))

  LLVM_DEBUG(dbgs() << "Replacing existing reg+imm instruction:\n");

bool PPCInstrInfo::transformToImmFormFedByAdd(

  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))

  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))

  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))

  bool IsFwdFeederRegKilled = false;
  bool SeenIntermediateUse = false;

  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled, SeenIntermediateUse))

  if (ImmMO->isImm()) {

    if (DefMI.getOpcode() == PPC::ADDItocL8)

    MI.removeOperand(i);

  MI.addOperand(*ImmMO);

  for (auto &MO : MOps)
// ...
                                             unsigned ConstantOpNo,
                                             // ...
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())
    return false;
  // ...
  int64_t Imm = SignExtend64<16>(DefMI.getOperand(1).getImm());
  // ...
  APInt ActualValue(64, Imm, true);
  if (!ActualValue.isSignedIntN(III.ImmWidth))
    return false;
  // ...
  Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();
  // ...
  if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
      // ...
  if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
      ConstantOpNo != PosForOrigZero)
    return false;
  // ...
  unsigned Opc = MI.getOpcode();
  bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
                        Opc == PPC::SRW || Opc == PPC::SRW_rec ||
                        Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
                        Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
  bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
                        Opc == PPC::SRD || Opc == PPC::SRD_rec;
  bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
               Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
  bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
                    Opc == PPC::SRD_rec;
  if (SpecialShift32 || SpecialShift64) {
    // ...
    uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
    // When the amount has bit 0x20 (0x40 for the 64-bit forms) set, the
    // hardware result is zero, so MI is replaced with a load of the immediate 0.
    if (Imm & (SpecialShift32 ? 0x20 : 0x40))
      // ...
    // A shift by zero leaves the value unchanged; turn MI into a plain COPY.
    else if (!SetCR && ShAmt == 0 && !PostRA) {
      MI.removeOperand(2);
      MI.setDesc(get(PPC::COPY));
    }
    // ...
    if (SpecialShift32) {
      // ...
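// --- Illustrative sketch (not part of PPCInstrInfo.cpp): why a known shift
// amount lets the SLW/SRW (and SLD/SRD) special cases above collapse. The
// hardware looks at six bits of the amount; for the 32-bit forms an amount with
// bit 0x20 set produces zero, and an amount of zero is just a register copy.
#include <cstdint>
static uint32_t slwOnConstantAmount(uint32_t Val, uint64_t Amt) {
  uint64_t Sh = Amt & 0x3F;       // slw/srw use the low 6 bits of the amount
  return Sh > 31 ? 0 : Val << Sh; // bit 0x20 set (Sh >= 32) yields zero
}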
  const TargetRegisterClass *NewRC =
      MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass)
          ? &PPC::GPRC_and_GPRC_NOR0RegClass
          : &PPC::G8RC_and_G8RC_NOX0RegClass;
  MRI.setRegClass(RegToModify, NewRC);
  if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
    return &PPC::VSRCRegClass;
  // ...
  return PPC::getRecordFormOpcode(Opcode);
  // ...
  return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
          Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
          Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8);
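// --- Illustrative sketch (not part of PPCInstrInfo.cpp): a toy model of the
// pre-increment loads matched above. LBZU/LHZU define two registers: operand 0
// receives the zero-extended loaded value and operand 1 the updated base
// address, which is why callers also check which definition they are looking
// at. Mem stands in for the whole address space; all names are made up.
#include <cstdint>
struct SubwordPreincLoadResult {
  uint64_t Loaded;  // operand 0: the loaded byte/halfword, zero-extended
  uint64_t NewBase; // operand 1: the base register, written back with the EA
};
static SubwordPreincLoadResult lbzu(const uint8_t *Mem, uint64_t Base,
                                    int64_t Disp) {
  uint64_t EA = Base + uint64_t(Disp);
  return {uint64_t(Mem[EA]), EA};
}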
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isSExt32To64(Opcode))
    return true;
  // ...
  // RLDICL that clears at least the 33 most-significant bits leaves bit 31
  // zero, so the result is already sign extended.
  if (Opcode == PPC::RLDICL && MI->getOperand(3).getImm() >= 33)
    return true;
  // A non-wrapping RLWINM/RLWNM mask with MB > 0 clears the top 33 bits of the
  // result, so the output is sign extended as well.
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
      MI->getOperand(3).getImm() > 0 &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
    return true;
  // ANDIS. clears the low 16 bits and the whole high word; if bit 0x8000 of the
  // immediate is clear, bit 31 is clear too, so the top 33 bits are zero.
  if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
    // ...
    if ((Imm & 0x8000) == 0)
      return true;
  }
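// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the reasoning behind
// the RLDICL check above, as standalone C++. rldicl rotates left by SH and then
// clears the MB most-significant bits; once MB >= 33, bits 63..31 of the result
// are all zero, so the value is its own sign (and zero) extension from 32 bits.
// SH and MB are assumed to be in 0..63.
#include <cstdint>
static uint64_t rldicl(uint64_t Val, unsigned SH, unsigned MB) {
  uint64_t Rot = (Val << SH) | (SH ? Val >> (64 - SH) : 0); // rotate left by SH
  uint64_t Mask = ~0ull >> MB;                              // clear top MB bits
  return Rot & Mask;
}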
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isZExt32To64(Opcode))
    return true;
  // The value-bearing def of the pre-increment loads is zero extended.
  if ((/* ... */ Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 ||
       Opcode == PPC::LWZUX8) &&
      MI->getOperand(0).getReg() == Reg)
    return true;
  // LI/LIS with an operand no larger than 0x7FFF produce a non-negative value
  // that fits in the low 32 bits.
  if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
      Opcode == PPC::LIS || Opcode == PPC::LIS8) {
    int64_t Imm = MI->getOperand(1).getImm();
    if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
      return true;
  }
  // RLDICL-family rotates that clear at least the top 32 bits zero extend.
  if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
       Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
       Opcode == PPC::RLDICL_32_64) &&
      MI->getOperand(3).getImm() >= 32)
    return true;
  if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
      MI->getOperand(3).getImm() >= 32 &&
      MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm())
    return true;
  // A non-wrapping RLWINM/RLWNM mask keeps only bits of the low word.
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
       Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
    return true;
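// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the LI/LIS check
// above. li sign-extends its 16-bit operand, so only operand values in
// 0..0x7FFF leave the 64-bit register zero extended; for lis the register holds
// that operand shifted left by 16, which still fits in 31 bits.
#include <cstdint>
static bool liResultIsZeroExtended(int64_t Imm) {
  return (uint64_t(Imm) & ~0x7FFFull) == 0; // no bits above bit 14 set
}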
  if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg())
    return false;
  // ...
  Register StackReg = MI.getOperand(2).getReg();
void PPCInstrInfo::promoteInstr32To64ForElimEXTSW(const Register &Reg,
                                                  MachineRegisterInfo *MRI,
                                                  unsigned BinOpDepth,
                                                  LiveVariables *LV) const {
  if (!Reg.isVirtual())
    return;
  // ...
  unsigned Opcode = MI->getOpcode();
  // ...
  unsigned OperandEnd = 3, OperandStride = 1;
  if (Opcode == PPC::PHI) {
    OperandEnd = MI->getNumOperands();
    OperandStride = 2; // PHI operands come in (value, predecessor) pairs
  }
  for (unsigned I = 1; I < OperandEnd; I += OperandStride) {
    assert(MI->getOperand(I).isReg() && "Operand must be register");
    promoteInstr32To64ForElimEXTSW(MI->getOperand(I).getReg(), MRI,
                                   BinOpDepth + 1, LV);
  }
  // ...
  Register SrcReg = MI->getOperand(1).getReg();
  // ...
  if (SrcReg != PPC::X3)
    // ...
  promoteInstr32To64ForElimEXTSW(/* ... */ MRI, BinOpDepth + 1, LV);
  promoteInstr32To64ForElimEXTSW(/* ... */ MRI, BinOpDepth + 1, LV);
  // ...
  if (RC == &PPC::G8RCRegClass || RC == &PPC::G8RC_and_G8RC_NOX0RegClass)
    return;
  std::unordered_map<unsigned, unsigned> OpcodeMap = {
      {PPC::OR, PPC::OR8},     {PPC::ISEL, PPC::ISEL8},
      {PPC::ORI, PPC::ORI8},   {PPC::XORI, PPC::XORI8},
      {PPC::ORIS, PPC::ORIS8}, {PPC::XORIS, PPC::XORIS8},
      {PPC::AND, PPC::AND8}};
  // ...
  int NewOpcode = -1;
  auto It = OpcodeMap.find(Opcode);
  if (It != OpcodeMap.end()) {
    NewOpcode = It->second;
  } else {
    if (!TII->isSExt32To64(Opcode))
      return;
    NewOpcode = PPC::get64BitInstrFromSignedExt32BitInstr(Opcode);
  }
  assert(NewOpcode != -1 &&
         "Must have a 64-bit opcode to map the 32-bit opcode!");
  // ...
  Register SrcReg = MI->getOperand(0).getReg();
  // ...
  auto MBB = MI->getParent();
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      continue;
    // ...
    if (NewUsedRegRC != OrgRC && (OrgRC == &PPC::GPRCRegClass ||
                                  OrgRC == &PPC::GPRC_and_GPRC_NOR0RegClass)) {
      // Build a 64-bit value for this 32-bit use (the copies are elided here).
      Register TmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      Register DstTmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      // ...
      PromoteRegs[i] = DstTmpReg;
    }
  }
  // ...
  Register NewDefinedReg = MRI->createVirtualRegister(NewRC);
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    if (auto It = PromoteRegs.find(i); It != PromoteRegs.end())
      // ...
  }
  // ...
  for (unsigned i = 1; i < Iter->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      continue;
    // ...
  }
  // ...
  MI->eraseFromParent();
std::pair<bool, bool>
PPCInstrInfo::isSignOrZeroExtended(const unsigned Reg,
                                   const unsigned BinOpDepth,
                                   const MachineRegisterInfo *MRI) const {
  // ...
    return std::pair<bool, bool>(false, false);
  // ...
    return std::pair<bool, bool>(false, false);
  // ...
  if (IsSExt && IsZExt)
    return std::pair<bool, bool>(IsSExt, IsZExt);

  switch (MI->getOpcode()) {
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    if (MI->getParent()->getBasicBlock() ==
        // ...
      return std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
    if (SrcReg != PPC::X3) {
      // ...
      return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                   SrcExt.second || IsZExt);
    }
    // ...
    std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
    IsSExt |= Attrs.hasAttribute(Attribute::SExt);
    IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
    return IsExtendPair;
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
      return std::pair<bool, bool>(false, SrcExt.second || IsZExt);
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    unsigned OperandEnd = 3, OperandStride = 1;
    if (MI->getOpcode() == PPC::PHI) {
      OperandEnd = MI->getNumOperands();
      OperandStride = 2; // PHI operands come in (value, predecessor) pairs
    }
    // ...
    for (unsigned I = 1; I != OperandEnd; I += OperandStride) {
      if (!MI->getOperand(I).isReg())
        return std::pair<bool, bool>(false, false);
      // ...
      IsSExt &= SrcExt.first;
      IsZExt &= SrcExt.second;
    }
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    Register SrcReg1 = MI->getOperand(1).getReg();
    Register SrcReg2 = MI->getOperand(2).getReg();
    // ...
    return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
                                 Src1Ext.second || Src2Ext.second);
  // ...
  }
  return std::pair<bool, bool>(IsSExt, IsZExt);
}
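// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the combination rules
// visible in the returns above, written out directly. ExtPair and the helper
// names are made up; first means "sign extended", second means "zero extended".
// A PHI is only extended if every incoming value is, while a bitwise AND of two
// values is zero extended if either input is and sign extended only if both
// inputs are.
#include <utility>
using ExtPair = std::pair<bool, bool>;
static ExtPair combineForPhi(ExtPair A, ExtPair B) {
  return {A.first && B.first, A.second && B.second};
}
static ExtPair combineForAnd(ExtPair A, ExtPair B) {
  return {A.first && B.first, A.second || B.second};
}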
  return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
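// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the bdnz semantics
// checked for above, modeled as plain C++. The branch decrements the count
// register and is taken while the new value is still nonzero, which is what
// makes it usable as the hardware-loop latch the pipeliner code below relies on.
#include <cstdint>
static bool bdnzTaken(int64_t &CTR) {
  --CTR;
  return CTR != 0;
}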
      : Loop(Loop), EndLoop(EndLoop), LoopCount(LoopCount),
        TII(MF->getSubtarget().getInstrInfo()) {
    // ...
  }

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    return MI == EndLoop;
  }

  std::optional<bool> createTripCountGreaterCondition(
      // ...
    if (TripCount == -1) {
      // ...
    }
    return TripCount > TC;
  }

  void adjustTripCount(int TripCountAdjust) override {
    // ...
    if (LoopCount->getOpcode() == PPC::LI8 ||
        // ...
  }
  // ...
    Loop->eraseFromParent();
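// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the known-trip-count
// fast path of createTripCountGreaterCondition above. When the CTR seed is a
// compile-time constant, the "trip count > TC" query is answered immediately;
// otherwise the real code sets up a condition on the count register and returns
// an empty optional. Names here are made up.
#include <cstdint>
#include <optional>
static std::optional<bool> tripCountGreaterThan(int64_t KnownTripCount, int TC) {
  if (KnownTripCount == -1) // -1 encodes "not a compile-time constant"
    return std::nullopt;
  return KnownTripCount > TC;
}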
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
PPCInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  // ...
  if (Preheader == LoopBB)
    Preheader = *std::next(LoopBB->pred_begin());
  // ...
  if (I != LoopBB->end() && isBDNZ(I->getOpcode())) {
    // ...
    Register LoopCountReg = LoopInst->getOperand(0).getReg();
    // ...
    return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*I, LoopCount);
  }
  unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
  // ...
  for (auto &I : PreHeader.instrs())
    if (I.getOpcode() == LOOPi)
      // ...
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  if (// ...
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
    return true;
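// --- Illustrative sketch (not part of PPCInstrInfo.cpp): the disjointness test
// that ends the listing above. Two fixed-width accesses at constant offsets
// from the same base cannot overlap when the lower one ends at or before the
// higher one begins. The helper name is made up.
#include <algorithm>
#include <cstdint>
static bool constantOffsetAccessesAreDisjoint(int64_t OffA, uint64_t WidthA,
                                              int64_t OffB, uint64_t WidthB) {
  int64_t Low = std::min(OffA, OffB);
  int64_t High = std::max(OffA, OffB);
  uint64_t LowWidth = (Low == OffA) ? WidthA : WidthB;
  return Low + int64_t(LowWidth) <= High;
}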
const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder & DefMI
assert(UImm && (UImm != ~static_cast< T >(0)) && "Invalid immediate!")
DebugLoc DL
MachineBasicBlock::iterator MBBI
static const Function * getParent(const Value *V)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
static bool isOpZeroOfSubwordPreincLoad(int Opcode)
static bool MBBDefinesCTR(MachineBasicBlock &MBB)
static bool definedByZeroExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< float > FMARPFactor("ppc-fma-rp-factor", cl::Hidden, cl::init(1.5), cl::desc("register pressure factor for the transformations."))
#define InfoArrayIdxMULOpIdx
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc, unsigned TrueReg, unsigned FalseReg, unsigned CRSubReg)
static unsigned getCRBitValue(unsigned CRBit)
static bool isAnImmediateOperand(const MachineOperand &MO)
static const uint16_t FMAOpIdxInfo[][6]
static cl::opt< bool > DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden, cl::desc("Disable analysis for CTR loops"))
#define InfoArrayIdxAddOpIdx
static cl::opt< bool > UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden, cl::desc("Use the old (incorrect) instruction latency calculation"))
#define InfoArrayIdxFMAInst
static bool isClusterableLdStOpcPair(unsigned FirstOpc, unsigned SecondOpc, const PPCSubtarget &Subtarget)
static cl::opt< bool > EnableFMARegPressureReduction("ppc-fma-rp-reduction", cl::Hidden, cl::init(true), cl::desc("enable register pressure reduce in machine combiner pass."))
static bool isLdStSafeToCluster(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
const unsigned MAX_BINOP_DEPTH
static cl::opt< bool > DisableCmpOpt("disable-ppc-cmp-opt", cl::desc("Disable compare instruction optimization"), cl::Hidden)
#define InfoArrayIdxFSubInst
#define InfoArrayIdxFAddInst
#define InfoArrayIdxFMULInst
static bool definedBySignExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< bool > VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy", cl::desc("Causes the backend to crash instead of generating a nop VSX copy"), cl::Hidden)
static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2)
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static unsigned getSize(unsigned Kind)
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, unsigned hiBit)
Wrap version of getBitsSet.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
LLVM_ABI AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
const BasicBlock & getEntryBlock() const
AttributeList getAttributes() const
Return the attribute list for this Function.
Type * getReturnType() const
Returns the type of the ret val.
A possibly irreducible generalization of a Loop.
Module * getParent()
Get the module that this global value is contained inside of...
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
Class to represent integer types.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
LLVM_ABI void recomputeForSingleDefVirtReg(Register Reg)
Recompute liveness from scratch for a virtual register Reg that is known to have a single def that do...
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
Represents a single loop in the control flow graph.
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
bool isPseudo() const
Return true if this is a pseudo instruction that doesn't correspond to a real machine instruction.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint16_t Constraints
Operand constraints (see OperandConstraint enum).
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Wrapper class representing physical registers. Should be passed by value.
instr_iterator instr_begin()
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
LLVM_ABI iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCall(QueryType Type=AnyInBundle) const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool hasImplicitDef() const
Returns true if the instruction has implicit definition.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
mop_range uses()
Returns all operands which may be register uses.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void dump() const
LLVM_ABI void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool isLiveIn(Register Reg) const
PPCDispatchGroupSBHazardRecognizer - This class implements a scoreboard-based hazard recognizer for P...
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
bool isLiveInSExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and sign-extended.
bool isLiveInZExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and zero-extended.
PPCHazardRecognizer970 - This class defines a finite state automata that models the dispatch logic on...
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
PPCInstrInfo(PPCSubtarget &STI)
bool getFMAPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for a fma chain ending in Root.
bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase=nullptr) const
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
const TargetRegisterClass * updatedRC(const TargetRegisterClass *RC) const
bool isPredicated(const MachineInstr &MI) const override
bool expandVSXMemPseudo(MachineInstr &MI) const
bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
Fixup the placeholders we put in genAlternativeCodeSequence() for MachineCombiner.
MCInst getNop() const override
Return the noop instruction to use for a noop.
static int getRecordFormOpcode(unsigned Opcode)
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool isXFormMemOp(unsigned Opcode) const
const PPCRegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
CombinerObjective getCombinerObjective(unsigned Pattern) const override
void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const
unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const
void promoteInstr32To64ForElimEXTSW(const Register &Reg, MachineRegisterInfo *MRI, unsigned BinOpDepth, LiveVariables *LV) const
bool isTOCSaveMI(const MachineInstr &MI) const
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer to use for this target when ...
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
bool isBDNZ(unsigned Opcode) const
Check Opcode is BDNZ (Decrement CTR and branch if it is still nonzero).
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
bool isZeroExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
std::pair< bool, bool > isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth, const MachineRegisterInfo *MRI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index, MachineInstr *&ADDIMI, int64_t &OffsetAddi, int64_t OffsetImm) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
void materializeImmPostRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, int64_t Imm) const
bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Return true if two MIs access different memory addresses and false otherwise.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
CreateTargetHazardRecognizer - Return the hazard recognizer to use for this target when scheduling th...
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
Get the base operand and byte offset of an instruction that reads/writes memory.
void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const
bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const
void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool foldFrameOffset(MachineInstr &MI) const
bool isLoadFromConstantPool(MachineInstr *I) const
MachineInstr * findLoopInstr(MachineBasicBlock &PreHeader, SmallPtrSet< MachineBasicBlock *, 8 > &Visited) const
Find the hardware loop instruction used to set-up the specified loop.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
bool convertToImmediateForm(MachineInstr &MI, SmallSet< Register, 4 > &RegsToUpdate, MachineInstr **KilledDef=nullptr) const
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
Return true if get the base operand, byte offset of an instruction and the memory width.
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const override
On PowerPC, we leverage machine combiner pass to reduce register pressure when the register pressure ...
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool isSignExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo, int64_t Imm) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Returns true if the two given memory operations should be scheduled adjacent.
void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const
bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg, unsigned &XFormOpcode, int64_t &OffsetOfImmInstr, ImmInstrInfo &III) const
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in <Root...
bool optimizeCmpPostRA(MachineInstr &MI) const
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
const Constant * getConstantFromConstantPool(MachineInstr *I) const
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III, bool PostRA) const
MachineInstr * getDefMIPostRA(unsigned Reg, MachineInstr &MI, bool &SeenIntermediateUse) const
unsigned getMappedIdxOpcForImmOpc(unsigned ImmOpcode) const
getMappedIdxOpcForImmOpc - Return the mapped index form load/store opcode for a given imm form load/s...
static void emitAccCopyInfo(MachineBasicBlock &MBB, MCRegister DestReg, MCRegister SrcReg)
const PPCFrameLowering * getFrameLowering() const override
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
bool isLittleEndian() const
bool isTargetLinux() const
const PPCTargetMachine & getTargetMachine() const
const Triple & getTargetTriple() const
void setGlibcHWCAPAccess(bool Val=true) const
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void closeRegion()
Finalize the region boundaries and recored live ins and live outs.
LLVM_ABI void recede(SmallVectorImpl< VRegMaskOrUnit > *LiveUses=nullptr)
Recede across the previous instruction.
RegisterPressure & getPressure()
Get the resulting register pressure over the traversed region.
LLVM_ABI void recedeSkipDebugValues()
Recede until we find an instruction which is not a DebugValue.
LLVM_ABI void init(const MachineFunction *mf, const RegisterClassInfo *rci, const LiveIntervals *lis, const MachineBasicBlock *mbb, MachineBasicBlock::const_iterator pos, bool TrackLaneMasks, bool TrackUntiedDefs)
Setup the RegPressureTracker.
MachineBasicBlock::const_iterator getPos() const
Get the MI position corresponding to this register pressure.
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
void backward()
Update internal register state and move MBB iterator backwards.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
List of registers defined and used by a machine instruction.
LLVM_ABI void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
const TargetInstrInfo * TII
Target instruction information.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
StackOffset holds a fixed and a scalable offset in bytes.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
bool isOSGlibc() const
Tests whether the OS uses glibc.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVM Value Representation.
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Predicate getSwappedPredicate(Predicate Opcode)
Assume the condition register is set by MI(a,b), return the predicate if we modify the instructions s...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
int getAltVSXFMAOpcode(uint16_t Opcode)
int getNonRecordFormOpcode(uint16_t)
unsigned getPredicateCondition(Predicate Opcode)
Return the condition without hint bits.
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
unsigned getPredicateHint(Predicate Opcode)
Return the hint bits of the predicate.
Predicate InvertPredicate(Predicate Opcode)
Invert the specified predicate. != -> ==, < -> >=.
static bool isVFRegister(unsigned Reg)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
Reg
All possible values of the reg field in the ModR/M byte.
template class LLVM_TEMPLATE_ABI opt< bool >
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
static unsigned getCRFromCRBit(unsigned SrcReg)
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
@ MustReduceRegisterPressure
void recomputeLivenessFlags(MachineBasicBlock &MBB)
Recomputes dead and kill flags in MBB.
@ Sub
Subtraction of integers.
unsigned getKillRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t IsSummingOperands
uint64_t OpNoForForwarding
uint64_t ImmMustBeMultipleOf
uint64_t ZeroIsSpecialNew
uint64_t ZeroIsSpecialOrig
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
std::vector< unsigned > MaxSetPressure
Map of max reg pressure indexed by pressure set ID, not class ID.