#define DEBUG_TYPE "globalisel-utils"

using namespace MIPatternMatch;

  return MRI.createVirtualRegister(&RegClass);

  assert(Reg.isVirtual() && "PhysReg not implemented");
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  if (ConstrainedReg != Reg) {
            TII.get(TargetOpcode::COPY), ConstrainedReg)
            TII.get(TargetOpcode::COPY), Reg)
      Observer->changingInstr(*RegMO.getParent());
    RegMO.setReg(ConstrainedReg);
      Observer->changedInstr(*RegMO.getParent());
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
      Observer->changedInstr(*RegDef);
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
  return ConstrainedReg;
  assert(Reg.isVirtual() && "PhysReg not implemented");
  if (const auto *SubRC = TRI.getCommonSubClass(
          OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
  OpRC = TRI.getAllocatableClass(OpRC);
         "Register class constraint is required unless either the "
         "instruction is target independent or the operand is a use");

         "A selected instruction is expected");
  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    assert(MO.isReg() && "Unsupported non-reg operand");
    if (Reg.isPhysical())
    int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
    if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
      I.tieOperands(DefIdx, OpI);
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
  return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
         cast<const RegisterBank *>(DstRBC)->covers(
             *MRI.getRegClassOrNull(SrcReg));

  for (const auto &MO : MI.all_defs()) {
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
  return MI.wouldBeTriviallyDead();
  bool IsFatal = Severity == DS_Error &&
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

                                       MI.getDebugLoc(), MI.getParent());
  case TargetOpcode::G_SMIN:
    return TargetOpcode::G_SMAX;
  case TargetOpcode::G_SMAX:
    return TargetOpcode::G_SMIN;
  case TargetOpcode::G_UMIN:
    return TargetOpcode::G_UMAX;
  case TargetOpcode::G_UMAX:
    return TargetOpcode::G_UMIN;
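// Illustrative note (not part of the original file): getInverseGMinMaxOpcode
// maps each generic min/max opcode to its counterpart, e.g.
// getInverseGMinMaxOpcode(TargetOpcode::G_UMIN) == TargetOpcode::G_UMAX.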
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  return ValAndVReg->Value;

  assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
         "expected a G_CONSTANT on Reg");
  return Const->getOperand(1).getCImm()->getValue();

std::optional<int64_t>
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
std::optional<ValueAndVReg>
                                   bool LookThroughInstrs = true,
                                   bool LookThroughAnyExt = false) {
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();

  if (!MI || !IsConstantOpcode(MI))
  if (!GetAPCstValue(MI, Val))
  for (auto &Pair : reverse(SeenOpcodes)) {
    switch (Pair.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Pair.second);
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Pair.second);
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Pair.second);
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;

  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;

  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;

  return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
      VReg, MRI, LookThroughInstrs);

                                   bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough<isAnyConstant,
                                           getCImmOrFPImmAsAPInt>(
      VReg, MRI, LookThroughInstrs, LookThroughAnyExt);

      getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
          VReg, MRI, LookThroughInstrs);
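// Illustrative sketch (not part of the original file): a caller such as a
// combine rule can use getIConstantVRegValWithLookThrough to test whether a
// value, possibly hidden behind COPY/G_TRUNC/G_SEXT/G_ZEXT chains, is a
// known-zero constant. The helper name below is hypothetical.
static bool isKnownZeroShiftAmount(Register ShiftAmt,
                                   const MachineRegisterInfo &MRI) {
  if (std::optional<ValueAndVReg> VRegVal =
          getIConstantVRegValWithLookThrough(ShiftAmt, MRI))
    return VRegVal->Value.isZero();
  return false;
}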
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
  return MI->getOperand(1).getFPImm();

std::optional<DefinitionAndSourceRegister>
  auto DefIt = MRI.def_begin(Reg);
  if (DefIt == MRI.def_end())
  auto DstTy = MRI.getType(DefOpnd.getReg());
  if (!DstTy.isValid())
  auto SrcTy = MRI.getType(SrcReg);
  if (!SrcTy.isValid())

  std::optional<DefinitionAndSourceRegister> DefSrcReg =
  return DefSrcReg ? DefSrcReg->MI : nullptr;

  std::optional<DefinitionAndSourceRegister> DefSrcReg =
  return DefSrcReg ? DefSrcReg->Reg : Register();
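// Illustrative sketch (not part of the original file): getDefIgnoringCopies
// lets a client look through trivial copies to the real defining
// instruction, e.g. to test whether a value ultimately comes from a
// G_IMPLICIT_DEF. The helper name below is hypothetical.
static bool isDefinedByImplicitDef(Register Reg,
                                   const MachineRegisterInfo &MRI) {
  MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  return Def && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
}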
  for (int i = 0; i < NumParts; ++i)

  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));

  unsigned LeftoverNumElts = RegNumElts % MainNumElts;
  if (MainNumElts % LeftoverNumElts == 0 &&
      RegNumElts % LeftoverNumElts == 0 &&
      LeftoverNumElts > 1) {
    extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
    unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
    unsigned NumOfLeftoverVal =
        ((RegNumElts % MainNumElts) / LeftoverNumElts);
    for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
      if (MergeValues.size() == LeftoverPerMain) {
    for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
         I < UnmergeValues.size(); I++) {

  for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
  LeftoverTy = MRI.getType(LeftoverRegs[0]);

  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);

  LLT RegTy = MRI.getType(Reg);
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
  if (LeftoverNumElts == 1) {

  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
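// Illustrative note (not part of the original file): the conversion above is
// how getAPFloatFromSize(Val, 16) produces an IEEE half-precision APFloat;
// for sizes 32 and 64 the host double is used directly as an IEEE single or
// IEEE double, e.g. getAPFloatFromSize(1.0, 32) yields a single-precision 1.0.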
  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
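// Illustrative sketch (not part of the original file): ConstantFoldBinOp can
// be used from a combine to fold an operation whose operands are
// G_CONSTANT-defined registers. The helper name and the constants are
// assumptions for illustration only.
static Register buildFoldedAdd(MachineIRBuilder &MIB,
                               const MachineRegisterInfo &MRI) {
  LLT S32 = LLT::scalar(32);
  Register A = MIB.buildConstant(S32, 7).getReg(0);
  Register B = MIB.buildConstant(S32, 35).getReg(0);
  if (std::optional<APInt> Sum =
          ConstantFoldBinOp(TargetOpcode::G_ADD, A, B, MRI))
    return MIB.buildConstant(S32, *Sum).getReg(0); // materializes 42
  return Register();
}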
std::optional<APFloat>
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FCOPYSIGN:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:

  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
                                      SrcVec2->getSourceReg(Idx), MRI);
  return FoldedElements;

  return !FPVal->getValueAPF().isNaN() ||
         (SNaN && !FPVal->getValueAPF().isSignaling());
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCANONICALIZE:
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    return V->getPointerAlignment(M->getDataLayout());

  assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
  MRI.setType(LiveIn, RegTy);
  case TargetOpcode::G_SEXT_INREG: {
    LLT Ty = MRI.getType(Op1);

  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    return Val->zext(DstSize);

std::optional<APFloat>
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
                         APFloat::rmNearestTiesToEven);
std::optional<SmallVector<unsigned>>
                       std::function<unsigned(APInt)> CB) {
  LLT Ty = MRI.getType(Src);
  auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
      return std::nullopt;
    return CB(*MaybeCst);
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
      return std::nullopt;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
      return std::nullopt;
  if (auto MaybeCst = tryFoldScalar(Src)) {
  return std::nullopt;
std::optional<SmallVector<APInt>>
                 unsigned DstScalarSizeInBits, unsigned ExtOp,
  assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
         ExtOp == TargetOpcode::G_ANYEXT);
  const LLT Ty = MRI.getType(Op1);
  auto GetICmpResultCst = [&](bool IsTrue) {
    return ExtOp == TargetOpcode::G_SEXT
      return std::nullopt;
      return std::nullopt;
    case CmpInst::Predicate::ICMP_EQ:
      return GetICmpResultCst(LHSCst->eq(*RHSCst));
    case CmpInst::Predicate::ICMP_NE:
      return GetICmpResultCst(LHSCst->ne(*RHSCst));
    case CmpInst::Predicate::ICMP_UGT:
      return GetICmpResultCst(LHSCst->ugt(*RHSCst));
    case CmpInst::Predicate::ICMP_UGE:
      return GetICmpResultCst(LHSCst->uge(*RHSCst));
    case CmpInst::Predicate::ICMP_ULT:
      return GetICmpResultCst(LHSCst->ult(*RHSCst));
    case CmpInst::Predicate::ICMP_ULE:
      return GetICmpResultCst(LHSCst->ule(*RHSCst));
    case CmpInst::Predicate::ICMP_SGT:
      return GetICmpResultCst(LHSCst->sgt(*RHSCst));
    case CmpInst::Predicate::ICMP_SGE:
      return GetICmpResultCst(LHSCst->sge(*RHSCst));
    case CmpInst::Predicate::ICMP_SLT:
      return GetICmpResultCst(LHSCst->slt(*RHSCst));
    case CmpInst::Predicate::ICMP_SLE:
      return GetICmpResultCst(LHSCst->sle(*RHSCst));
    return std::nullopt;
  auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    return std::nullopt;
  assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
  for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
    if (auto MaybeFold =
            TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
    return std::nullopt;
  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
  return std::nullopt;
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
  const LLT Ty = MRI.getType(Reg);
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
  case TargetOpcode::G_SHL: {
  case TargetOpcode::G_LSHR: {
    if (ConstLHS->isSignMask())
  case TargetOpcode::G_BUILD_VECTOR: {
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1205 "getLCMType not implemented between fixed and scalable vectors.");
1225 LLT VecTy = OrigTy.
isVector() ? OrigTy : TargetTy;
1226 LLT ScalarTy = OrigTy.
isVector() ? TargetTy : OrigTy;
1261 "getCoverTy not implemented between fixed and scalable vectors.");
1269 if (OrigTyNumElts % TargetTyNumElts == 0)
1272 unsigned NumElts =
alignTo(OrigTyNumElts, TargetTyNumElts);
1292 "getGCDType not implemented between fixed and scalable vectors.");
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
  if (FirstDefinedIdx == Mask.end())
  int SplatValue = *FirstDefinedIdx;
              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;
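// Illustrative note (not part of the original file): with this definition a
// shuffle mask such as {2, -1, 2, 2} has splat index 2 (undefined lanes are
// ignored), while {0, 1, 0, 0} has none because two defined elements differ.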
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;

std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
    return std::nullopt;
  bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
    return std::nullopt;
  std::optional<ValueAndVReg> SplatValAndReg;
    auto ElementValAndReg =
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
      return std::nullopt;
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return std::nullopt;
  return SplatValAndReg;
                                  int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return SplatValAndReg->Value.getSExtValue() == SplatValue;

                                  APInt SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
    if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
          SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
        SplatValAndReg->Value,
        SplatValue.sext(SplatValAndReg->Value.getBitWidth()));

                                  int64_t SplatValue, bool AllowUndef) {
                                  APInt SplatValue, bool AllowUndef) {

  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, false)) {
    if (std::optional<ValueAndVReg> ValAndVReg =
      return ValAndVReg->Value;
  return std::nullopt;
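// Illustrative sketch (not part of the original file): getIConstantSplatVal
// gives the splatted APInt of a constant build-vector, so a client can test
// for a specific splat value. The helper name below is hypothetical.
static bool isSplatOfOne(Register Reg, const MachineRegisterInfo &MRI) {
  if (std::optional<APInt> Splat = getIConstantSplatVal(Reg, MRI))
    return Splat->isOne();
  return false;
}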
std::optional<int64_t>
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, false))
  return std::nullopt;

std::optional<int64_t>

std::optional<FPValueAndVReg>
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
  return std::nullopt;

std::optional<RegOrConstant>
  unsigned Opc = MI.getOpcode();
    return std::nullopt;
  auto Reg = MI.getOperand(1).getReg();
    return std::nullopt;
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;

  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {

                                      bool AllowFP, bool AllowOpaqueConstants) {
  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {

    return std::nullopt;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);

std::optional<APFloat>
    return FpConst->Value;
    return std::nullopt;
  return MaybeCstFP->Value;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {

  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
                          std::function<bool(const Constant *ConstVal)> Match,
                          bool AllowUndefs) {
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());
  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:

                            bool IsVector, bool IsFP) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:

  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
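// Illustrative note (not part of the original file): for a target using
// ZeroOrOneBooleanContent, getICmpTrueVal returns 1, whereas for
// ZeroOrNegativeOneBooleanContent it returns -1; isConstTrueVal and
// isConstFalseVal apply the same boolean-contents rules when testing a value.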
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  MI.eraseFromParent();

  while (!DeadInstChain.empty()) {

  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
  if (!DbgUsers.empty()) {
  case TargetOpcode::G_FABS:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FCANONICALIZE:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FCOPYSIGN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FEXP2:
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FLOG10:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPOW:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_INTRINSIC_ROUND:
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
  case TargetOpcode::G_INTRINSIC_TRUNC:
  LLT Ty = MRI.getType(ShiftAmount);
    std::optional<ValueAndVReg> Val =
    for (unsigned I = 0; I < Sources; ++I) {
      std::optional<ValueAndVReg> Val =
                                   bool ConsiderFlagsAndMetadata,
    if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
      if (GMI->hasPoisonGeneratingFlags())
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_ABS:
  case TargetOpcode::G_CTPOP:
  case TargetOpcode::G_BSWAP:
  case TargetOpcode::G_BITREVERSE:
  case TargetOpcode::G_FSHL:
  case TargetOpcode::G_FSHR:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SCMP:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UCMP:
  case TargetOpcode::G_PTRMASK:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SSUBSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSHLSAT:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    std::optional<ValueAndVReg> Index =
    LLT VecTy = MRI.getType(Insert->getVectorReg());
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    std::optional<ValueAndVReg> Index =
  case TargetOpcode::G_SHUFFLE_VECTOR: {
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_PHI:
  case TargetOpcode::G_SELECT:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_FREEZE:
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_PTR_ADD:
    return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
  case TargetOpcode::G_FREEZE:
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_BUILD_VECTOR: {
    for (unsigned I = 0; I < NumSources; ++I)
  case TargetOpcode::G_PHI: {
    GPhi *Phi = cast<GPhi>(RegDef);
    unsigned NumIncoming = Phi->getNumIncomingValues();
    for (unsigned I = 0; I < NumIncoming; ++I)
      return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,

                                bool ConsiderFlagsAndMetadata) {
  return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,

                      bool ConsiderFlagsAndMetadata = true) {
  return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,

  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

  switch (MI.getOpcode()) {
  case TargetOpcode::G_ASSERT_ALIGN:
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT:
std::optional<GIConstant>
  std::optional<ValueAndVReg> MayBeConstant =
    return std::nullopt;
    unsigned NumSources = Build->getNumSources();
    for (unsigned I = 0; I < NumSources; ++I) {
      Register SrcReg = Build->getSourceReg(I);
      std::optional<ValueAndVReg> MayBeConstant =
        return std::nullopt;
  std::optional<ValueAndVReg> MayBeConstant =
    return std::nullopt;

std::optional<GFConstant>
  std::optional<FPValueAndVReg> MayBeConstant =
    return std::nullopt;
    unsigned NumSources = Build->getNumSources();
    for (unsigned I = 0; I < NumSources; ++I) {
      Register SrcReg = Build->getSourceReg(I);
      std::optional<FPValueAndVReg> MayBeConstant =
        return std::nullopt;
  std::optional<FPValueAndVReg> MayBeConstant =
    return std::nullopt;