#define DEBUG_TYPE "mips-lower"

    cl::desc("MIPS: Don't trap on integer division by zero."),

  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
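// The cl::desc(...) string above appears to belong to the NoZeroDivCheck
// ("mno-check-zero-division") command-line option, and the D12_64..D19_64
// list appears to be the tail of the Mips64DPRegs[8] array of 64-bit FP
// registers declared near the top of the file.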
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  return NumIntermediates;

    unsigned Flag) const {

    unsigned Flag) const {

    unsigned Flag) const {

    unsigned Flag) const {

    unsigned Flag) const {
                                    N->getOffset(), Flag);

  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                           N->getOperand(0), N->getOperand(1));
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {

         "Illegal Condition Code");

  if (!LHS.getValueType().isFloatingPoint())

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
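// The fragments that follow appear to come from performANDCombine and
// performORCombine: they look for shifted-mask (SMPos/SMSize) patterns in
// (and x, mask) and (or (and ...), (shl ...)) nodes so the DAG combiner can
// form the MIPS bit-field extract/insert operations; the
// Subtarget.hasMips64r2() check guards the 64-bit variants.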
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
    NewOperand = FirstOperand;

  SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
       SecondOperand.getOpcode() == ISD::SHL) ||
       SecondOperand.getOpcode() == ISD::AND)) {
                          ? SecondOperand.getOperand(0)
    if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) ||
                           ? SecondOperand.getOperand(1)
    if (!(CN = dyn_cast<ConstantSDNode>(ShlShift)))
    if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
    EVT ValTy = N->getValueType(0);
    SMPos1 = ShlShiftValue;
    SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SecondOperand.getOpcode() == ISD::AND &&
      SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {
    if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) ||
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
  if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
      ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
       (SMSize0 + SMPos0 <= 32))) {
    if (SecondOperand.getOpcode() == ISD::AND) {
      if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1))))
      if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
    EVT ValTy = N->getOperand(0)->getValueType(0);
                         SecondOperand, Const1);

  if (!Mult.hasOneUse())
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  if (!IsSigned && !IsUnsigned)
  std::tie(BottomHalf, TopHalf) =

      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
  SDValue InnerAdd = N->getOperand(1);

  EVT ValTy = N->getValueType(0);

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())

  EVT VT = N->getValueType(0);
  auto *ConstantOperand = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  int64_t ConstImm = ConstantOperand->getSExtValue();
  unsigned Opc = N->getOpcode();

  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

          N->getOperand(0).getOpcode() == ISD::SRL) ||
          N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())

  switch (Op.getOpcode())
    return lowerFCANONICALIZE(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);

                                      bool Is64Bit, bool IsMicroMips) {
          TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
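// The switch below appears to be the body of
// MipsTargetLowering::EmitInstrWithCustomInserter: it routes each atomic
// pseudo-instruction to emitAtomicBinary / emitAtomicBinaryPartword /
// emitAtomicCmpSwap(Partword) (the trailing 1 or 2 is the access size in
// bytes) and expands the PseudoSELECT* forms via emitPseudoSELECT with the
// branch opcode to use (BNE, BC1F, BC1T).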
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);
  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
    return emitLDR_W(MI, BB);
    return emitLDR_D(MI, BB);
    return emitSTR_W(MI, BB);
    return emitSTR_D(MI, BB);
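// The fragment below appears to come from emitAtomicBinary: each i32/i64
// atomic pseudo is mapped to its post-RA expansion opcode (*_POSTRA), and the
// min/max/umin/umax forms additionally set NeedsAdditionalReg so a scratch
// register operand can be added before the original pseudo is erased.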
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;

  if (NeedsAdditionalReg) {
  MI.eraseFromParent();

                                                unsigned SrcReg) const {
  int64_t ShiftImm = 32 - (Size * 8);
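// The mapping below appears to be the i8/i16 counterpart
// (emitAtomicBinaryPartword): the sub-word pseudos are rewritten to their
// *_POSTRA opcodes and a mask immediate of 255 or 65535 is used depending on
// whether the access is one or two bytes wide.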
1772 "Unsupported size for EmitAtomicBinaryPartial.");
1799 unsigned AtomicOp = 0;
1800 bool NeedsAdditionalReg =
false;
1801 switch (
MI.getOpcode()) {
1802 case Mips::ATOMIC_LOAD_NAND_I8:
1803 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1805 case Mips::ATOMIC_LOAD_NAND_I16:
1806 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1808 case Mips::ATOMIC_SWAP_I8:
1809 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1811 case Mips::ATOMIC_SWAP_I16:
1812 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1814 case Mips::ATOMIC_LOAD_ADD_I8:
1815 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1817 case Mips::ATOMIC_LOAD_ADD_I16:
1818 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1820 case Mips::ATOMIC_LOAD_SUB_I8:
1821 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1823 case Mips::ATOMIC_LOAD_SUB_I16:
1824 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1826 case Mips::ATOMIC_LOAD_AND_I8:
1827 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1829 case Mips::ATOMIC_LOAD_AND_I16:
1830 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1832 case Mips::ATOMIC_LOAD_OR_I8:
1833 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1835 case Mips::ATOMIC_LOAD_OR_I16:
1836 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1838 case Mips::ATOMIC_LOAD_XOR_I8:
1839 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1841 case Mips::ATOMIC_LOAD_XOR_I16:
1842 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1844 case Mips::ATOMIC_LOAD_MIN_I8:
1845 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1846 NeedsAdditionalReg =
true;
1848 case Mips::ATOMIC_LOAD_MIN_I16:
1849 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1850 NeedsAdditionalReg =
true;
1852 case Mips::ATOMIC_LOAD_MAX_I8:
1853 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1854 NeedsAdditionalReg =
true;
1856 case Mips::ATOMIC_LOAD_MAX_I16:
1857 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1858 NeedsAdditionalReg =
true;
1860 case Mips::ATOMIC_LOAD_UMIN_I8:
1861 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1862 NeedsAdditionalReg =
true;
1864 case Mips::ATOMIC_LOAD_UMIN_I16:
1865 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1866 NeedsAdditionalReg =
true;
1868 case Mips::ATOMIC_LOAD_UMAX_I8:
1869 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1870 NeedsAdditionalReg =
true;
1872 case Mips::ATOMIC_LOAD_UMAX_I16:
1873 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1874 NeedsAdditionalReg =
true;
1903 int64_t MaskImm = (
Size == 1) ? 255 : 65535;
1944 if (NeedsAdditionalReg) {
1950 MI.eraseFromParent();
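// The compare-and-swap expansions follow: emitAtomicCmpSwap asserts it only
// sees the I32/I64 pseudos and copies the expected and new values into fresh
// virtual registers, while the partword variant again masks with 255/65535
// and computes an aligned address with DADDiu/ADDiu and AND64/AND depending
// on the pointer width.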
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;

  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;

  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));

  MI.eraseFromParent();

         "Unsupported size for EmitAtomicCmpSwapPartial.");

  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)

  MI.eraseFromParent();

  unsigned RdhwrOpc, DestReg;
  if (PtrVT == MVT::i64) {
    RdhwrOpc = Mips::RDHWR64;
    RdhwrOpc = Mips::RDHWR;

                     FCC0, Dest, CondRes);

         "Floating point operand expected.");

  EVT Ty = Op.getValueType();
         "Windows is the only supported COFF target");

  EVT Ty = Op.getValueType();
      .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  unsigned ArgSizeInBytes =
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;

                                 bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  if (HasExtractInsert) {
  if (TyX == MVT::f32)

                                 bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  if (HasExtractInsert) {
    if (WidthX > WidthY)
    else if (WidthY > WidthX)
  if (WidthX > WidthY)
  else if (WidthY > WidthX)

                                      bool HasExtractInsert) const {
                            Op.getOperand(0), Const1);
  if (HasExtractInsert)
  if (Op.getValueType() == MVT::f32)

                                      bool HasExtractInsert) const {
  if (HasExtractInsert)

  EVT VT = Op.getValueType();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  EVT VT = Op.getValueType();
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;

  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

                           DL, VTList, Cond, ShiftRightHi,

  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
                     LD->getMemOperand());
  EVT MemVT = LD->getMemoryVT();
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));
  SDValue Ops[] = { SRL, LWR.getValue(1) };
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
  EVT ValTy = Op->getValueType(0);
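// The fragments below appear to come from the O32 calling-convention helper
// CC_MipsO32 and its FP32/FP64 wrappers: integer arguments go in A0-A3,
// f32/f64 values are forced into integer registers when
// AllocateFloatsInIntReg is set (for example on variadic calls), and the two
// wrappers differ only in the F64Regs set they pass (D6/D7 versus
// D12_64/D14_64).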
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };

  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())

  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));

  if (ValVT == MVT::i32 && isVectorFloat) {
    if (Reg == Mips::A2)
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    if (Reg == Mips::A1 || Reg == Mips::A3)
  if (ValVT == MVT::f32) {
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)

  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,

  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,

#include "MipsGenCallingConv.inc"

  return CC_Mips_FixedArg;

                                     const SDLoc &DL, bool IsTailCall,

    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,

  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));

  for (auto &R : RegsToPass) {
  for (auto &R : RegsToPass)

  assert(Mask && "Missing call preserved mask for calling convention");
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {

  switch (MI.getOpcode()) {
  case Mips::JALRPseudo:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
      Sym = G->getGlobal()->getName();
            dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();
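// LowerCall fragments follow: call operands are analyzed with CC_Mips, tail
// calls are only kept for callees with internal/local/private linkage or
// hidden/protected visibility, byval arguments are forwarded through
// passByValArg, an O32 f64 split across two i32 locations is pushed as a
// Lo/Hi register pair, and getOpndList finally assembles the operand list
// for the call node.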
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());

  unsigned ReservedArgArea =
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  CCInfo.AnalyzeCallOperands(Outs, CC_Mips);

  unsigned StackSize = CCInfo.getStackSize();

  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
           "site marked musttail");

  StackSize = alignTo(StackSize, StackAlignment);

  if (!(IsTailCall || MemcpyInByVal))

  std::deque<std::pair<unsigned, SDValue>> RegsToPass;

  CCInfo.rewindByValRegsInfo();

  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    bool UseUpperBits = false;

    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
      CCInfo.nextInRegsParam();

      if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
          (ValVT == MVT::f64 && LocVT == MVT::i64) ||
          (ValVT == MVT::i64 && LocVT == MVT::f64))
      else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
        Register LocRegHigh = ArgLocs[++i].getLocReg();
        RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
        RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;

      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

                                         Chain, Arg, DL, IsTailCall, DAG));

  if (!MemOpChains.empty())

  bool GlobalOrExternal = false, IsCallReloc = false;

  if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (auto *F = dyn_cast<Function>(N->getGlobal())) {
      if (F->hasFnAttribute("long-call"))
        UseLongCalls = true;
      else if (F->hasFnAttribute("short-call"))
        UseLongCalls = false;

      G->getGlobal()->hasDLLImportStorageClass()) {
           "Windows is the only supported COFF target");
      if (InternalLinkage)
    GlobalOrExternal = true;
    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;

  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);

  if (!(MemcpyInByVal)) {

  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,

SDValue MipsTargetLowering::LowerCallResult(
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
                                     RVLocs[i].getLocVT(), InGlue);
    unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
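// LowerFormalArguments fragments: functions marked "interrupt" may not take
// arguments, incoming locations are computed with CC_Mips_FixedArg, byval
// parameters are materialized through copyByValRegs, and writeVarArgRegs
// spills the remaining argument registers for the variable-argument case.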
SDValue MipsTargetLowering::LowerFormalArguments(
  std::vector<SDValue> OutChains;

  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");

  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                           CCInfo.getInRegsParamsCount() > 0);

  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();

  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();

    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();

      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
      else if (ABI.IsO32() && RegVT == MVT::i32 &&
               ValVT == MVT::f64) {
                               ArgValue, ArgValue2);

                             LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));

  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {
    if (Ins[InsIdx].Flags.isSRet()) {

    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);

  if (!OutChains.empty()) {
    OutChains.push_back(Chain);

  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);

bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
                                                       bool IsSigned) const {

  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;

      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();

    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

    return LowerInterruptReturn(RetOps, DL, DAG);
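// Inline-assembly constraint handling follows: getConstraintType and
// getSingleConstraintMatchWeight classify single-letter constraints,
// parsePhysicalReg splits explicit "{...}" register names into a prefix and
// an index, parseRegForInlineAsmConstraint resolves $f/$fcc/$w/$msa*/hi/lo
// names to register classes, and getRegForInlineAsmConstraint picks
// GPR32/GPR64, MSA128*, FGR32/FGR64/AFGR64 or LO/HI classes based on the
// constraint letter and value type.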
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC")

MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (isa<ConstantInt>(CallOperandVal))

                              unsigned long long &Reg) {
  if (C.front() != '{' || C.back() != '}')
    return std::make_pair(false, false);
  I = std::find_if(B, E, isdigit);
    return std::make_pair(true, false);

  return VT.bitsLT(MinVT) ? MinVT : VT;

std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
  unsigned long long Reg;
    return std::make_pair(0U, nullptr);

  if ((Prefix == "hi" || Prefix == "lo")) {
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.starts_with("$msa")) {
      return std::make_pair(0U, nullptr);
        .Case("$msair", Mips::MSAIR)
        .Case("$msacsr", Mips::MSACSR)
        .Case("$msaaccess", Mips::MSAAccess)
        .Case("$msasave", Mips::MSASave)
        .Case("$msamodify", Mips::MSAModify)
        .Case("$msarequest", Mips::MSARequest)
        .Case("$msamap", Mips::MSAMap)
        .Case("$msaunmap", Mips::MSAUnmap)
      return std::make_pair(0U, nullptr);
    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
    return std::make_pair(0U, nullptr);

  if (Prefix == "$f") {
    if (VT == MVT::Other)
    if (RC == &Mips::AFGR64RegClass) {
  } else if (Prefix == "$fcc")
    RC = TRI->getRegClass(Mips::FCCRegClassID);
  else if (Prefix == "$w") {

  assert(Reg < RC->getNumRegs());
  return std::make_pair(*(RC->begin() + Reg), RC);

std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
          return std::make_pair(0U, &Mips::CPU16RegsRegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
        return std::make_pair(0U, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::v16i8)
        return std::make_pair(0U, &Mips::MSA128BRegClass);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return std::make_pair(0U, &Mips::MSA128HRegClass);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return std::make_pair(0U, &Mips::MSA128WRegClass);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return std::make_pair(0U, &Mips::MSA128DRegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &Mips::FGR32RegClass);
        return std::make_pair(0U, &Mips::FGR64RegClass);
      return std::make_pair(0U, &Mips::AFGR64RegClass);
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      return std::make_pair(0U, nullptr);
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
      return std::make_pair(0U, nullptr);

  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);

void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      std::vector<SDValue> &Ops,
  if (Constraint.size() > 1)
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
    int64_t Val = C->getSExtValue();
    if (isInt<16>(Val)) {
    int64_t Val = C->getZExtValue();
    if (isUInt<16>(Val)) {
    int64_t Val = C->getSExtValue();
    if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
    int64_t Val = C->getSExtValue();
    if ((Val >= -65535) && (Val <= -1)) {
    int64_t Val = C->getSExtValue();
    if ((isInt<15>(Val))) {
    int64_t Val = C->getSExtValue();
    if ((Val <= 65535) && (Val >= 1)) {
  Ops.push_back(Result);
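// Simple legality queries: icmp and add immediates are legal when they fit in
// a signed 16-bit field (isInt<16>), and an FP immediate is only "legal" if
// it is +0.0 of type f32 or f64 (negative zero is rejected).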
bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,

EVT MipsTargetLowering::getOptimalMemOpType(

bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                      bool ForCodeSize) const {
  if (VT != MVT::f32 && VT != MVT::f64)
  if (Imm.isNegZero())
  return Imm.isZero();

bool MipsTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm);

bool MipsTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm);

unsigned MipsTargetLowering::getJumpTableEncoding() const {

SDValue MipsTargetLowering::getPICJumpTableRelocBase(SDValue Table,

bool MipsTargetLowering::useSoftFloat() const {
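// ByVal handling: copyByValRegs copies the register-passed part of a byval
// aggregate into its stack frame object, passByValArg loads whole registers
// first and then progressively smaller pieces (halving LoadSizeInBytes) for
// the leftover bytes before falling back to a memcpy of MemCpySize,
// writeVarArgRegs stores the unused variadic argument registers to the
// va_list save area, and HandleByVal reserves consecutive argument registers,
// skipping one when the alignment requires an even register pair.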
void MipsTargetLowering::copyByValRegs(
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
        (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);

  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
    OutChains.push_back(Store);

void MipsTargetLowering::passByValArg(
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0;
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  unsigned NumRegs = LastReg - FirstReg;
  bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);

  for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
    unsigned ArgReg = ArgRegs[FirstReg + I];
    RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));

  if (ByValSizeInBytes == OffsetInBytes)

  if (LeftoverBytes) {
    for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
         OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
      unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
      if (RemainingSizeInBytes < LoadSizeInBytes)
        Shamt = TotalBytesLoaded * 8;
        Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
      OffsetInBytes += LoadSizeInBytes;
      TotalBytesLoaded += LoadSizeInBytes;
      Alignment = std::min(Alignment, Align(LoadSizeInBytes));
    unsigned ArgReg = ArgRegs[FirstReg + I];
    RegsToPass.push_back(std::make_pair(ArgReg, Val));

  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
                        Align(Alignment), false, false,

void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));

  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
    OutChains.push_back(Store);

                                     Align Alignment) const {
  assert(Size && "Byval argument's size shouldn't be 0.");
  unsigned FirstReg = 0;
  unsigned NumRegs = 0;
         Alignment >= Align(RegSizeInBytes) &&
         "Byval argument's alignment should be a multiple of RegSizeInBytes.");
  if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
    State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);

  for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
       Size -= RegSizeInBytes, ++I, ++NumRegs)
                                         unsigned Opc) const {
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();

         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  MI.eraseFromParent();

        .Case("$28", Mips::GP_64)
        .Case("sp", Mips::SP_64)
        .Case("$28", Mips::GP)
        .Case("sp", Mips::SP)
  unsigned Imm = MI.getOperand(2).getImm();
  Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      .addImm(Imm + (IsLittle ? 0 : 3))
      .addImm(Imm + (IsLittle ? 3 : 0))
  MI.eraseFromParent();

  unsigned Imm = MI.getOperand(2).getImm();
  Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
  Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
  Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      .addImm(Imm + (IsLittle ? 0 : 4));
      .addImm(Imm + (IsLittle ? 4 : 0));
  Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
      .addImm(Imm + (IsLittle ? 0 : 7))
      .addImm(Imm + (IsLittle ? 3 : 4))
      .addImm(Imm + (IsLittle ? 4 : 3))
      .addImm(Imm + (IsLittle ? 7 : 0))
  MI.eraseFromParent();

  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
  Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
  Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      .addImm(Imm + (IsLittle ? 0 : 3));
      .addImm(Imm + (IsLittle ? 3 : 0));
  MI.eraseFromParent();

  Register StoreVal = MI.getOperand(0).getReg();
  unsigned Imm = MI.getOperand(2).getImm();
  Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
  Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
  Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
  Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      .addImm(Imm + (IsLittle ? 0 : 4));
      .addImm(Imm + (IsLittle ? 4 : 0));
  Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
  Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      .addImm(Imm + (IsLittle ? 0 : 3));
      .addImm(Imm + (IsLittle ? 3 : 0));
      .addImm(Imm + (IsLittle ? 4 : 7));
      .addImm(Imm + (IsLittle ? 7 : 4));
  MI.eraseFromParent();
unsigned const MachineRegisterInfo * MRI
static SDValue performSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
If the operand is a bitwise AND with a constant RHS, and the shift has a constant RHS and is the only...
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ATTRIBUTE_UNUSED
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
Register const TargetRegisterInfo * TRI
cl::opt< bool > EmitJalrReloc
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State) LLVM_ATTRIBUTE_UNUSED
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const MipsSubtarget &Subtarget)
static bool invertFPCondCodeUser(Mips::CondCode CC)
This function returns true if the floating point conditional branches and conditional moves which use...
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State, ArrayRef< MCPhysReg > F64Regs)
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, bool SingleFloat)
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static const MCPhysReg Mips64DPRegs[8]
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, bool IsLittle)
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset)
static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
static std::pair< bool, bool > parsePhysicalReg(StringRef C, StringRef &Prefix, unsigned long long &Reg)
This is a helper function to parse a physical register string and split it into non-numeric and numer...
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset)
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
cl::opt< bool > EmitJalrReloc
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op)
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static cl::opt< bool > NoZeroDivCheck("mno-check-zero-division", cl::Hidden, cl::desc("MIPS: Don't trap on integer division by zero."), cl::init(false))
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performSignExtendCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA, EVT ArgVT, const SDLoc &DL, SelectionDAG &DAG)
static Mips::CondCode condCodeToFCC(ISD::CondCode CC)
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, const SDLoc &DL)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
SI optimize exec mask operations pre RA
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
static const MCPhysReg IntRegs[32]
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static const MCPhysReg F32Regs[64]
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
LLVM Basic Block Representation.
static BranchProbability getOne()
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
CallingConv::ID getCallingConv() const
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
bool isUpperBitsInLoc() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
const char * getSymbol() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
bool hasDLLImportStorageClass() const
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool hasInternalLinkage() const
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
static auto fp_fixedlen_vector_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ MOVolatile
The memory access is volatile.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
bool ArePtrs64bit() const
unsigned GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const
Obtain the size of the area allocated by the callee for arguments.
unsigned GetPtrAddiuOp() const
unsigned GetPtrAndOp() const
ArrayRef< MCPhysReg > GetByValArgRegs() const
The registers to use for byval arguments.
unsigned GetNullPtr() const
ArrayRef< MCPhysReg > getVarArgRegs(bool isGP64bit) const
The registers to use for the variable argument list.
static SpecialCallingConvType getSpecialCallingConvForCallee(const SDNode *Callee, const MipsSubtarget &Subtarget)
Determine the SpecialCallingConvType for the given callee.
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
void setVarArgsFrameIndex(int Index)
unsigned getSRetReturnReg() const
int getVarArgsFrameIndex() const
MachinePointerInfo callPtrInfo(MachineFunction &MF, const char *ES)
Create a MachinePointerInfo that has an ExternalSymbolPseudoSourceValue object representing a GOT ent...
Register getGlobalBaseReg(MachineFunction &MF)
void setSRetReturnReg(unsigned Reg)
void setFormalArgInfo(unsigned Size, bool HasByval)
static const uint32_t * getMips16RetHelperMask()
bool inMicroMipsMode() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool inMips16Mode() const
bool inAbs2008Mode() const
const MipsRegisterInfo * getRegisterInfo() const override
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool hasExtractInsert() const
Features related to the presence of specific instructions.
bool isTargetCOFF() const
bool isTargetWindows() const
bool isSingleFloat() const
bool useLongCalls() const
unsigned getGPRSizeInBytes() const
bool inMips16HardFloat() const
const TargetFrameLowering * getFrameLowering() const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static const MipsTargetLowering * create(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN64) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Break down vectors to the correct number of gpr sized integers.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - get the ISD::SETCC result ValueType
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue getDllimportVariable(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
CCAssignFn * CCAssignFnForReturn() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue getDllimportSymbol(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
CCAssignFn * CCAssignFnForCall() const
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual void getOpndList(SmallVectorImpl< SDValue > &Ops, std::deque< std::pair< unsigned, SDValue > > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const
This function fills Ops, which is the list of operands that will later be used when a function call n...
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN32OrN64) const
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const
const MipsSubtarget & Subtarget
void HandleByVal(CCState *, unsigned &, Align) const override
Target-specific cleanup for formal ByVal parameters.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const
Return true if this constant should be placed into small data section.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
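A minimal sketch of how getMergeValues is typically used when a lowering routine produces two results (for instance a quotient/remainder pair). DAG, DL, Lo and Hi are assumed to already be in scope, as they would be inside such a routine:

  // Package two independent SDValues as the two results of one node.
  SDValue Parts[] = { Lo, Hi };
  SDValue Merged = DAG.getMergeValues(Parts, DL);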
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SDValue.
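A hedged fragment showing getSetCC when only an ISD::CondCode is at hand. DAG, DL, LHS and RHS are assumed to be in scope, and the i32 result type is purely illustrative:

  // Build (setcc LHS, RHS, setlt) with an i32 boolean result.
  SDValue Cond = DAG.getSetCC(DL, MVT::i32, LHS, RHS, ISD::SETLT);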
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value AND a token chain.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
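A sketch of building an ISD::STORE with this helper. Chain, DL, Val, Ptr and PtrInfo are assumed to be in scope; the 4-byte alignment is illustrative:

  // Chain -> store Val to Ptr with 4-byte alignment and default MMO flags.
  SDValue Store = DAG.getStore(Chain, DL, Val, Ptr, PtrInfo, Align(4));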
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END pair.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an SDValue.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
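A sketch of SplitScalar cutting a 64-bit scalar into i32 halves via EXTRACT_ELEMENT. DAG, DL and a 64-bit SDValue Val are assumed to be in scope:

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);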
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
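A self-contained sketch of the StringSwitch pattern commonly used for name lookups such as getRegisterByName; the names and numeric values below are illustrative only, not real register encodings:

  #include "llvm/ADT/StringSwitch.h"

  static unsigned lookupExampleReg(llvm::StringRef Name) {
    return llvm::StringSwitch<unsigned>(Name)
        .Case("sp", 1) // illustrative encoding
        .Case("fp", 2) // illustrative encoding
        .Default(0);   // 0 means "not found" in this sketch
  }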
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
Align getStackAlign() const
getStackAlign - This method returns the alignment to which the stack pointer must be aligned on entry to a function.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
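A hedged fragment as it would appear inside a TargetLowering-derived constructor; the opcode/type pairs are chosen for illustration and are not a record of what any particular backend registers:

  // Route these (opcode, type) pairs to LowerOperation / generic expansion.
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);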
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
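A related fragment for the memory-legalization hooks, again assumed to run inside a TargetLowering-derived constructor with illustrative type choices:

  // Extending loads from i1 are handled by promoting the memory type.
  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote);
  // Truncating f64 -> f32 stores are split into FP_ROUND plus a plain store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);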
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual TargetLoweringObjectFile * getObjFileLowering() const
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command line.
unsigned EmitCallGraphSection
Emit section containing call graph metadata.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16 bit) floating numbers.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type (or equivalently for vector format conversions, etc).
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin, which is used to return from exception.
@ SIGN_EXTEND
Conversion operators.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
@ SHL
Shift and rotation operations.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA), generally the value of the stack pointer at the call site in the previous frame.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence, and carry arbitrary information that target might want to know.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
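A small fragment, assuming the usual ISD/SelectionDAG headers are available; for integer comparisons !(a < b) is (a >= b), so SETLT inverts to SETGE:

  ISD::CondCode Inv = ISD::getSetCCInverse(ISD::SETLT, MVT::i32);
  assert(Inv == ISD::SETGE && "inverse of signed less-than");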
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ EarlyClobber
Register definition happens before uses.
Not(const Pred &P) -> Not< Pred >
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version).
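A self-contained check of isShiftedMask_64 (llvm/Support/MathExtras.h); because the function is constexpr, the property can be verified at compile time:

  #include "llvm/Support/MathExtras.h"

  // 0x0FF0 is a single contiguous run of ones shifted left by four bits.
  static_assert(llvm::isShiftedMask_64(0x0FF0ull), "contiguous run of ones");
  // 0x0F0F contains two separate runs, so it is not a shifted mask.
  static_assert(!llvm::isShiftedMask_64(0x0F0Full), "two separate runs");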
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
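A one-line compile-time check of divideCeil (also llvm/Support/MathExtras.h):

  #include "llvm/Support/MathExtras.h"

  // ceil(10 / 4) == 3.
  static_assert(llvm::divideCeil(10, 4) == 3, "ceiling division");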
const MipsTargetLowering * createMips16TargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
Create MipsTargetLowering objects.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
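A short sketch of the Align-based alignTo overload (llvm/Support/Alignment.h); the byte counts are illustrative:

  #include "llvm/Support/Alignment.h"
  #include <cassert>

  void alignToExample() {
    // 10 bytes rounded up to the next multiple of an 8-byte alignment.
    assert(llvm::alignTo(10, llvm::Align(8)) == 16);
  }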
DWARFExpression::Operation Op
const MipsTargetLowering * createMipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
LLVM_ABI bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
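A sketch of getAsUnsignedInteger, which follows the StringRef::getAsInteger convention of returning true on failure; the include path below is an assumption:

  #include "llvm/ADT/StringExtras.h" // assumed to declare getAsUnsignedInteger
  #include <cassert>

  void parseExample() {
    unsigned long long Value = 0;
    bool Failed = llvm::getAsUnsignedInteger("42", /*Radix=*/10, Value);
    assert(!Failed && Value == 42);
  }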
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Align getNonZeroOrigAlign() const
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call argument and its forwarding register.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const