63 "branch-hint-probability-threshold",
64 cl::desc(
"The probability threshold of enabling branch hint."),
void X86AsmPrinter::StackMapShadowTracker::count(const MCInst &Inst,
  CurrentShadowSize += Code.size();
  if (CurrentShadowSize >= RequiredShadowSize)

void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    emitX86Nops(OutStreamer, RequiredShadowSize - CurrentShadowSize,

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
    : Ctx(asmprinter.OutContext), MF(mf), TM(mf.getTarget()),
         "Isn't a symbol reference");
    Suffix = "$non_lazy_ptr";
    Name += DL.getPrivateGlobalPrefix();
  } else if (MO.isMBB()) {
  Sym = Ctx.getOrCreateSymbol(Name);
  if (!StubSym.getPointer()) {
        getMachOMMI().getGVStubEntry(Sym);
  const MCExpr *Expr = nullptr;
  assert(MAI.doesSetDirectiveSuppressReloc());

  return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;

    Opcode = X86::JMP32r;
    Opcode = X86::JMP32m;
  case X86::TAILJMPr64:
    Opcode = X86::JMP64r;
  case X86::TAILJMPm64:
    Opcode = X86::JMP64m;
  case X86::TAILJMPr64_REX:
    Opcode = X86::JMP64r_REX;
  case X86::TAILJMPm64_REX:
    Opcode = X86::JMP64m_REX;
  case X86::TAILJMPd64:
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    if (auto Op = LowerMachineOperand(MI, MO); Op.isValid())
  bool In64BitMode = AsmPrinter.getSubtarget().is64Bit();
         "Unexpected # of LEA operands");
         "LEA has segment specified!");
  case X86::MULX64Hrm: {
    case X86::MULX32Hrr: NewOpc = X86::MULX32rr; break;
    case X86::MULX32Hrm: NewOpc = X86::MULX32rm; break;
    case X86::MULX64Hrr: NewOpc = X86::MULX64rr; break;
    case X86::MULX64Hrm: NewOpc = X86::MULX64rm; break;
  case X86::CALL64pcrel32:
  case X86::EH_RETURN64: {
  case X86::CLEANUPRET: {
  case X86::CATCHRET: {
    unsigned ReturnReg = In64BitMode ? X86::RAX : X86::EAX;
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPd64:
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
  case X86::TAILJMPm64:
  case X86::TAILJMPm64_REX:
           "Unexpected number of operands!");
  case X86::MASKMOVDQU:
  case X86::VMASKMOVDQU:
        MI->findRegisterDefOperand(X86::EFLAGS, nullptr);
    if (!MF.getFunction().hasOptSize() && FlagDef && FlagDef->isDead())
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_addrX32:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
  case X86::TLS_base_addrX32:
  case X86::TLS_desc32:
  case X86::TLS_desc64:
      MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), Specifier, Ctx);
    EmitAndCountInstruction(
            .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
            .addReg(Is64Bits ? X86::RIP : X86::EBX)
    EmitAndCountInstruction(
            .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
  } else if (Is64Bits) {
    if (NeedsPadding && Is64BitsLP64)
  EmitAndCountInstruction(
  EmitAndCountInstruction(
  unsigned MaxNopLength = 1;
  if (Subtarget->is64Bit()) {
    if (Subtarget->hasFeature(X86::TuningFast7ByteNOP))
    else if (Subtarget->hasFeature(X86::TuningFast15ByteNOP))
    else if (Subtarget->hasFeature(X86::TuningFast11ByteNOP))
  }
  if (Subtarget->is32Bit())
  NumBytes = std::min(NumBytes, MaxNopLength);

  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  IndexReg = Displacement = SegmentReg = 0;
    SegmentReg = X86::CS;

  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.emitBytes("\x66");

      .addImm(Displacement)
  assert(NopSize <= NumBytes && "We overemitted?");

  unsigned NopsToEmit = NumBytes;
    NumBytes -= emitNop(OS, NumBytes, Subtarget);
    assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    switch (CallTarget.getType()) {
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      CallOpcode = X86::CALL64pcrel32;
      CallOpcode = X86::CALL64r;
    CallInst.addOperand(CallTargetMCOp);
    maybeEmitNopAfterCallForWindowsEH(&MI);
void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                     X86MCInstLower &MCIL) {
  unsigned OperandsBeginIdx = 4;
  MI.setOpcode(Opcode);
  if (DefRegister != X86::NoRegister)
    if (auto Op = MCIL.LowerMachineOperand(&FaultingMI, MO); Op.isValid())
                                    X86MCInstLower &MCIL) {
  bool Is64Bits = Subtarget->is64Bit();
  EmitAndCountInstruction(
      MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)

  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");
  int64_t PrefixNops = 0;
  const Register AddrReg = MI.getOperand(0).getReg();
  unsigned TempReg = AddrReg == X86::R10 ? X86::R11D : X86::R10D;
  EmitAndCountInstruction(
          .addReg(X86::NoRegister)
          .addReg(X86::NoRegister)
          .addImm(-(PrefixNops + 4))
          .addReg(X86::NoRegister));
  EmitAndCountInstruction(
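  // Roughly: KCFI_CHECK precedes an indirect call and compares the 32-bit
  // KCFI type hash stored just before the callee (behind any patchable prefix
  // NOPs, hence the -(PrefixNops + 4) displacement) against the expected
  // hash. The temporary register is picked so it never aliases the
  // call-target register (R11D when the target is in R10, R10D otherwise),
  // and a mismatch traps at a location recorded via emitKCFITrapEntry().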
void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
  const auto &Reg = MI.getOperand(0).getReg();
                                &ShadowBase, &MappingScale, &OrShadowOffset);
  std::string SymName = ("__asan_check_" + Name + "_" + Op + "_" +
                         Twine(1ULL << AccessInfo.AccessSizeIndex) + "_" +
      "OrShadowOffset is not supported with optimized callbacks");
  EmitAndCountInstruction(
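  // Roughly: ASAN_CHECK_MEMACCESS becomes a call to an outlined
  // AddressSanitizer callback whose symbol name encodes the access kind, the
  // shadow-combination mode, the access size in bytes
  // (1ULL << AccessSizeIndex), and the register holding the address, so each
  // check shape gets its own specialized routine. The shadow parameters come
  // from getAddressSanitizerParams(), and the OrShadowOffset scheme is
  // rejected because the optimized callbacks do not support it.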
                                     X86MCInstLower &MCIL) {
  auto NextMI = std::find_if(std::next(MI.getIterator()),
                             MI.getParent()->end().getInstrIterator(),
                             [](auto &II) { return !II.isMetaInstruction(); });
  unsigned MinSize = MI.getOperand(0).getImm();
  if (NextMI != MI.getParent()->end() && !NextMI->isInlineAsm()) {
    MCIL.Lower(&*NextMI, MCI);
  if (Code.size() < MinSize) {
    if (MinSize == 2 && Subtarget->is32Bit() &&
        (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
          MCInstBuilder(X86::MOV32rr_REV).addReg(X86::EDI).addReg(X86::EDI),
    assert(NopSize == MinSize && "Could not implement MinSize!");

  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
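  // Roughly: LowerPATCHABLE_OP guarantees that the next real (non-meta)
  // instruction occupies at least MinSize bytes so the site can be
  // hot-patched; if it encodes smaller, padding is emitted instead -- on
  // 32-bit targets needing exactly two bytes this is the classic
  // "movl %edi, %edi" (MOV32rr_REV), otherwise NOPs. The LowerSTACKMAP lines
  // record the stackmap and re-arm the shadow tracker with the shadow size
  // requested by the STACKMAP operand.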
                                   X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
      CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
                                           MCIL.GetSymbolFromOperand(CalleeMO));
    Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
    EmitAndCountInstruction(
        "Lowering patchpoint with thunks not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");
void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                              X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");
  OutStreamer->AddComment("# XRay Custom Event Log");
  const Register DestRegs[] = {X86::RDI, X86::RSI};
  bool UsedMask[] = {false, false};
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
      assert(Op.isReg() && "Only support arguments in registers");
      if (SrcRegs[I] != DestRegs[I]) {
        EmitAndCountInstruction(
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (SrcRegs[I] != DestRegs[I])
      EmitAndCountInstruction(
          .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
  OutStreamer->AddComment("xray custom event end.");
void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
                                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64");
  OutStreamer->AddComment("# XRay Typed Event Log");
  const Register DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
  bool UsedMask[] = {false, false, false};
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
      assert(Op.isReg() && "Only supports arguments in registers");
      if (SrcRegs[I] != DestRegs[I]) {
        EmitAndCountInstruction(
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    EmitAndCountInstruction(
          .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
                                                  X86MCInstLower &MCIL) {
  if (F.hasFnAttribute("patchable-function-entry")) {
    if (F.getFnAttribute("patchable-function-entry")
            .getAsInteger(10, Num))

                                     X86MCInstLower &MCIL) {
  unsigned OpCode = MI.getOperand(0).getImm();
  Ret.setOpcode(OpCode);
  if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
                                             X86MCInstLower &MCIL) {
  bool IsConditional = TC.getOpcode() == X86::JCC_1;
  if (IsConditional) {
  for (auto &MO : TCOperands)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
                                 unsigned SrcOpIdx) {
    CS << " {%" << Mask << "}";

  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';
    bool IsFirst = true;
        (ShuffleMask[i] < (int)e) == isSrc1) {
      CS << ShuffleMask[i] % (int)e;
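  // Roughly: getShuffleComment() renders a shuffle mask as an assembly
  // comment of the form "dst = src1[indices],src2[indices]", folding the
  // two-source form back to one source when both operands are the same
  // register (indices >= e are rebased into the first source), and
  // printDstRegisterName() appends a " {%<mask>}" suffix when the destination
  // is mask-predicated.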
  std::string Comment;

                          bool PrintZero = false) {
    CS << (PrintZero ? 0ULL : Val.getRawData()[i]);

                          bool PrintZero = false) {
    Flt.toString(Str, 0, 0);
  if (isa<UndefValue>(COp)) {
  } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
    if (auto VTy = dyn_cast<FixedVectorType>(CI->getType())) {
      for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
  } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
    if (auto VTy = dyn_cast<FixedVectorType>(CF->getType())) {
      for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
  } else if (auto *CDS = dyn_cast<ConstantDataSequential>(COp)) {
    Type *EltTy = CDS->getElementType();
    unsigned E = std::min(BitWidth / EltBits, (unsigned)CDS->getNumElements());
    for (unsigned I = 0; I != E; ++I) {
  } else if (auto *CV = dyn_cast<ConstantVector>(COp)) {
    unsigned EltBits = CV->getType()->getScalarSizeInBits();
    unsigned E = std::min(BitWidth / EltBits, CV->getNumOperands());
    for (unsigned I = 0; I != E; ++I) {
                              int SclWidth, int VecWidth,
                              const char *ShuffleComment) {
  std::string Comment;
  for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
  CS << ShuffleComment;

  std::string Comment;
  for (int l = 0; l != Repeats; ++l) {
                        int SrcEltBits, int DstEltBits, bool IsSext) {
  if (C && C->getType()->getScalarSizeInBits() == unsigned(SrcEltBits)) {
    if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
      int NumElts = CDS->getNumElements();
      std::string Comment;
      for (int i = 0; i != NumElts; ++i) {
        if (CDS->getElementType()->isIntegerTy()) {
          APInt Elt = CDS->getElementAsAPInt(i);
          Elt = IsSext ? Elt.sext(DstEltBits) : Elt.zext(DstEltBits);

                           int SrcEltBits, int DstEltBits) {

                           int SrcEltBits, int DstEltBits) {
  if (printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, false))
  std::string Comment;
  assert((Width % DstEltBits) == 0 && (DstEltBits % SrcEltBits) == 0 &&
         "Illegal extension ratio");
1714 "SEH_ instruction Windows and UEFI only");
1720 switch (
MI->getOpcode()) {
1721 case X86::SEH_PushReg:
1724 case X86::SEH_StackAlloc:
1727 case X86::SEH_StackAlign:
1730 case X86::SEH_SetFrame:
1731 assert(
MI->getOperand(1).getImm() == 0 &&
1732 ".cv_fpo_setframe takes no offset");
1735 case X86::SEH_EndPrologue:
1738 case X86::SEH_SaveReg:
1739 case X86::SEH_SaveXMM:
1740 case X86::SEH_PushFrame:
1750 switch (
MI->getOpcode()) {
1751 case X86::SEH_PushReg:
1755 case X86::SEH_SaveReg:
1757 MI->getOperand(1).getImm());
1760 case X86::SEH_SaveXMM:
1762 MI->getOperand(1).getImm());
1765 case X86::SEH_StackAlloc:
1766 OutStreamer->emitWinCFIAllocStack(
MI->getOperand(0).getImm());
1769 case X86::SEH_SetFrame:
1771 MI->getOperand(1).getImm());
1774 case X86::SEH_PushFrame:
1775 OutStreamer->emitWinCFIPushFrame(
MI->getOperand(0).getImm());
1778 case X86::SEH_EndPrologue:
1782 case X86::SEH_BeginEpilogue:
1786 case X86::SEH_EndEpilogue:
1790 case X86::SEH_UnwindV2Start:
1794 case X86::SEH_UnwindVersion:
1795 OutStreamer->emitWinCFIUnwindVersion(
MI->getOperand(0).getImm());
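  // Roughly: EmitSEHInstruction() has two emission paths. For 32-bit MSVC
  // CodeView frames it emits .cv_fpo_* directives through the
  // X86TargetStreamer (hence the assert that .cv_fpo_setframe takes no
  // offset); for Win64-style unwind info it emits the matching emitWinCFI*
  // directives (push-reg, save-reg/xmm, stack-alloc, set-frame, the epilogue
  // markers, and the unwind-version records).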
  switch (MI->getOpcode()) {
  case X86::VPSHUFBrm:
  case X86::VPSHUFBYrm:
  case X86::VPSHUFBZ128rm:
  case X86::VPSHUFBZ128rmk:
  case X86::VPSHUFBZ128rmkz:
  case X86::VPSHUFBZ256rm:
  case X86::VPSHUFBZ256rmk:
  case X86::VPSHUFBZ256rmkz:
  case X86::VPSHUFBZrm:
  case X86::VPSHUFBZrmk:
  case X86::VPSHUFBZrmkz: {
  case X86::VPERMILPSrm:
  case X86::VPERMILPSYrm:
  case X86::VPERMILPSZ128rm:
  case X86::VPERMILPSZ128rmk:
  case X86::VPERMILPSZ128rmkz:
  case X86::VPERMILPSZ256rm:
  case X86::VPERMILPSZ256rmk:
  case X86::VPERMILPSZ256rmkz:
  case X86::VPERMILPSZrm:
  case X86::VPERMILPSZrmk:
  case X86::VPERMILPSZrmkz: {
  case X86::VPERMILPDrm:
  case X86::VPERMILPDYrm:
  case X86::VPERMILPDZ128rm:
  case X86::VPERMILPDZ128rmk:
  case X86::VPERMILPDZ128rmkz:
  case X86::VPERMILPDZ256rm:
  case X86::VPERMILPDZ256rmk:
  case X86::VPERMILPDZ256rmkz:
  case X86::VPERMILPDZrm:
  case X86::VPERMILPDZrmk:
  case X86::VPERMILPDZrmkz: {
  case X86::VPERMIL2PDrm:
  case X86::VPERMIL2PSrm:
  case X86::VPERMIL2PDYrm:
  case X86::VPERMIL2PSYrm: {
           "Unexpected number of operands!");
    if (!CtrlOp.isImm())
    switch (MI->getOpcode()) {
    case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
    case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
  case X86::VPPERMrrm: {
  case X86::MMX_MOVQ64rm: {
    std::string Comment;
    if (auto *CF = dyn_cast<ConstantFP>(C)) {
      CS << "0x" << toString(CF->getValueAPF().bitcastToAPInt(), 16, false);
#define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
  case X86::Prefix##Instr##Suffix##rm##Postfix:

#define CASE_ARITH_RM(Instr) \
  INSTR_CASE(, Instr, , ) \
  INSTR_CASE(V, Instr, , ) \
  INSTR_CASE(V, Instr, Y, ) \
  INSTR_CASE(V, Instr, Z128, ) \
  INSTR_CASE(V, Instr, Z128, k) \
  INSTR_CASE(V, Instr, Z128, kz) \
  INSTR_CASE(V, Instr, Z256, ) \
  INSTR_CASE(V, Instr, Z256, k) \
  INSTR_CASE(V, Instr, Z256, kz) \
  INSTR_CASE(V, Instr, Z, ) \
  INSTR_CASE(V, Instr, Z, k) \
  INSTR_CASE(V, Instr, Z, kz)

    if (C->getType()->getScalarSizeInBits() == 8) {
      std::string Comment;
      unsigned VectorWidth =
    if (C->getType()->getScalarSizeInBits() == 16) {
      std::string Comment;
      unsigned VectorWidth =
#define MASK_AVX512_CASE(Instr) \

  case X86::MOVSDrm_alt:
  case X86::VMOVSDrm_alt:
  case X86::VMOVSDZrm_alt:
  case X86::MOVQI2PQIrm:
  case X86::VMOVQI2PQIrm:
  case X86::VMOVQI2PQIZrm:
  case X86::VMOVSHZrm_alt:
                       "mem[0],zero,zero,zero,zero,zero,zero,zero");
  case X86::MOVSSrm_alt:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSSZrm_alt:
  case X86::MOVDI2PDIrm:
  case X86::VMOVDI2PDIrm:
  case X86::VMOVDI2PDIZrm:
#define MOV_CASE(Prefix, Suffix) \
  case X86::Prefix##MOVAPD##Suffix##rm: \
  case X86::Prefix##MOVAPS##Suffix##rm: \
  case X86::Prefix##MOVUPD##Suffix##rm: \
  case X86::Prefix##MOVUPS##Suffix##rm: \
  case X86::Prefix##MOVDQA##Suffix##rm: \
  case X86::Prefix##MOVDQU##Suffix##rm:

#define MOV_AVX512_CASE(Suffix, Postfix) \
  case X86::VMOVDQA64##Suffix##rm##Postfix: \
  case X86::VMOVDQA32##Suffix##rm##Postfix: \
  case X86::VMOVDQU64##Suffix##rm##Postfix: \
  case X86::VMOVDQU32##Suffix##rm##Postfix: \
  case X86::VMOVDQU16##Suffix##rm##Postfix: \
  case X86::VMOVDQU8##Suffix##rm##Postfix: \
  case X86::VMOVAPS##Suffix##rm##Postfix: \
  case X86::VMOVAPD##Suffix##rm##Postfix: \
  case X86::VMOVUPS##Suffix##rm##Postfix: \
  case X86::VMOVUPD##Suffix##rm##Postfix:

#define CASE_128_MOV_RM() \
  MOV_AVX512_CASE(Z128, ) \
  MOV_AVX512_CASE(Z128, k) \
  MOV_AVX512_CASE(Z128, kz)

#define CASE_256_MOV_RM() \
  MOV_AVX512_CASE(Z256, ) \
  MOV_AVX512_CASE(Z256, k) \
  MOV_AVX512_CASE(Z256, kz) \

#define CASE_512_MOV_RM() \
  MOV_AVX512_CASE(Z, ) \
  MOV_AVX512_CASE(Z, k) \
  MOV_AVX512_CASE(Z, kz) \
  case X86::VBROADCASTF128rm:
  case X86::VBROADCASTI128rm:
  case X86::MOVDDUPrm:
  case X86::VMOVDDUPrm:
  case X86::VPBROADCASTQrm:
  case X86::VBROADCASTSDYrm:
  case X86::VPBROADCASTQYrm:
  case X86::VBROADCASTSSrm:
  case X86::VPBROADCASTDrm:
  case X86::VBROADCASTSSYrm:
  case X86::VPBROADCASTDYrm:
  case X86::VPBROADCASTWrm:
  case X86::VPBROADCASTWYrm:
  case X86::VPBROADCASTBrm:
  case X86::VPBROADCASTBYrm:

#define MOVX_CASE(Prefix, Ext, Type, Suffix, Postfix) \
  case X86::Prefix##PMOV##Ext##Type##Suffix##rm##Postfix:

#define CASE_MOVX_RM(Ext, Type) \
  MOVX_CASE(, Ext, Type, , ) \
  MOVX_CASE(V, Ext, Type, , ) \
  MOVX_CASE(V, Ext, Type, Y, ) \
  MOVX_CASE(V, Ext, Type, Z128, ) \
  MOVX_CASE(V, Ext, Type, Z128, k ) \
  MOVX_CASE(V, Ext, Type, Z128, kz ) \
  MOVX_CASE(V, Ext, Type, Z256, ) \
  MOVX_CASE(V, Ext, Type, Z256, k ) \
  MOVX_CASE(V, Ext, Type, Z256, kz ) \
  MOVX_CASE(V, Ext, Type, Z, ) \
  MOVX_CASE(V, Ext, Type, Z, k ) \
  MOVX_CASE(V, Ext, Type, Z, kz )

  assert(MI->getOpcode() == X86::TAILJMPm64_REX ||
         MI->getOpcode() == X86::CALL64m);
  if (I->isJumpTableDebugInfo())
  X86MCInstLower MCInstLowering(*MF, *this);
  if (MI->getOpcode() == X86::OR64rm) {
    for (auto &Opd : MI->operands()) {
      if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
                                "swift_async_extendedFramePointerFlags") {
        ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;

    OutStreamer->AddComment("EVEX TO LEGACY Compression ", false);
    OutStreamer->AddComment("EVEX TO VEX Compression ", false);
    OutStreamer->AddComment("EVEX TO EVEX Compression ", false);
  bool IsTailJump = false;
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
  case X86::CLEANUPRET: {
  case X86::CATCHRET: {
  case X86::ENDBR64: {
      MCInstLowering.Lower(MI, Inst);
      EmitAndCountInstruction(Inst);
  case X86::TAILJMPd64:
    if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_IMPORT_BR);
  case X86::TAILJMPd_CC:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64_CC:
    if (EnableImportCallOptimization)
        "import call optimization was enabled");
  case X86::TAILJMPm64_REX:
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_CFG_BR_REX);
  case X86::TAILJMPr64_REX: {
    if (EnableImportCallOptimization) {
      assert(MI->getOperand(0).getReg() == X86::RAX &&
             "Indirect tail calls with impcall enabled must go through RAX (as "
             "enforced by TCRETURNImpCallri64)");
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_INDIR_BR);
      emitLabelAndRecordForImportCallOptimization(
          (ImportCallKind)(IMAGE_RETPOLINE_AMD64_SWITCHTABLE_FIRST +
           "Unexpected JMP instruction was emitted for a jump-table when import "
           "call optimization was enabled");
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_addrX32:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
  case X86::TLS_base_addrX32:
  case X86::TLS_desc32:
  case X86::TLS_desc64:
    return LowerTlsAddr(MCInstLowering, *MI);
  case X86::MOVPC32r: {
    EmitAndCountInstruction(
    bool hasFP = FrameLowering->hasFP(*MF);
    bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
    EmitAndCountInstruction(
    if (HasActiveDwarfFrame && !hasFP) {
  case X86::ADD32ri: {
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
        .addReg(MI->getOperand(0).getReg())
        .addReg(MI->getOperand(1).getReg())
  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*MI, MCInstLowering);
  case TargetOpcode::FAULTING_OP:
    return LowerFAULTING_OP(*MI, MCInstLowering);
  case TargetOpcode::FENTRY_CALL:
    return LowerFENTRY_CALL(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_OP:
    return LowerPATCHABLE_OP(*MI, MCInstLowering);
  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*MI);
  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_RET:
    return LowerPATCHABLE_RET(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_TAIL_CALL:
    return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_EVENT_CALL:
    return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);
  case X86::MORESTACK_RET:
  case X86::KCFI_CHECK:
    return LowerKCFI_CHECK(*MI);
  case X86::ASAN_CHECK_MEMACCESS:
    return LowerASAN_CHECK_MEMACCESS(*MI);
  case X86::MORESTACK_RET_RESTORE_R10:
    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
  case X86::SEH_PushReg:
  case X86::SEH_SaveReg:
  case X86::SEH_SaveXMM:
  case X86::SEH_StackAlloc:
  case X86::SEH_StackAlign:
  case X86::SEH_SetFrame:
  case X86::SEH_PushFrame:
  case X86::SEH_EndPrologue:
  case X86::SEH_EndEpilogue:
  case X86::SEH_UnwindV2Start:
  case X86::SEH_UnwindVersion:
    EmitSEHInstruction(MI);
  case X86::SEH_BeginEpilogue: {
    EmitSEHInstruction(MI);
  case X86::UBSAN_UD1:
            .addReg(X86::NoRegister)
            .addImm(MI->getOperand(0).getImm())
            .addReg(X86::NoRegister));
  case X86::CALL64pcrel32:
    if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_IMPORT_CALL);
    MCInstLowering.Lower(MI, TmpInst);
    emitCallInstruction(TmpInst);
    maybeEmitNopAfterCallForWindowsEH(MI);
    if (EnableImportCallOptimization) {
      assert(MI->getOperand(0).getReg() == X86::RAX &&
             "Indirect calls with impcall enabled must go through RAX (as "
             "enforced by CALL64r_ImpCall)");
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_INDIR_CALL);
    MCInstLowering.Lower(MI, TmpInst);
    emitCallInstruction(TmpInst);
    maybeEmitNopAfterCallForWindowsEH(MI);
      emitLabelAndRecordForImportCallOptimization(
          IMAGE_RETPOLINE_AMD64_CFG_CALL);
        &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
      if (EdgeProb > Threshold)
    MCInstLowering.Lower(MI, TmpInst);
    emitCallInstruction(TmpInst);
    maybeEmitNopAfterCallForWindowsEH(MI);
  EmitAndCountInstruction(TmpInst);
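  // Roughly: every lowered call above goes through emitCallInstruction()
  // followed by maybeEmitNopAfterCallForWindowsEH(), and anything not handled
  // by an earlier case falls through to a plain EmitAndCountInstruction().
  // The MachineBranchProbabilityInfo/EdgeProb/Threshold lines belong to the
  // JCC_1 lowering behind the hidden -enable-branch-hint option: if the
  // taken-edge probability exceeds BranchHintProbabilityThreshold, the
  // lowered branch is flagged so the encoder emits a branch-taken hint prefix
  // (presumably the 0x3E segment-override encoding).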
void X86AsmPrinter::emitCallInstruction(const llvm::MCInst &MCI) {

void X86AsmPrinter::maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI) {
  if (HasEHPersonality) {
      if (NextMI.getOpcode() == X86::SEH_BeginEpilogue) {
  if (HasEHPersonality) {
  if (MI->getParent()->succ_empty())
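  // Roughly: maybeEmitNopAfterCallForWindowsEH() exists because the Windows
  // unwinder keys off the return address, which points just past the call.
  // If a call is the last instruction before an epilogue marker, or ends a
  // block with no successors, that address could fall outside the region the
  // unwind tables describe, so a NOP is emitted after such calls in functions
  // with an EH personality.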
void X86AsmPrinter::emitLabelAndRecordForImportCallOptimization(
    ImportCallKind Kind) {
  assert(EnableImportCallOptimization);
  SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
      .push_back({CallSiteSymbol, Kind});
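// Roughly: the import-call-optimization bookkeeping emits a temporary label
// at each annotated call or jump site and records (label, kind) per output
// section; the recorded IMAGE_RETPOLINE_AMD64_* kinds presumably feed the
// metadata section that Windows import call optimization consumes, and the
// asserts earlier in emitInstruction() enforce the register and opcode
// constraints (e.g. indirect calls pinned to RAX) that the scheme relies on.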