#define DEBUG_TYPE "x86-isel"
static std::pair<MVT, unsigned>
handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
                                 const X86Subtarget &Subtarget) {
  // ...
    return {MVT::v2i64, 1};
  // ...
    return {MVT::v4i32, 1};
  // ...
    return {MVT::v8i16, 1};
  // ...
    return {MVT::v16i8, 1};
  // ...
    return {MVT::v32i8, 1};
  // ...
      return {MVT::v64i8, 1};
    return {MVT::v32i8, 2};
  // ...
  if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
      // ...
    return {MVT::i8, NumElts};
  // ...
}
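// Summary of the visible cases above: small mask element counts widen to a
// single 128-bit vector (v2i64/v4i32/v8i16/v16i8), wider ones to v32i8 or
// v64i8 (the latter needs BWI plus 512-bit registers, otherwise two v32i8
// halves are used); non-power-of-two counts, or v64i1 without BWI, are
// scalarized to one i8 per element.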
MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  // ...
    unsigned NumRegisters;
    std::tie(RegisterVT, NumRegisters) =
        handleMaskRegisterForCallingConv(/*...*/);
    // ...
  if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
      // ...
}
unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  // ...
    unsigned NumRegisters;
    std::tie(RegisterVT, NumRegisters) =
        handleMaskRegisterForCallingConv(/*...*/);
    // ...
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
    // ...
}
unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break wide v*i1 masks that have no legal register into i1 scalars.
  // ...
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    // ...
    return NumIntermediates;
  // ...
  // Split v64i1 to two v32i1 halves when BWI is available but 512-bit
  // registers are not used.
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      // ...
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
    // ...
  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}
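// Hypothetical illustration of the v64i1 split above (caller-side view; the
// exact calling-convention guard sits in the elided part of the condition):
//   EVT IntVT; unsigned NumInt; MVT RegVT;
//   TLI.getVectorTypeBreakdownForCallingConv(Ctx, CC, MVT::v64i1, IntVT,
//                                            NumInt, RegVT);
//   // On an AVX512BW target without 512-bit registers:
//   // NumInt == 2, IntVT == MVT::v32i1, RegVT == MVT::v32i8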
bool X86TargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const {
  // ...
  if (Subtarget.is32Bit() && Ty->isFP128Ty())
    // ...
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine the
/// desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  // ...
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    // ...
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      // ...
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      // ...
    }
  }
}

/// Return the desired alignment for ByVal aggregate function arguments in the
/// caller parameter area.
Align X86TargetLowering::getByValTypeAlignment(Type *Ty,
                                               const DataLayout &DL) const {
  if (Subtarget.is64Bit())
    return std::max(DL.getABITypeAlign(Ty), Align::Constant<8>());
  // ...
}
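// Worked example (values follow from the code above): on x86-64 a byval
// struct with ABI alignment 4 is still passed with at least 8-byte
// alignment; on 32-bit, getMaxByValAlign raises the alignment to 16 as soon
// as the aggregate contains a 128-bit vector anywhere inside it.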
EVT X86TargetLowering::getOptimalMemOpType(
    LLVMContext &Context, const MemOp &Op,
    const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
      // ...
      if (Op.size() >= 64 && Subtarget.hasAVX512() && Subtarget.hasEVEX512() &&
          // ...
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      // ...
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
          // ...
      // ...
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          // ...
    } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
      // ...
    }
  }
  // ...
  if (Subtarget.is64Bit() && Op.size() >= 8)
    // ...
}

static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
  return (8 * Alignment.value()) % SizeInBits == 0;
}
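// Consequence of the selection above (summarizing the visible branches): a
// 64-byte-or-larger inline memcpy/memset on an AVX512BW+EVEX512 target is
// expanded with v64i8 accesses (v16i32 without BWI); when vector units are
// unavailable or implicit FP is disallowed, a 64-bit target still gets at
// least 64-bit integer accesses for operations of 8 bytes or more.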
bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
  // ...
    return !Subtarget.isUnalignedMem16Slow();
  // ...
    return !Subtarget.isUnalignedMem32Slow();
  // ...
}

bool X86TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {
  // ...
      // Non-temporal vector loads only exist with SSE4.1, so fall back to a
      // regular (possibly misaligned) load otherwise.
      return (Alignment < 16 || !Subtarget.hasSSE41());
  // ...
}

bool X86TargetLowering::allowsMemoryAccess(LLVMContext &Context,
                                           const DataLayout &DL, EVT VT,
                                           unsigned AddrSpace, Align Alignment,
                                           MachineMemOperand::Flags Flags,
                                           unsigned *Fast) const {
  // ...
}

// ...
  if (Subtarget.hasAVX512() && Subtarget.hasEVEX512())
    // ...

bool X86TargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}
void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                              ArgListTy &Args) const {
  if (Subtarget.is64Bit())
    return;
  // ...
  unsigned ParamRegs = 0;
  // ...
    ParamRegs = M->getNumberRegisterParameters();

  // Consume the register-parameter budget for small integer/pointer args.
  for (auto &Arg : Args) {
    // ...
    if (T->isIntOrPtrTy())
      // ...
        unsigned numRegs = 1;
        // ...
        if (ParamRegs < numRegs)
          return;
        ParamRegs -= numRegs;
        // ...
  }
}

// ...
  if (!Subtarget.is64Bit())
    // ...
  // ...
      (Subtarget.is64Bit() &&
       // ...
std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  // ...
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}
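// Note: every scalar FP and vector type funnels into VR128X here because the
// representative class only needs to be the largest legal super-register
// class for pressure estimation; the Cost value defaults to 1 and is only
// adjusted in cases elided from this listing.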
unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
    // ...
  // ...
}

Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
  // ...
    int Offset = M->getStackProtectorGuardOffset();
    // ...
      // %fs:0x28 on x86-64, %gs:0x14 on i386: the canonical TLS
      // stack-protector slots used by glibc, bionic, and Fuchsia.
      Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;

    StringRef GuardReg = M->getStackProtectorGuardReg();
    if (GuardReg == "fs")
      // ...
    else if (GuardReg == "gs")
      // ...

    StringRef GuardSymb = M->getStackProtectorGuardSymbol();
    if (!GuardSymb.empty()) {
      // ...
          nullptr, GuardSymb, nullptr,
          // ...
    }
  // ...
}

void X86TargetLowering::insertSSPDeclarations(Module &M) const {
  // The MSVC CRT provides the security cookie and its check function.
  // ...
    M.getOrInsertGlobal("__security_cookie",
                        // ...
    // ...
      F->addParamAttr(0, Attribute::AttrKind::InReg);
  // ...
  StringRef GuardMode = M.getStackProtectorGuard();

  // ...
  if ((GuardMode == "tls" || GuardMode.empty()) &&
      hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
    return;
  // ...
}

Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // ...
  return M.getFunction("__security_check_cookie");
}

Value *
X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  // Android provides a fixed TLS slot for the SafeStack pointer:
  // %fs:0x48 on x86-64, %gs:0x24 on i386.
  // ...
  int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
  // ...
}
bool X86TargetLowering::CanLowerReturn(
    // ...
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *
X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

ArrayRef<MCPhysReg> X86TargetLowering::getRoundingControlRegisters() const {
  static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
  return RCRegs;
}
/// Lowers masks values (v*i1) to the local register values.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
                               const SDLoc &DL, SelectionDAG &DAG) {
  // ...
  if (ValVT == MVT::v1i1)
    // ...
  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    // ...
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    // ...
    if (ValLoc == MVT::i32)
      // ...
  }
  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
    // ...
  }
  // ...
}

/// Breaks v64i1 value into two registers and adds the new node to the DAG.
static void Passv64i1ArgInRegs(
    const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg,
    SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  // ...
         "The value should reside in two registers");
  // ...
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}
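// On 32-bit AVX512BW targets a v64i1 mask lives in a 64-bit location that no
// single GPR can hold, so the value is split into Lo/Hi 32-bit halves (the
// elided code between the asserts and the push_backs) and each half is
// forwarded in its own register via VA and NextVA.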
SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // ...
  bool ShouldDisableCalleeSavedRegister =
      // ...

  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
       // ...
    if (ShouldDisableCalleeSavedRegister)
      // ...
    SDValue ValToCopy = OutVals[OutsIndex];
    // ...
           "Unexpected FP-extend for return value.");
    // ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               // ...
    // ...
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        // ...
          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
          // ...
            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
    // ...
           "Currently the only custom case is when we split v64i1 to 2 regs");
      // ...
      if (ShouldDisableCalleeSavedRegister)
        // ...

  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
      // ...
    }
    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
    // ...
                 DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
  }
  // ...
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
  // ...
  if (ShouldDisableCalleeSavedRegister &&
      // ...
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}
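// Each CopyToReg above threads Glue so the register writes stay adjacent to
// the final return node built from RetOps (RET_GLUE, or IRET for interrupt
// handlers). FP0/FP1 results are special-cased because x87 values are
// returned on the FPU register stack; the elided branch handles that case
// (compare the POP_FROM_X87_REG helper declared in this file).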
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
    return false;
  // ...
    // If the copy has a glue operand, conservatively assume a tail call is
    // not safe.
    if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
        MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  // ...
    if (U->getNumOperands() > 4)
      return false;
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
      return false;
  // ...
}
// (These lines appear to come from X86TargetLowering::getTypeForExtReturn,
// which picks the type small integer return values are extended to.)
  MVT ReturnMVT = MVT::i32;
  // ...
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
    // ...
    ReturnMVT = MVT::i8;
  }
  // ...
  return VT.bitsLT(MinVT) ? MinVT : VT;
/// Reads two 32 bit registers and creates a 64 bit mask value.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
                                SDValue &Root, SelectionDAG &DAG,
                                const SDLoc &DL, const X86Subtarget &Subtarget,
                                SDValue *InGlue = nullptr) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  // ...
         "Expecting first location of 64 bit width type");
  // ...
         "The locations should have the same type");
  // ...
         "The values should reside in two registers");

  SDValue ArgValueLo, ArgValueHi;
  // ...
  if (nullptr == InGlue) {
    // ...
}

/// Lowers a register of size 8/16/32/64 to a mask value of the expected size.
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &DL,
                               SelectionDAG &DAG) {
  // ...
  if (ValVT == MVT::v1i1)
    // ...
  if (ValVT == MVT::v64i1) {
    // ...
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    // ...
  }
  // ...
    MaskLenVT = MVT::i8;
    // ...
    MaskLenVT = MVT::i16;
    // ...
    MaskLenVT = MVT::i32;
  // ...
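// lowerRegToMasks is the inverse of lowerMasksToReg above: judging from the
// MaskLenVT cases, the incoming 8/16/32/64-bit integer is narrowed to the
// mask-length integer type and then reinterpreted as the v*i1 mask type (the
// actual truncate/bitcast tail is elided); the v64i1-on-32-bit case first
// reassembles the value from two i32 halves via getv64i1Argument.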
/// Lower the result values of a call into the appropriate copies out of
/// physical registers.
SDValue X86TargetLowering::LowerCallResult(
    // ...
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy each result register out of its specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       // ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
      // ...
    }
    // ...
    bool RoundAfterCopy = false;
    // ...
      if (!Subtarget.hasX87())
        // ...
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    // ...
           "Currently the only custom case is when we split v64i1 to 2 regs");
  // ...
/// Determines whether Args, either a set of outgoing arguments to a call, or
/// a set of incoming args of a call, contains an sret pointer that the callee
/// pops.
template <typename T>
static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
                             const X86Subtarget &Subtarget) {
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");

  // Only 32-bit targets pop the sret argument.
  if (!Subtarget.is32Bit())
    return false;
  // ...
    if (!Flags.isSRet() || Flags.isInReg())
      // ...

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address
/// specified by "Src" to address "Dst".
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  // ...
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      // ...
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // ...
}

static SDValue LowerMemArgument(/* parameters elided */) {
  // ...
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  // ...
  bool ExtendedInMem =
      // ...
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1;  // Don't create zero-sized stack objects.
    // ...
  }
  // ...
  EVT ArgVT = Ins[i].ArgVT;
  // ...
  if (Flags.isCopyElisionCandidate() &&
      // ...
      !ScalarizedVector) {
    // ...
    if (Ins[i].PartOffset == 0) {
      // ...
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          // ...
      if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
        // ...
  // ...
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      // ...
  return ExtendedInMem
             // ...
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  // ...
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return GPR64ArgRegsWin64;
  // ...
  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return GPR64ArgRegs64Bit;
}

static ArrayRef<MCPhysReg>
get64BitArgumentXMMs(MachineFunction &MF, CallingConv::ID CallConv,
                     const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  // ...
  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
    // ...
  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return XMMArgRegs64Bit;
}

static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return is_sorted(ArgLocs, [](const CCValAssign &A, const CCValAssign &B) {
    return A.getValNo() < B.getValNo();
  });
}
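// The two tables above encode the ABI difference directly: Win64 passes the
// first four integer arguments in RCX/RDX/R8/R9 (with XMM0-XMM3 shadowing
// them, as the Win64 vararg code later in this file shows), while the SysV
// AMD64 ABI uses six GPRs (RDI, RSI, RDX, RCX, R8, R9) and up to eight XMM
// registers.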
// Bundles the state needed to lower the variadic portion of a function's
// formal arguments.
class VarArgsLoweringHelper {
public:
  VarArgsLoweringHelper(/* ... */)
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
        // ...
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
        // ...

  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);

private:
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);

  void forwardMustTailParameters(SDValue &Chain);

  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
  // ...
  FuncInfo->setVarArgsFrameIndex(
      FrameInfo.CreateFixedObject(1, StackSize, true));
  // ...
  unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
  unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);

  assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
         "SSE register cannot be used when SSE is disabled!");
  // ...
  int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
  FuncInfo->setRegSaveFrameIndex(
      FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
  // ...
    Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
    // ...
  const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
  if (!AvailableXmms.empty()) {
    Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
    // ...
      TheMachineFunction.getRegInfo().addLiveIn(Reg);
    // ...
  }
  // ...
  for (SDValue Val : LiveGPRs) {
    // ...
  }

  if (!LiveXMMRegs.empty()) {
    // ...
        SaveXMMOps, MVT::i8, StoreMMO));
  }

  if (!MemOps.empty())
    // ...
}
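// Sketch of what the elided pieces accomplish: for a SysV x86-64 vararg
// function, the unnamed GPR arguments are spilled into the register save
// area created above (8 bytes per GPR starting at HomeOffset), while the XMM
// argument registers are stored conditionally on AL, which the SysV ABI
// defines as an upper bound on the number of vector registers actually used
// (in upstream LLVM this is the X86ISD::VASTART_SAVE_XMM_REGS pseudo).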
void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
  // ...
  MVT VecVT = MVT::Other;
  // ...
  if (Subtarget.useAVX512Regs() &&
      // ...
    VecVT = MVT::v16f32;
  else if (Subtarget.hasAVX())
    // ...
  else if (Subtarget.hasSSE2())
    // ...
  if (VecVT != MVT::Other)
    // ...
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

  // Forward AL for varargs on 64-bit non-Windows targets.
  if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
    Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
    // ...
  }
  // ...
    FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
        TargLowering.getRegClassFor(FR.VT));
  // ...
}

void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
                                                   unsigned StackSize) {
  // ...
  if (FrameInfo.hasVAStart())
    createVarArgAreaAndStoreRegisters(Chain, StackSize);

  if (FrameInfo.hasMustTailInVarArgFunc())
    forwardMustTailParameters(Chain);
}
SDValue X86TargetLowering::LowerFormalArguments(
    // ...
  // Cygwin/MinGW main() gets a forced frame pointer.
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // ...
  bool Is64Bit = Subtarget.is64Bit();
  // ...
         "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Ins, CC_X86);
  // ...
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);

  // ...
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
       // ...
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    // ...
           "Currently the only custom case is when we split v64i1 to 2 regs");
    // ...
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8)
        RC = &X86::GR8RegClass;
      else if (RegVT == MVT::i16)
        RC = &X86::GR16RegClass;
      else if (RegVT == MVT::i32)
        RC = &X86::GR32RegClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = &X86::GR64RegClass;
      else if (RegVT == MVT::f16)
        RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
      else if (RegVT == MVT::f32)
        RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
      else if (RegVT == MVT::f64)
        RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
      else if (RegVT == MVT::f80)
        RC = &X86::RFP80RegClass;
      else if (RegVT == MVT::f128)
        RC = &X86::VR128RegClass;
      else if (RegVT.is512BitVector())
        RC = &X86::VR512RegClass;
      else if (RegVT.is256BitVector())
        RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
      else if (RegVT.is128BitVector())
        RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
      else if (RegVT == MVT::x86mmx)
        RC = &X86::VR64RegClass;
      else if (RegVT == MVT::v1i1)
        RC = &X86::VK1RegClass;
      else if (RegVT == MVT::v8i1)
        RC = &X86::VK8RegClass;
      else if (RegVT == MVT::v16i1)
        RC = &X86::VK16RegClass;
      else if (RegVT == MVT::v32i1)
        RC = &X86::VK32RegClass;
      else if (RegVT == MVT::v64i1)
        RC = &X86::VK64RegClass;
      // ...
    // ...
      LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
    // ...
    if (VA.getLocInfo() == CCValAssign::Indirect &&
        !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
      // ...
    }
  }

  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftAsync()) {
      // ...
      int PtrSize = Subtarget.is64Bit() ? 8 : 4;
      // ...
        X86FI->setSwiftAsyncContextFrameIdx(FI);
      // ...
    }
    // ...
    if (Ins[I].Flags.isSRet()) {
      // ...
             "SRet return has already been set");
      // ...
    }
  }

  unsigned StackSize = CCInfo.getStackSize();
  // ...
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
  // ...
    VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
        .lowerVarArgsParameters(Chain, StackSize);
  // ...
    EHInfo->PSPSymFrameIdx = PSPSymFI;
  // ...
  if (// ...
      F.hasFnAttribute("no_caller_saved_registers")) {
    // ...
    for (std::pair<MCRegister, Register> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  }
  // ...
    if (In.Flags.isSwiftSelf() || In.Flags.isSwiftAsync() ||
        In.Flags.isSwiftError()) {
      // ...
             "Swift attributes can't be used with preserve_none");
    }
  // ...
SDValue X86TargetLowering::LowerMemOpCallTo(/* ... */,
                                            bool isByVal) const {
  // ...
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      // ...
}

SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    // ...
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
  // ...
}

/// Emit a store of the return address if tail call optimization is performed
/// and it is required (FPDiff != 0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  if (!FPDiff)
    return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      // ...
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       // ...
  return Chain;
}

/// Build a MOVL-style shuffle: element 0 of the result comes from the second
/// operand, the remaining elements from the first.
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
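// Example of the mask getMOVL builds for a 4-element type (NumElems == 4):
// {4, 1, 2, 3}. Shuffle indices >= NumElems select from V2, so lane 0 comes
// from V2 and lanes 1-3 from V1 -- the semantics of the SSE MOVSS/MOVSD
// "move lowest element" forms.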
SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  // ...
  const auto *CB = CLI.CB;
  // ...
  bool Is64Bit = Subtarget.is64Bit();
  // ...
  bool IsSibcall = false;
  // ...
  bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
  // ...
  bool HasNCSR = (CB && isa<CallInst>(CB) &&
                  CB->hasFnAttr("no_caller_saved_registers"));
  bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
  bool IsCFICall = IsIndirectCall && CLI.CFIType;
  // ...
  bool IsNoTrackIndirectCall = IsIndirectCall && CB->doesNoCfCheck() &&
                               M->getModuleFlag("cf-protection-branch");
  if (IsNoTrackIndirectCall)
    // ...

  if (IsIndirectCall && !IsWin64 &&
      M->getModuleFlag("import-call-optimization"))
    report_fatal_error(
        "Indirect calls must have a normal calling convention if "
        "Import Call Optimization is enabled");

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Outs, CC_X86);
  // ...
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);

  // ...
  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
    // ...
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      // ...
  }

  if (isTailCall && !IsMustTail) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
                                                   // ...
    // ...
    if (!IsGuaranteeTCO && isTailCall)
      IsSibcall = true;
  }

  if (IsMustTail && !isTailCall)
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // ...
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  // ...
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  // ...
    FPDiff = NumBytesCallerPushed - NumBytes;
    // ...
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      // ...

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    // ...
    if (!ArgLocs.back().isMemLoc())
      // ...
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  } else if (CLI.IsPreallocated) {
    // ...
           "cannot use preallocated attribute on a register "
           // ...
    for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
      // ...
        PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
    }
    // ...
    size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
    MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
    MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
    // ...
  }

  if (!IsSibcall && !IsMustTail)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, dl);

  // Load the return address for tail calls if it has to be moved.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  // ...
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
       // ...
    assert(OutIndex < Outs.size() && "Invalid Out index");
    // Skip inalloca/preallocated arguments; they have already been written.
    if (Flags.isInAlloca() || Flags.isPreallocated())
      continue;
    // ...
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();
    // ...
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
    // ...
        // Spill byval arguments to an aligned stack object.
        // ...
            Flags.getByValSize(),
            std::max(Align(16), Flags.getNonZeroByValAlign()), false);
    // ...
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      // ...
          Chain, dl, Arg, SpillSlot,
          // ...
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // ...
      if (isVarArg && IsWin64) {
        // Win64 varargs: mirror an XMM argument into its shadow GPR so the
        // callee's va_arg handling can find it.
        // ...
        switch (VA.getLocReg()) {
        // ...
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8;  break;
        case X86::XMM3: ShadowReg = X86::R9;  break;
        }
        // ...
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      // ...
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags, isByVal));
    }
  }

  if (!MemOpChains.empty())
    // ...

  // ...
    if (G && !G->getGlobal()->hasLocalLinkage() &&
        G->getGlobal()->hasDefaultVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);

  // ...
  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
      (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    // ...
  }

  // ...
  if (isVarArg && IsMustTail) {
    // ...
    for (const auto &F : Forwards) {
      // ...
      RegsToPass.push_back(std::make_pair(F.PReg, Val));
    }
  }

  // ...
  if (!IsSibcall && isTailCall) {
    // ...
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
         // ...
             "Expecting custom case only in regcall calling convention");
      // ...
      SDValue Arg = OutVals[OutsIndex];
      // ...
      if (Flags.isInAlloca() || Flags.isPreallocated())
        continue;
      // ...
      if (Flags.isByVal()) {
        // ...
      } else {
        // ...
            Chain, dl, Arg, FIN,
            // ...
      }
    }

    if (!MemOpChains2.empty())
      // ...

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     // ...
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }

  // ...
  for (const auto &[Reg, N] : RegsToPass) {
    // ...
  }

  bool IsImpCall = false;
  // ...
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
  // ...
    Callee = LowerGlobalOrExternal(Callee, DAG, true, &IsImpCall);
  // ...
             Callee.getValueType() == MVT::i32) {
    // ...

  if (!IsSibcall && isTailCall && !IsMustTail) {
    // ...
  }

  // ...
  for (const auto &[Reg, N] : RegsToPass)
    // ...

  auto AdaptedCC = CallConv;
  // ...
    if (CB && CB->hasFnAttr("no_callee_saved_registers"))
      // ...
    return RegInfo->getCallPreservedMask(MF, AdaptedCC);
  // ...
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...
  if (CLI.CB && isa<InvokeInst>(CLI.CB))
    // ...
  if (CLI.CB && isa<InvokeInst>(CLI.CB))
    // ...
  if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
    // ...
  }

  // ...
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
  // ...
  if (ShouldDisableArgRegs) {
    for (auto const &RegPair : RegsToPass)
      // ...
  }
  // ...
  } else if (IsNoTrackIndirectCall) {
    // ...
  }
  // ...
         "tail calls cannot be marked with clang.arc.attachedcall");
  assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");

  // ...
  unsigned NumBytesForCalleeToPop = 0;
  // ...
    NumBytesForCalleeToPop = NumBytes;  // Callee pops everything.
  // ...
    NumBytesForCalleeToPop = 4;         // Callee pops the hidden sret pointer.
  // ...
  Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
                             // ...
  // ...
    if (Out.Flags.isSwiftSelf() || Out.Flags.isSwiftAsync() ||
        Out.Flags.isSwiftError()) {
      // ...
             "Swift attributes can't be used with preserve_none");
    }

  // ...
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         // ...
}
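// High-level flow of LowerCall as reconstructed above: analyze outgoing
// locations with CC_X86, size the call frame (CALLSEQ_START), place register
// and stack arguments (with byval spills and Win64 XMM shadow copies), emit
// the glued call node (X86ISD::CALL, NT_CALL, or TC_RETURN), tear down the
// frame (CALLSEQ_END with any callee-popped bytes), and finally recover the
// results via LowerCallResult.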
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
                                               SelectionDAG &DAG) const {
  const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
  assert(StackSize % SlotSize == 0 &&
         "StackSize must be a multiple of SlotSize");
  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
}
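// Worked example: with 8-byte slots and 16-byte stack alignment, a 48-byte
// argument area maps to alignTo(48 + 8, 16) - 8 = 64 - 8 = 56 bytes. The
// extra slot accounts for the pushed return address, so the stack stays
// 16-byte aligned at the call boundary.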
  // ...
      cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
      // ...

/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
                                const MachineRegisterInfo *MRI,
                                const X86InstrInfo *TII,
                                const CCValAssign &VA) {
  // ...
  if (!Flags.isByVal()) {
    // ...
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
           Opcode == X86::LEA64_32r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      }
      // ...
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ...
  }
  // ...
    Bytes = Flags.getByValSize();
  // ...

static bool
mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI,
                            Register CallerSRetReg) {
  const auto &Outs = CLI.Outs;
  const auto &OutVals = CLI.OutVals;
  // ...
  unsigned Pos = 0;
  for (unsigned E = Outs.size(); Pos != E; ++Pos)
    if (Outs[Pos].Flags.isSRet())
      break;
  // No sret argument found.
  if (Pos == Outs.size())
    // ...
  SDValue SRetArgVal = OutVals[Pos];
  // ...
/// Check whether the call is eligible for tail call optimization.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
    // ...
  bool CCMatch = CallerCC == CalleeCC;
  // ...
  if (IsCalleeWin64 != IsCallerWin64)
    return false;

  if (IsGuaranteeTCO) {
    // ...
  }
  // ...
  if (RegInfo->hasStackRealignment(MF))
    return false;
  // ...
  } else if (IsCalleePopSRet)
    // ...

  if (isVarArg && !Outs.empty()) {
    // Win64 varargs use the register save area, which sibcalls do not set up.
    if (IsCalleeWin64 || IsCallerWin64)
      return false;

    for (const auto &VA : ArgLocs)
      // ...
  }
  // ...
  for (const auto &In : Ins) {
    // ...
  }
  // ...
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
  RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
  for (const auto &VA : RVLocs) {
    // ...
  }
  // ...
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }
  // ...
  if (CallerF.hasFnAttribute("no_caller_saved_registers"))
    return false;
  // ...
  if (!Outs.empty()) {
    if (StackArgsSize > 0) {
      // ...
      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        // ...
      }
    }
    // ...
    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
                                  !isa<ExternalSymbolSDNode>(Callee)) ||
                                 PositionIndependent)) {
      unsigned NumInRegs = 0;
      // ...
      unsigned MaxInRegs = PositionIndependent ? 2 : 3;

      for (const auto &VA : ArgLocs) {
        // ...
        switch (VA.getLocReg()) {
        // ...
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
            return false;
          // ...
        }
      }
    }
    // ...
    bool CalleeWillPop =
        // ...
      bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
      if (!CalleePopMatches)
        return false;
    } else if (CalleeWillPop && StackArgsSize > 0) {
      // ...
/// Determines whether the callee is required to pop its own arguments.
bool X86::isCalleePop(CallingConv::ID CallingConv, bool is64Bit,
                      bool IsVarArg, bool GuaranteeTCO) {
  // ...
  switch (CallingConv) {
  // ...