46#include "llvm/IR/IntrinsicsHexagon.h"
66#define DEBUG_TYPE "hexagon-lowering"
70 cl::desc(
"Control jump table emission on Hexagon target"));
74 cl::desc(
"Enable Hexagon SDNode scheduling"));
78 cl::desc(
"Set minimum jump tables"));
82 cl::desc(
"Max #stores to inline memcpy"));
86 cl::desc(
"Max #stores to inline memcpy"));
90 cl::desc(
"Max #stores to inline memmove"));
95 cl::desc(
"Max #stores to inline memmove"));
99 cl::desc(
"Max #stores to inline memset"));
103 cl::desc(
"Max #stores to inline memset"));
107 cl::desc(
"Convert constant loads to immediate values."));
111 cl::desc(
"Rewrite unaligned loads as a pair of aligned loads"));
116 cl::desc(
"Disable minimum alignment of 1 for "
117 "arguments passed by value on stack"));
125 Hexagon::R0, Hexagon::R1, Hexagon::R2,
126 Hexagon::R3, Hexagon::R4, Hexagon::R5
128 const unsigned NumArgRegs = std::size(ArgRegs);
132 if (RegNum != NumArgRegs && RegNum % 2 == 1)
141#include "HexagonGenCallingConv.inc"
145 unsigned &NumIntermediates,
MVT &RegisterVT)
const {
153 if (isBoolVector && !Subtarget.
useHVXOps() && isPowerOf2 && NumElts >= 8) {
154 RegisterVT = MVT::v8i8;
155 IntermediateVT = MVT::v8i1;
156 NumIntermediates = NumElts / 8;
157 return NumIntermediates;
162 if (isBoolVector && Subtarget.
useHVX64BOps() && isPowerOf2 && NumElts >= 64) {
163 RegisterVT = MVT::v64i8;
164 IntermediateVT = MVT::v64i1;
165 NumIntermediates = NumElts / 64;
166 return NumIntermediates;
171 if (isBoolVector && Subtarget.
useHVX128BOps() && isPowerOf2 &&
173 RegisterVT = MVT::v128i8;
174 IntermediateVT = MVT::v128i1;
175 NumIntermediates = NumElts / 128;
176 return NumIntermediates;
180 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
183std::pair<MVT, unsigned>
193 if (!Subtarget.
useHVXOps() && NumElems >= 8)
194 return {MVT::v8i8, NumElems / 8};
197 return {MVT::v64i8, NumElems / 64};
200 return {MVT::v128i8, NumElems / 128};
210 auto [RegisterVT, NumRegisters] =
222 unsigned IntNo =
Op.getConstantOperandVal(0);
227 case Intrinsic::thread_pointer: {
244 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
258 return CCInfo.
CheckReturn(Outs, RetCC_Hexagon_HVX);
288 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
337 .
Case(
"r0", Hexagon::R0)
338 .
Case(
"r1", Hexagon::R1)
339 .
Case(
"r2", Hexagon::R2)
340 .
Case(
"r3", Hexagon::R3)
341 .
Case(
"r4", Hexagon::R4)
342 .
Case(
"r5", Hexagon::R5)
343 .
Case(
"r6", Hexagon::R6)
344 .
Case(
"r7", Hexagon::R7)
345 .
Case(
"r8", Hexagon::R8)
346 .
Case(
"r9", Hexagon::R9)
347 .
Case(
"r10", Hexagon::R10)
348 .
Case(
"r11", Hexagon::R11)
349 .
Case(
"r12", Hexagon::R12)
350 .
Case(
"r13", Hexagon::R13)
351 .
Case(
"r14", Hexagon::R14)
352 .
Case(
"r15", Hexagon::R15)
353 .
Case(
"r16", Hexagon::R16)
354 .
Case(
"r17", Hexagon::R17)
355 .
Case(
"r18", Hexagon::R18)
356 .
Case(
"r19", Hexagon::R19)
357 .
Case(
"r20", Hexagon::R20)
358 .
Case(
"r21", Hexagon::R21)
359 .
Case(
"r22", Hexagon::R22)
360 .
Case(
"r23", Hexagon::R23)
361 .
Case(
"r24", Hexagon::R24)
362 .
Case(
"r25", Hexagon::R25)
363 .
Case(
"r26", Hexagon::R26)
364 .
Case(
"r27", Hexagon::R27)
365 .
Case(
"r28", Hexagon::R28)
366 .
Case(
"r29", Hexagon::R29)
367 .
Case(
"r30", Hexagon::R30)
368 .
Case(
"r31", Hexagon::R31)
369 .
Case(
"r1:0", Hexagon::D0)
370 .
Case(
"r3:2", Hexagon::D1)
371 .
Case(
"r5:4", Hexagon::D2)
372 .
Case(
"r7:6", Hexagon::D3)
373 .
Case(
"r9:8", Hexagon::D4)
374 .
Case(
"r11:10", Hexagon::D5)
375 .
Case(
"r13:12", Hexagon::D6)
376 .
Case(
"r15:14", Hexagon::D7)
377 .
Case(
"r17:16", Hexagon::D8)
378 .
Case(
"r19:18", Hexagon::D9)
379 .
Case(
"r21:20", Hexagon::D10)
380 .
Case(
"r23:22", Hexagon::D11)
381 .
Case(
"r25:24", Hexagon::D12)
382 .
Case(
"r27:26", Hexagon::D13)
383 .
Case(
"r29:28", Hexagon::D14)
384 .
Case(
"r31:30", Hexagon::D15)
385 .
Case(
"sp", Hexagon::R29)
386 .
Case(
"fp", Hexagon::R30)
387 .
Case(
"lr", Hexagon::R31)
388 .
Case(
"p0", Hexagon::P0)
389 .
Case(
"p1", Hexagon::P1)
390 .
Case(
"p2", Hexagon::P2)
391 .
Case(
"p3", Hexagon::P3)
392 .
Case(
"sa0", Hexagon::SA0)
393 .
Case(
"lc0", Hexagon::LC0)
394 .
Case(
"sa1", Hexagon::SA1)
395 .
Case(
"lc1", Hexagon::LC1)
396 .
Case(
"m0", Hexagon::M0)
397 .
Case(
"m1", Hexagon::M1)
398 .
Case(
"usr", Hexagon::USR)
399 .
Case(
"ugp", Hexagon::UGP)
400 .
Case(
"cs0", Hexagon::CS0)
401 .
Case(
"cs1", Hexagon::CS1)
428 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
430 if (RVLocs[i].getValVT() == MVT::i1) {
440 Register PredR =
MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
452 RVLocs[i].getValVT(), Glue);
478 bool IsStructRet = Outs.
empty() ?
false : Outs[0].Flags.isSRet();
503 IsVarArg, IsStructRet, StructAttrFlag, Outs,
512 :
"Argument must be passed on stack. "
513 "Not eligible for Tail Call\n"));
524 bool NeedsArgAlign =
false;
525 Align LargestAlignSeen;
527 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
533 NeedsArgAlign |= ArgAlign;
559 StackPtr.getValueType());
562 LargestAlignSeen = std::max(
564 if (Flags.isByVal()) {
584 if (NeedsArgAlign && Subtarget.
hasV60Ops()) {
585 LLVM_DEBUG(
dbgs() <<
"Function needs byte stack align due to call args\n");
586 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
587 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
592 if (!MemOpChains.
empty())
606 for (
const auto &R : RegsToPass) {
607 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
622 for (
const auto &R : RegsToPass) {
623 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
638 dyn_cast<ExternalSymbolSDNode>(Callee)) {
649 for (
const auto &R : RegsToPass)
653 assert(Mask &&
"Missing call preserved mask for calling convention");
670 Chain = DAG.
getNode(OpCode, dl, {MVT::Other, MVT::Glue}, Ops);
680 InVals, OutVals, Callee);
695 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
696 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
697 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
698 VT == MVT::v4i16 || VT == MVT::v8i8 ||
707 if (!isa<ConstantSDNode>(
Offset.getNode()))
711 int32_t V = cast<ConstantSDNode>(
Offset.getNode())->getSExtValue();
727 unsigned LR = HRI.getRARegister();
734 if (
Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
739 unsigned NumVals = Flags.getNumOperandRegisters();
742 switch (Flags.getKind()) {
753 for (; NumVals; --NumVals, ++i) {
754 Register Reg = cast<RegisterSDNode>(
Op.getOperand(i))->getReg();
757 HMFI.setHasClobberLR(
true);
809 unsigned IntNo =
Op.getConstantOperandVal(1);
811 if (IntNo == Intrinsic::hexagon_prefetch) {
829 assert(AlignConst &&
"Non-constant Align in LowerDYNAMIC_STACKALLOC");
835 A = HFI.getStackAlign().value();
838 dbgs () << __func__ <<
" Align: " <<
A <<
" Size: ";
839 Size.getNode()->dump(&DAG);
880 switch (RC.
getID()) {
881 case Hexagon::IntRegsRegClassID:
882 return Reg - Hexagon::R0 + 1;
883 case Hexagon::DoubleRegsRegClassID:
884 return (Reg - Hexagon::D0 + 1) * 2;
885 case Hexagon::HvxVRRegClassID:
886 return Reg - Hexagon::V0 + 1;
887 case Hexagon::HvxWRRegClassID:
888 return (Reg - Hexagon::W0 + 1) * 2;
895 HFL.FirstVarArgSavedReg = 0;
898 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
901 bool ByVal = Flags.isByVal();
907 if (VA.
isRegLoc() && ByVal && Flags.getByValSize() <= 8)
911 (!ByVal || (ByVal && Flags.getByValSize() > 8));
940 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.
getLocReg());
946 unsigned ObjSize = Flags.isByVal()
947 ? Flags.getByValSize()
955 if (Flags.isByVal()) {
969 for (
int i = HFL.FirstVarArgSavedReg; i < 6; i++)
970 MRI.addLiveIn(Hexagon::R0+i);
974 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
978 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
979 bool RequiresPadding = (NumVarArgRegs & 1);
980 int RegSaveAreaSizePlusPadding = RequiresPadding
981 ? (NumVarArgRegs + 1) * 4
984 if (RegSaveAreaSizePlusPadding > 0) {
987 if (!(RegAreaStart % 8))
988 RegAreaStart = (RegAreaStart + 7) & -8;
990 int RegSaveAreaFrameIndex =
992 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
995 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
997 HMFI.setVarArgsFrameIndex(FI);
1003 HMFI.setRegSavedAreaStartFrameIndex(FI);
1004 HMFI.setVarArgsFrameIndex(FI);
1013 HMFI.setVarArgsFrameIndex(FI);
1026 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
1046 SDValue SavedRegAreaStartFrameIndex =
1047 DAG.
getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
1051 if (HFL.FirstVarArgSavedReg & 1)
1052 SavedRegAreaStartFrameIndex =
1054 DAG.
getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
1061 SavedRegAreaStartFrameIndex,
1093 const Value *DestSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
1094 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
1100 false,
false,
nullptr, std::nullopt,
1112 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1129 auto isSExtFree = [
this](
SDValue N) {
1130 switch (
N.getOpcode()) {
1136 EVT OrigTy = cast<VTSDNode>(
Op.getOperand(1))->getVT();
1142 return ThisBW >= OrigBW;
1151 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1153 bool IsNegative =
C &&
C->getAPIntValue().isNegative();
1154 if (IsNegative || isSExtFree(
LHS) || isSExtFree(
RHS))
1166 SDValue Op1 =
Op.getOperand(1), Op2 =
Op.getOperand(2);
1170 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1188 EVT ValTy =
Op.getValueType();
1191 bool isVTi1Type =
false;
1192 if (
auto *CV = dyn_cast<ConstantVector>(CPN->
getConstVal())) {
1193 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
1196 unsigned VecLen = CV->getNumOperands();
1198 "conversion only supported for pow2 VectorSize");
1199 for (
unsigned i = 0; i < VecLen; ++i)
1215 else if (isVTi1Type)
1221 assert(cast<ConstantPoolSDNode>(
T)->getTargetFlags() == TF &&
1222 "Inconsistent target flag encountered");
1224 if (IsPositionIndependent)
1231 EVT VT =
Op.getValueType();
1232 int Idx = cast<JumpTableSDNode>(
Op)->getIndex();
1249 EVT VT =
Op.getValueType();
1251 unsigned Depth =
Op.getConstantOperandVal(0);
1271 EVT VT =
Op.getValueType();
1273 unsigned Depth =
Op.getConstantOperandVal(0);
1291 auto *GAN = cast<GlobalAddressSDNode>(
Op);
1293 auto *GV = GAN->getGlobal();
1294 int64_t
Offset = GAN->getOffset();
1302 if (GO && Subtarget.
useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1324 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
1350 unsigned char OperandFlags)
const {
1369 assert(Mask &&
"Missing call preserved mask for calling convention");
1404 if (IsPositionIndependent) {
1476 Hexagon::R0, Flags);
1616 for (
unsigned LegalIntOp :
1656 for (
unsigned IntExpOp :
1665 for (
unsigned FPExpOp :
1702 static const unsigned VectExpOps[] = {
1730 for (
unsigned VectExpOp : VectExpOps)
1744 if (VT.getVectorElementType() != MVT::i32) {
1771 for (
MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1772 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1787 if (NativeVT.getVectorElementType() != MVT::i1) {
1794 for (
MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1805 for (
MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1806 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1812 for (
MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1818 for (
MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
1830 for (
MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1883 for (
MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1884 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1912 initializeHVXLowering();
1956 return "HexagonISD::THREAD_POINTER";
1978HexagonTargetLowering::validateConstPtrAlignment(
SDValue Ptr,
Align NeedAlign,
1980 auto *CA = dyn_cast<ConstantSDNode>(
Ptr);
1983 unsigned Addr = CA->getZExtValue();
1986 if (HaveAlign >= NeedAlign)
1992 DiagnosticInfoMisalignedTrap(
StringRef M)
1998 return DI->
getKind() == DK_MisalignedTrap;
2006 <<
" has alignment " << HaveAlign.
value()
2007 <<
", but the memory access requires " << NeedAlign.
value();
2010 O <<
". The instruction has been replaced with a trap.";
2020 auto *
LS = cast<LSBaseSDNode>(
Op.getNode());
2021 assert(!
LS->isIndexed() &&
"Not expecting indexed ops on constant address");
2033 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
2034 return (
ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2035 ID == Intrinsic::hexagon_L2_loadri_pbr ||
2036 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2037 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2038 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2039 ID == Intrinsic::hexagon_L2_loadrub_pbr);
2048 V = cast<Operator>(V)->getOperand(0);
2050 V = cast<Instruction>(V)->getOperand(0);
2063 if (Blk == Parent) {
2068 BaseVal = BackEdgeVal;
2070 }
while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2073 if (IntrBaseVal == BackEdgeVal)
2080 assert(
Idx >= 0 &&
"Unexpected index to incoming argument in PHI");
2088 Value *IntrBaseVal = V;
2095 }
while (BaseVal != V);
2098 if (
const PHINode *PN = dyn_cast<PHINode>(V))
2112 unsigned Intrinsic)
const {
2113 switch (Intrinsic) {
2114 case Intrinsic::hexagon_L2_loadrd_pbr:
2115 case Intrinsic::hexagon_L2_loadri_pbr:
2116 case Intrinsic::hexagon_L2_loadrh_pbr:
2117 case Intrinsic::hexagon_L2_loadruh_pbr:
2118 case Intrinsic::hexagon_L2_loadrb_pbr:
2119 case Intrinsic::hexagon_L2_loadrub_pbr: {
2121 auto &
DL =
I.getDataLayout();
2122 auto &Cont =
I.getCalledFunction()->getParent()->getContext();
2126 Type *ElTy =
I.getCalledFunction()->getReturnType()->getStructElementType(0);
2133 Info.align =
DL.getABITypeAlign(
Info.memVT.getTypeForEVT(Cont));
2137 case Intrinsic::hexagon_V6_vgathermw:
2138 case Intrinsic::hexagon_V6_vgathermw_128B:
2139 case Intrinsic::hexagon_V6_vgathermh:
2140 case Intrinsic::hexagon_V6_vgathermh_128B:
2141 case Intrinsic::hexagon_V6_vgathermhw:
2142 case Intrinsic::hexagon_V6_vgathermhw_128B:
2143 case Intrinsic::hexagon_V6_vgathermwq:
2144 case Intrinsic::hexagon_V6_vgathermwq_128B:
2145 case Intrinsic::hexagon_V6_vgathermhq:
2146 case Intrinsic::hexagon_V6_vgathermhq_128B:
2147 case Intrinsic::hexagon_V6_vgathermhwq:
2148 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2149 const Module &M = *
I.getParent()->getParent()->getParent();
2151 Type *VecTy =
I.getArgOperand(1)->getType();
2153 Info.ptrVal =
I.getArgOperand(0);
2156 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2169 return X.getValueType().isScalarInteger();
2189 unsigned DefinedValues)
const {
2194 unsigned Index)
const {
2226 unsigned Action = getPreferredHvxVectorAction(VT);
2232 if (ElemTy == MVT::i1)
2246 unsigned Action = getCustomHvxOperationAction(
Op);
2253std::pair<SDValue, int>
2254HexagonTargetLowering::getBaseAndOffset(
SDValue Addr)
const {
2257 if (
auto *CN = dyn_cast<const ConstantSDNode>(Op1.
getNode()))
2258 return {
Addr.getOperand(0), CN->getSExtValue() };
2268 const auto *SVN = cast<ShuffleVectorSDNode>(
Op);
2270 assert(AM.
size() <= 8 &&
"Unexpected shuffle mask");
2271 unsigned VecLen = AM.
size();
2275 "HVX shuffles should be legal");
2285 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2294 if (AM[
F] >=
int(VecLen)) {
2302 for (
int M : Mask) {
2304 for (
unsigned j = 0; j != ElemBytes; ++j)
2307 for (
unsigned j = 0; j != ElemBytes; ++j)
2320 for (
unsigned i = 0, e = ByteMask.
size(); i != e; ++i) {
2328 if (ByteMask.
size() == 4) {
2330 if (MaskIdx == (0x03020100 | MaskUnd))
2333 if (MaskIdx == (0x00010203 | MaskUnd)) {
2341 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
2342 if (MaskIdx == (0x06040200 | MaskUnd))
2343 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2344 if (MaskIdx == (0x07050301 | MaskUnd))
2345 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2348 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
2349 if (MaskIdx == (0x02000604 | MaskUnd))
2350 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2351 if (MaskIdx == (0x03010705 | MaskUnd))
2352 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2355 if (ByteMask.
size() == 8) {
2357 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2360 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2367 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2368 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2369 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2370 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2371 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2372 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2373 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2374 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2375 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2376 VectorPair
P = opSplit(Op0, dl, DAG);
2377 return getInstr(Hexagon::S2_packhl, dl, VecTy, {
P.second,
P.first}, DAG);
2381 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2382 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2383 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2384 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2392 switch (
Op.getOpcode()) {
2394 if (
SDValue S = cast<BuildVectorSDNode>(
Op)->getSplatValue())
2398 return Op.getOperand(0);
2408 switch (
Op.getOpcode()) {
2422 if (
SDValue Sp = getSplatValue(
Op.getOperand(1), DAG))
2435 if (
SDValue S = getVectorShiftByInt(
Op, DAG))
2449 MVT ResTy = ty(Res);
2467 return ShiftPartI8(
Opc, Val, Amt);
2469 auto [LoV, HiV] = opSplit(Val, dl, DAG);
2471 {ShiftPartI8(
Opc, LoV, Amt), ShiftPartI8(
Opc, HiV, Amt)});
2476 if (isa<ConstantSDNode>(
Op.getOperand(1).getNode()))
2485 MVT InpTy = ty(InpV);
2490 if (InpTy == MVT::i8) {
2491 if (ResTy == MVT::v8i1) {
2494 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2509 bool AllConst =
true;
2511 for (
unsigned i = 0, e = Values.
size(); i != e; ++i) {
2514 Consts[i] = ConstantInt::get(IntTy, 0);
2518 if (
auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2519 const ConstantInt *CI = CN->getConstantIntValue();
2521 }
else if (
auto *CN = dyn_cast<ConstantFPSDNode>(
V.getNode())) {
2522 const ConstantFP *CF = CN->getConstantFPValue();
2524 Consts[i] = ConstantInt::get(IntTy,
A.getZExtValue());
2539 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2543 if (!isUndef(Elem[
First]))
2551 return getZero(dl, VecTy, DAG);
2553 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2558 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2559 Consts[1]->getZExtValue() << 16;
2563 if (ElemTy == MVT::f16) {
2570 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
2574 if (ElemTy == MVT::i8) {
2577 uint32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2578 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2579 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2580 Consts[3]->getZExtValue() << 24;
2585 bool IsSplat =
true;
2586 for (
unsigned i =
First+1; i != Num; ++i) {
2587 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2603 for (
unsigned i = 0; i != 4; ++i) {
2613 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2618 dbgs() <<
"VecTy: " << VecTy <<
'\n';
2630 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2634 if (!isUndef(Elem[
First]))
2642 return getZero(dl, VecTy, DAG);
2645 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2646 bool IsSplat =
true;
2647 for (
unsigned i =
First+1; i != Num; ++i) {
2648 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2667 for (
unsigned i = 0; i != Num; ++i)
2668 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() &
Mask);
2677 : buildVector32(Elem.
take_front(Num/2), dl, HalfTy, DAG);
2680 : buildVector32(Elem.
drop_front(Num/2), dl, HalfTy, DAG);
2681 return getCombine(
H, L, dl, VecTy, DAG);
2688 MVT VecTy = ty(VecV);
2692 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2697 assert((VecWidth % ElemWidth) == 0);
2698 assert(VecWidth == 32 || VecWidth == 64);
2701 MVT ScalarTy = tyScalar(VecTy);
2707 if (
auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {
2708 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2709 if (VecWidth == 64 && ValWidth == 32) {
2710 assert(Off == 0 || Off == 32);
2711 ExtV =
Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
2712 }
else if (Off == 0 && (ValWidth % 8) == 0) {
2719 {VecV, WidthV, OffV});
2722 if (ty(IdxV) != MVT::i32)
2727 {VecV, WidthV, OffV});
2737HexagonTargetLowering::extractVectorPred(
SDValue VecV,
SDValue IdxV,
2742 MVT VecTy = ty(VecV);
2746 "Vector elements should equal vector width size");
2747 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2758 if (ValWidth == 1) {
2759 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2769 unsigned Scale = VecWidth / ValWidth;
2773 assert(ty(IdxV) == MVT::i32);
2774 unsigned VecRep = 8 / VecWidth;
2782 T1 = LoHalf(T1, DAG);
2783 T1 = expandPredicate(T1, dl, DAG);
2794 MVT VecTy = ty(VecV);
2796 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2800 assert(VecWidth == 32 || VecWidth == 64);
2801 assert((VecWidth % ValWidth) == 0);
2817 unsigned W =
C->getZExtValue() * ValWidth;
2820 {VecV, ValV, WidthV, OffV});
2822 if (ty(IdxV) != MVT::i32)
2826 {VecV, ValV, WidthV, OffV});
2833HexagonTargetLowering::insertVectorPred(
SDValue VecV,
SDValue ValV,
2836 MVT VecTy = ty(VecV);
2839 if (ValTy == MVT::i1) {
2840 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2846 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {
Ins}, DAG);
2857 for (
unsigned R = Scale;
R > 1;
R /= 2) {
2858 ValR = contractPredicate(ValR, dl, DAG);
2859 ValR = getCombine(DAG.
getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
2871HexagonTargetLowering::expandPredicate(
SDValue Vec32,
const SDLoc &dl,
2873 assert(ty(Vec32).getSizeInBits() == 32);
2882HexagonTargetLowering::contractPredicate(
SDValue Vec64,
const SDLoc &dl,
2884 assert(ty(Vec64).getSizeInBits() == 64);
2890 {0, 2, 4, 6, 1, 3, 5, 7});
2891 return extractVector(S, DAG.
getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
2915 MVT ValTy = ty(Val);
2920 if (ValLen == ResLen)
2923 const SDLoc &dl(Val);
2925 assert(ResLen % ValLen == 0);
2928 for (
unsigned i = 1, e = ResLen / ValLen; i <
e; ++i)
2937 MVT ElemTy = ty(
Hi);
2966 return buildVector32(Ops, dl, VecTy, DAG);
2968 return buildVector64(Ops, dl, VecTy, DAG);
2970 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2972 bool All0 =
true, All1 =
true;
2974 auto *CN = dyn_cast<ConstantSDNode>(
P.getNode());
2975 if (CN ==
nullptr) {
2976 All0 = All1 =
false;
2992 SDValue Z = getZero(dl, MVT::i32, DAG);
2995 for (
unsigned i = 0; i != 8; ++i) {
2997 Rs[i] = DAG.
getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
3000 for (
unsigned i = 0, e =
A.size()/2; i != e; ++i)
3004 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
3017 return getCombine(
Op.getOperand(1),
Op.getOperand(0), dl, VecTy, DAG);
3021 if (ElemTy == MVT::i1) {
3022 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
3023 MVT OpTy = ty(
Op.getOperand(0));
3036 for (
SDValue P :
Op.getNode()->op_values()) {
3038 for (
unsigned R = Scale; R > 1; R /= 2) {
3039 W = contractPredicate(W, dl, DAG);
3040 W = getCombine(DAG.
getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
3048 Words[IdxW ^ 1].
clear();
3050 for (
unsigned i = 0, e = Words[IdxW].
size(); i != e; i += 2) {
3051 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
3054 {W0, W1, WidthV, WidthV});
3062 assert(Scale == 2 && Words[IdxW].
size() == 2);
3064 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
3076 return extractVector(Vec,
Op.getOperand(1),
SDLoc(
Op), ElemTy, ty(
Op), DAG);
3082 return extractVector(
Op.getOperand(0),
Op.getOperand(1),
SDLoc(
Op),
3083 ty(
Op), ty(
Op), DAG);
3089 return insertVector(
Op.getOperand(0),
Op.getOperand(1),
Op.getOperand(2),
3097 return insertVector(
Op.getOperand(0), ValV,
Op.getOperand(2),
3122 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3129 LN = cast<LoadSDNode>(NL.
getNode());
3133 if (!validateConstPtrAlignment(LN->
getBasePtr(), ClaimAlign, dl, DAG))
3134 return replaceMemWithUndef(
Op, DAG);
3140 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
3159 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3161 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
3168 SN = cast<StoreSDNode>(NS.getNode());
3172 if (!validateConstPtrAlignment(SN->
getBasePtr(), ClaimAlign, dl, DAG))
3173 return replaceMemWithUndef(
Op, DAG);
3177 if (ClaimAlign < NeedAlign)
3186 MVT LoadTy = ty(
Op);
3189 if (HaveAlign >= NeedAlign)
3198 bool DoDefault =
false;
3209 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3228 unsigned LoadLen = NeedAlign;
3231 auto BO = getBaseAndOffset(
Base);
3232 unsigned BaseOpc = BO.first.getOpcode();
3236 if (BO.second % LoadLen != 0) {
3238 DAG.
getConstant(BO.second % LoadLen, dl, MVT::i32));
3239 BO.second -= BO.second % LoadLen;
3254 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen,
Align(LoadLen),
3255 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
3256 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
3273 auto *CY = dyn_cast<ConstantSDNode>(
Y);
3281 unsigned Opc =
Op.getOpcode();
3285 assert(VY != 0 &&
"This should have been folded");
3310 unsigned Opc =
Op.getOpcode();
3317 EVT CarryTy =
C.getValueType();
3319 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3338 unsigned OffsetReg = Hexagon::R28;
3354 unsigned Opc =
Op.getOpcode();
3360 if (isHvxOperation(
Op.getNode(), DAG)) {
3362 if (
SDValue V = LowerHvxOperation(
Op, DAG))
3369 Op.getNode()->dumpr(&DAG);
3371 errs() <<
"Error: check for a non-legal type in this operation\n";
3425 if (isHvxOperation(
N, DAG)) {
3426 LowerHvxOperationWrapper(
N,
Results, DAG);
3432 unsigned Opc =
N->getOpcode();
3456 if (isHvxOperation(
N, DAG)) {
3457 ReplaceHvxNodeResults(
N,
Results, DAG);
3463 switch (
N->getOpcode()) {
3470 if (
N->getValueType(0) == MVT::i8) {
3471 if (
N->getOperand(0).getValueType() == MVT::v8i1) {
3472 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3473 N->getOperand(0), DAG);
3485 if (isHvxOperation(
N, DCI.
DAG)) {
3486 if (
SDValue V = PerformHvxDAGCombine(
N, DCI))
3493 unsigned Opc =
Op.getOpcode();
3499 EVT TruncTy =
Op.getValueType();
3515 switch (
P.getOpcode()) {
3519 return getZero(dl, ty(
Op), DCI.
DAG);
3532 Op.getOperand(2),
Op.getOperand(1));
3540 MVT TruncTy = ty(
Op);
3543 if (ty(Elem0) == TruncTy)
3546 if (ty(Elem0).bitsGT(TruncTy))
3553 if (ty(
Op) != MVT::i64)
3564 auto *Amt = dyn_cast<ConstantSDNode>(Shl.
getOperand(1));
3565 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
3566 unsigned A = Amt->getZExtValue();
3588 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3600 if (Constraint.
size() == 1) {
3601 switch (Constraint[0]) {
3616std::pair<unsigned, const TargetRegisterClass*>
3620 if (Constraint.
size() == 1) {
3621 switch (Constraint[0]) {
3625 return {0u,
nullptr};
3631 return {0u, &Hexagon::IntRegsRegClass};
3634 return {0u, &Hexagon::DoubleRegsRegClass};
3639 return {0u,
nullptr};
3640 return {0u, &Hexagon::ModRegsRegClass};
3644 return {0u,
nullptr};
3647 return {0u, &Hexagon::HvxQRRegClass};
3653 return {0u,
nullptr};
3655 return {0u, &Hexagon::HvxVRRegClass};
3658 return {0u, &Hexagon::HvxVRRegClass};
3659 return {0u, &Hexagon::HvxWRRegClass};
3661 return {0u, &Hexagon::HvxWRRegClass};
3665 return {0u,
nullptr};
3676 bool ForCodeSize)
const {
3689 return (BitSize > 0 && BitSize <= 64);
3718 int Scale = AM.
Scale;
3742 return Imm >= -512 && Imm <= 511;
3752 bool IsCalleeStructRet,
3753 bool IsCallerStructRet,
3760 bool CCMatch = CallerCC == CalleeCC;
3768 if (!isa<GlobalAddressSDNode>(Callee) &&
3769 !isa<ExternalSymbolSDNode>(Callee)) {
3789 if (IsCalleeStructRet || IsCallerStructRet)
3813 if (
Op.size() >= 8 &&
Op.isAligned(
Align(8)))
3815 if (
Op.size() >= 4 &&
Op.isAligned(
Align(4)))
3817 if (
Op.size() >= 2 &&
Op.isAligned(
Align(2)))
3829 return allowsHvxMemoryAccess(SVT, Flags,
Fast);
3836 unsigned *
Fast)
const {
3841 return allowsHvxMisalignedMemoryAccesses(SVT, Flags,
Fast);
3847std::pair<const TargetRegisterClass*, uint8_t>
3855 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3857 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3859 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3867 std::optional<unsigned> ByteOffset)
const {
3873 auto *L = cast<LoadSDNode>(Load);
3874 std::pair<SDValue, int> BO = getBaseAndOffset(L->getBasePtr());
3880 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3888 AdjustHvxInstrPostInstrSelection(
MI, Node);
3895 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic loads supported");
3896 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3897 : Intrinsic::hexagon_L4_loadd_locked;
3916 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic stores supported");
3917 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3918 : Intrinsic::hexagon_S4_stored_locked;
3940 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))
static cl::opt< bool > ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden, cl::init(true), cl::desc("Convert constant loads to immediate values."))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))
static Value * getBrevLdObject(Value *V)
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
Module.h — This file contains the declarations for the Module class.
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
Register const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are string literals.
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool isValidAutoIncImm(const EVT VT, const int Offset) const
Hexagon target-specific information for each MachineFunction.
int getVarArgsFrameIndex()
void setFirstNamedArgFrameIndex(int v)
void setHasEHReturn(bool H=true)
Register getStackRegister() const
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const HexagonInstrInfo * getInstrInfo() const override
const HexagonFrameLowering * getFrameLowering() const override
bool useSmallData() const
const HexagonRegisterInfo * getRegisterInfo() const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
Align getTypeAlignment(MVT Ty) const
unsigned getVectorLength() const
bool useHVX128BOps() const
bool useHVX64BOps() const
bool isEnvironmentMusl() const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Return true if the target supports a memory access of this type for the given address space and align...
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
LegalizeAction getCustomOperationAction(SDNode &Op) const override
How to legalize this custom operation?
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(const HexagonSubtarget &Subtarget, EVT VT) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const
HexagonTargetObjectFile * getObjFileLowering() const override
bool isGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM) const
Return true if this global value should be placed into small data/bss section.
Common base class shared among various IRBuilders.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
BasicBlock * GetInsertBlock() const
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
LLVM_ABI void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
unsigned getNumFixedObjects() const
Return the number of fixed objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scalars in some contexts.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ UndefinedBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and alignment.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ MO_PCREL
MO_PCREL - On a symbol operand, indicates a PC-relative relocation. Used for computing a global address.
@ MO_GOT
MO_GOT - Indicates a GOT-relative relocation.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision values.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ GLOBAL_OFFSET_TABLE
The address of the GOT.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum, which behave the same as FMINNUM_IEEE and FMAXNUM_IEEE except when either operand is sNaN.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
LLVM_ABI int getNextAvailablePluginDiagnosticKind()
Get the next available kind ID for a plugin diagnostic.
unsigned M0(unsigned Val)
constexpr unsigned BitWidth
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
unsigned Log2(Align A)
Returns the log2 of the alignment.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool isVector() const
Return true if this is a vector value type.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const