#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc(
        "Maximum number of instructions used (including code sequence "
        "to generate the value and moving the value to FPR) when "
        "materializing floating-point immediates (default = 3)"),
               "Materialize FP immediate within 2 instructions"),
               "Materialize FP immediate within 3 instructions"),
               "Materialize FP immediate within 4 instructions"),
               "Materialize FP immediate within 5 instructions"),
               "Materialize FP immediate within 6 instructions "
               "(behaves same as 5 on loongarch64)")));

    cl::desc("Trap on integer division by zero."),
  MVT GRLenVT = Subtarget.getGRLenVT();

  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)

  if (Subtarget.is64Bit()) {
  if (!Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD())
  if (Subtarget.hasBasicF()) {
  if (Subtarget.is64Bit())
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit())

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
         {MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
          MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX()) {
  if (Subtarget.hasExtLASX())
  if (Subtarget.hasLAMCAS())
  if (Subtarget.hasSCQ()) {
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerCONCAT_VECTORS(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerBITREVERSE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerSELECT(Op, DAG);
    return lowerBRCOND(Op, DAG);
  case ISD::FP_TO_FP16:
    return lowerFP_TO_FP16(Op, DAG);
  case ISD::FP16_TO_FP:
    return lowerFP16_TO_FP(Op, DAG);
  case ISD::FP_TO_BF16:
    return lowerFP_TO_BF16(Op, DAG);
  case ISD::BF16_TO_FP:
    return lowerBF16_TO_FP(Op, DAG);
  case ISD::VECREDUCE_ADD:
    return lowerVECREDUCE_ADD(Op, DAG);
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
    return lowerVECREDUCE(Op, DAG);
    return lowerConstantFP(Op, DAG);
  EVT VT = Op.getValueType();

  assert((VT == MVT::f32 && Subtarget.hasBasicF()) ||
         (VT == MVT::f64 && Subtarget.hasBasicD()));

  int InsNum = Seq.size() + ((VT == MVT::f64 && !Subtarget.is64Bit()) ? 2 : 1);
  MVT OpVT = Op.getSimpleValueType();

  unsigned LegalVecSize = 128;
  bool isLASX256Vector =

  if (isLASX256Vector) {

  for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {

  if (isLASX256Vector) {

  MVT OpVT = Op.getSimpleValueType();

  for (int i = NumEles; i > 1; i /= 2) {
    Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val);

  unsigned IsData = Op.getConstantOperandVal(4);

  return Op.getOperand(0);
  if (LHS == LHS2 && RHS == RHS2) {
  } else if (LHS == RHS2 && RHS == LHS2) {

  MVT VT = N->getSimpleValueType(0);

  if (~TrueVal == FalseVal) {

  unsigned SelOpNo = 0;

  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;

  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())

  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
    std::swap(NewConstOps[0], NewConstOps[1]);

  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);

  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
      ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);

    int64_t C = RHSC->getSExtValue();

  MVT VT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();

  if (Op.hasOneUse()) {
    unsigned UseOpc = Op->user_begin()->getOpcode();
      SDNode *BinOp = *Op->user_begin();
        return lowerSELECT(NewSel, DAG);

    if (TrueVal - 1 == FalseVal)
    if (TrueVal + 1 == FalseVal)

      RHS == TrueV && LHS == FalseV) {

  MVT GRLenVT = Subtarget.getGRLenVT();
                     Op.getOperand(0), LHS, RHS, TargetCC,
                     Op.getOperand(0), CondV, Op.getOperand(2));
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
  MVT OpVT = Op.getSimpleValueType();

  EVT ResTy = Op->getValueType(0);

  for (unsigned int i = 0; i < NewEltNum; i++) {
    unsigned RevOp = (ResTy == MVT::v16i8 || ResTy == MVT::v32i8)

  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
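  // The nested loops above appear to build a shuffle mask that reverses the
  // order of the original narrow elements within each widened element, so
  // that a bit-reversed wide element can be shuffled back into the expected
  // element order.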
  if (EltBits > 32 || EltBits == 1)

                              int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])

    for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
      if (!(Mask[i] == -1 || Mask[i] == Low))

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
    Scale = ByteShift ? Scale / 2 : Scale;
    return (int)ShiftAmt;

  unsigned MaxWidth = 128;
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
                                  const APInt &Zeroable) {
  int Size = Mask.size();
      Mask, Size, Zeroable);
         "Illegal integer vector type");

template <typename ValType>
                                  unsigned CheckStride,
                                  ValType ExpectedIndex,
                                  unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)

  int Size = Mask.size();
  int ScalarSizeInBits = VectorSizeInBits / Size;
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
  (void)ScalarSizeInBits;

  for (int i = 0; i < Size; ++i) {
    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {

  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == -1 || Mask[i] >= 0);
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)

  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;

  for (int i = 0; i < NumElts; ++i) {
    int M = RepeatedMask[i];
    assert((M == -1 || (0 <= M && M < (2 * NumElts))) &&
           "Unexpected mask index.");
    int StartIdx = i - (M % NumElts);
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
    SDValue MaskV = M < NumElts ? V1 : V2;
    else if (TargetV != MaskV)

  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  return Rotation * Scale;
  if (ByteRotation <= 0)

  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;
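  // A byte rotation can seemingly be emulated by shifting one input left,
  // shifting the other input right by the complementary byte count, and
  // OR-ing the two partial results together.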
                                              const APInt &Zeroable) {
    for (int i = 0; i < NumElements; i++) {
      if (i % Scale != 0) {
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
        Offset = M - (i / Scale);
        if (Offset % (NumElements / Scale))
      } else if (InputV != V)
      if (M != (Offset + (i / Scale)))

    if (Offset >= (NumElements / 2)) {
      Offset -= (NumElements / 2);
      InputV = DAG.getNode(VilVLoHi, DL, InputVT, Ext, InputV);
  } while (Scale > 1);
  for (int NumExtElements = Bits / 64; NumExtElements < NumElements;
       NumExtElements *= 2) {

  int SplatIndex = -1;
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  APInt Imm(64, SplatIndex);
  unsigned SubVecSize = 4;
  if (VT == MVT::v2f64 || VT == MVT::v2i64)

  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < SubVecSize; ++i) {
    for (unsigned j = i; j < Mask.size(); j += SubVecSize) {
        M -= 4 * (j / SubVecSize);
      if (M < 0 || M >= 4)
      if (SubMask[i] == -1)
      else if (M != -1 && M != SubMask[i])

  for (int i = SubVecSize - 1; i >= 0; --i) {
  if (VT == MVT::v2f64 || VT == MVT::v2i64)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
1978 "Vector type is unsupported for lsx!");
1980 "Two operands have different types!");
1982 "Unexpected mask size for shuffle!");
1983 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
1985 APInt KnownUndef, KnownZero;
1987 APInt Zeroable = KnownUndef | KnownZero;
2050 int SplatIndex = -1;
2051 for (
const auto &M : Mask) {
2058 if (SplatIndex == -1)
2061 const auto &Begin = Mask.begin();
2062 const auto &End = Mask.end();
2063 unsigned HalfSize = Mask.size() / 2;
2065 assert(SplatIndex < (
int)Mask.size() &&
"Out of bounds mask index");
2083 if (Mask.size() <= 4)
2093 if (Mask.size() != 8 || (VT != MVT::v8i32 && VT != MVT::v8f32))
2097 unsigned HalfSize = NumElts / 2;
2098 bool FrontLo =
true, FrontHi =
true;
2099 bool BackLo =
true, BackHi =
true;
2101 auto inRange = [](
int val,
int low,
int high) {
2102 return (val == -1) || (val >= low && val < high);
2105 for (
unsigned i = 0; i < HalfSize; ++i) {
2106 int Fronti = Mask[i];
2107 int Backi = Mask[i + HalfSize];
2109 FrontLo &=
inRange(Fronti, 0, HalfSize);
2110 FrontHi &=
inRange(Fronti, HalfSize, NumElts);
2111 BackLo &=
inRange(Backi, 0, HalfSize);
2112 BackHi &=
inRange(Backi, HalfSize, NumElts);
2118 if ((FrontLo || FrontHi) && (BackLo || BackHi))
2122 for (
unsigned i = 0; i < NumElts; ++i)
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                           Mask.size() + HalfSize - LeftSize, 1) &&
                           Mask.size() + HalfSize + LeftSize, 1))
                           Mask.size() + HalfSize - LeftSize, 1) &&
                           Mask.size() + HalfSize + LeftSize, 1))

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                           Mask.size() + HalfSize, 1))
                           Mask.size() + HalfSize, 1))

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it < MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;

  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;

  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    postMask = LowLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {

    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;

  int Size = Mask.size();
  int LaneSize = Size / 2;

  bool LaneCrossing[2] = {false, false};
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;

  if (!LaneCrossing[0] && !LaneCrossing[1])

  InLaneMask.assign(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;

                         DAG.getUNDEF(MVT::v4i64), {2, 3, 0, 1});
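  // The {2, 3, 0, 1} mask presumably swaps the two 128-bit lanes, letting a
  // lane-crossing shuffle be completed with purely in-lane operations
  // afterwards.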
2511 "Vector type is unsupported for lasx!");
2513 "Two operands have different types!");
2515 "Unexpected mask size for shuffle!");
2516 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
2517 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
2523 APInt KnownUndef, KnownZero;
2525 APInt Zeroable = KnownUndef | KnownZero;
2562 Subtarget, Zeroable)))
2578 ArrayRef<int> OrigMask = SVOp->
getMask();
2581 MVT VT =
Op.getSimpleValueType();
2585 bool V1IsUndef = V1.
isUndef();
2586 bool V2IsUndef = V2.
isUndef();
2587 if (V1IsUndef && V2IsUndef)
2600 any_of(OrigMask, [NumElements](
int M) {
return M >= NumElements; })) {
2601 SmallVector<int, 8> NewMask(OrigMask);
2602 for (
int &M : NewMask)
2603 if (M >= NumElements)
2609 int MaskUpperLimit = OrigMask.
size() * (V2IsUndef ? 1 : 2);
2610 (void)MaskUpperLimit;
2612 [&](
int M) {
return -1 <=
M &&
M < MaskUpperLimit; }) &&
2613 "Out of bounds shuffle index");
2635 std::tie(Res, Chain) =
2636 makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions,
DL, Chain);
2637 if (Subtarget.is64Bit())
2654 std::tie(Res, Chain) =
makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
2655 CallOptions,
DL, Chain);
2661 assert(Subtarget.hasBasicF() &&
"Unexpected custom legalization");
2667 makeLibCall(DAG, LC, MVT::f32,
Op.getOperand(0), CallOptions,
DL).first;
2668 if (Subtarget.is64Bit())
2675 assert(Subtarget.hasBasicF() &&
"Unexpected custom legalization");
2676 MVT VT =
Op.getSimpleValueType();
2685 return DAG.
getNode(ISD::FP_EXTEND,
DL, VT, Res);
2702 "Unsupported vector type for broadcast.");
2705 bool IsIdeneity =
true;
2707 for (
int i = 0; i !=
NumOps; i++) {
2709 if (
Op.getOpcode() != ISD::LOAD || (IdentitySrc &&
Op != IdentitySrc)) {
2721 auto ExtType = LN->getExtensionType();
2727 ? DAG.
getVTList(VT, LN->getBasePtr().getValueType(), MVT::Other)
2729 SDValue Ops[] = {LN->getChain(), LN->getBasePtr(), LN->getOffset()};
2747 for (
unsigned i = 1; i <
Ops.size(); ++i) {
2761 EVT ResTy,
unsigned first) {
2765 first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements());
2768 Node->op_begin() + first + NumElts);
2777 MVT VT =
Node->getSimpleValueType(0);
2778 EVT ResTy =
Op->getValueType(0);
2781 APInt SplatValue, SplatUndef;
2782 unsigned SplatBitSize;
2785 bool UseSameConstant =
true;
2790 if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
2791 (!Subtarget.hasExtLASX() || !Is256Vec))
2797 if (
Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
2799 SplatBitSize <= 64) {
2801 if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
2805 if (SplatBitSize == 64 && !Subtarget.is64Bit()) {
2811 if ((Is128Vec && ResTy == MVT::v4i32) ||
2812 (Is256Vec && ResTy == MVT::v8i32))
2818 switch (SplatBitSize) {
2822 ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
2825 ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
2828 ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
2831 ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
2839 if (ViaVecTy != ResTy)
2840 Result = DAG.
getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
2848 for (
unsigned i = 0; i < NumElts; ++i) {
2853 ConstantValue = Opi;
2854 else if (ConstantValue != Opi)
2855 UseSameConstant =
false;
2860 if (IsConstant && UseSameConstant && ResTy != MVT::v2f64) {
2862 for (
unsigned i = 0; i < NumElts; ++i) {
2880 BitVector UndefElements;
2881 if (
Node->getRepeatedSequence(Sequence, &UndefElements) &&
2882 UndefElements.
count() == 0) {
2886 EVT FillTy = Is256Vec
2892 fillVector(Sequence, DAG,
DL, Subtarget, FillVec, FillTy);
2895 unsigned SplatLen = NumElts / SeqLen;
2901 if (SplatEltTy == MVT::i128)
2902 SplatTy = MVT::v4i64;
2912 DL, SplatTy, SrcVec);
2927 if (ResTy == MVT::v8i32 || ResTy == MVT::v8f32 || ResTy == MVT::v4i64 ||
2928 ResTy == MVT::v4f64) {
2929 unsigned NonUndefCount = 0;
2930 for (
unsigned i = NumElts / 2; i < NumElts; ++i) {
2931 if (!
Node->getOperand(i).isUndef()) {
2933 if (NonUndefCount > 1)
2937 if (NonUndefCount == 1)
2950 VecTy, NumElts / 2);
  MVT ResVT = Op.getSimpleValueType();

  unsigned NumFreezeUndef = 0;
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  SmallSet<SDValue, 4> Undefs;
  for (unsigned i = 0; i != NumOperands; ++i) {
    assert(i < sizeof(NonZeros) * CHAR_BIT);

  if (NumNonZero > 2) {
                     Ops.slice(0, NumOperands / 2));
                     Ops.slice(NumOperands / 2));

  MVT SubVT = Op.getOperand(0).getSimpleValueType();
  for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)

LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  MVT EltVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
      DAG.getBitcast((VecTy == MVT::v4f64) ? MVT::v4i64 : VecTy, IdxVec);

LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  for (unsigned i = 0; i < NumElts; ++i)

  return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));

  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
                       "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
                       "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);

                      "be a constant integer");
  Register FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF);
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;
    int Offset = -(GRLenInBytes * 2);

  if (Op.getConstantOperandVal(0) != 0) {
        "return address can only be determined for the current frame");
  MVT GRLenVT = Subtarget.getGRLenVT();

  auto Size = Subtarget.getGRLen() / 8;
  auto *FuncInfo = MF.getInfo<LoongArchMachineFunctionInfo>();
                      MachinePointerInfo(SV));

  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =

  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =

  EVT VT = Op.getValueType();
  if (Op.getValueType() == MVT::f32 && Op0VT == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
  if (VT == MVT::f64 && Op0VT == MVT::i64 && !Subtarget.is64Bit()) {

  Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
    return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Trunc);

                                     N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");
  const GlobalValue *GV = N->getGlobal();

                                                  unsigned Opc, bool UseGOT,
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Opc == LoongArch::PseudoLA_TLS_LE && !Large)

  Args.emplace_back(Load, CallTy);
  TargetLowering::CallLoweringInfo CLI(DAG);
  const GlobalValue *GV = N->getGlobal();

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");
  assert(N->getOffset() == 0 && "unexpected offset in global node");

    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC,

template <unsigned N>
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");

LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
3616 case Intrinsic::loongarch_lsx_vpickve2gr_d:
3617 case Intrinsic::loongarch_lsx_vpickve2gr_du:
3618 case Intrinsic::loongarch_lsx_vreplvei_d:
3619 case Intrinsic::loongarch_lasx_xvrepl128vei_d:
3621 case Intrinsic::loongarch_lsx_vreplvei_w:
3622 case Intrinsic::loongarch_lasx_xvrepl128vei_w:
3623 case Intrinsic::loongarch_lasx_xvpickve2gr_d:
3624 case Intrinsic::loongarch_lasx_xvpickve2gr_du:
3625 case Intrinsic::loongarch_lasx_xvpickve_d:
3626 case Intrinsic::loongarch_lasx_xvpickve_d_f:
3628 case Intrinsic::loongarch_lasx_xvinsve0_d:
3630 case Intrinsic::loongarch_lsx_vsat_b:
3631 case Intrinsic::loongarch_lsx_vsat_bu:
3632 case Intrinsic::loongarch_lsx_vrotri_b:
3633 case Intrinsic::loongarch_lsx_vsllwil_h_b:
3634 case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
3635 case Intrinsic::loongarch_lsx_vsrlri_b:
3636 case Intrinsic::loongarch_lsx_vsrari_b:
3637 case Intrinsic::loongarch_lsx_vreplvei_h:
3638 case Intrinsic::loongarch_lasx_xvsat_b:
3639 case Intrinsic::loongarch_lasx_xvsat_bu:
3640 case Intrinsic::loongarch_lasx_xvrotri_b:
3641 case Intrinsic::loongarch_lasx_xvsllwil_h_b:
3642 case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
3643 case Intrinsic::loongarch_lasx_xvsrlri_b:
3644 case Intrinsic::loongarch_lasx_xvsrari_b:
3645 case Intrinsic::loongarch_lasx_xvrepl128vei_h:
3646 case Intrinsic::loongarch_lasx_xvpickve_w:
3647 case Intrinsic::loongarch_lasx_xvpickve_w_f:
3649 case Intrinsic::loongarch_lasx_xvinsve0_w:
3651 case Intrinsic::loongarch_lsx_vsat_h:
3652 case Intrinsic::loongarch_lsx_vsat_hu:
3653 case Intrinsic::loongarch_lsx_vrotri_h:
3654 case Intrinsic::loongarch_lsx_vsllwil_w_h:
3655 case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
3656 case Intrinsic::loongarch_lsx_vsrlri_h:
3657 case Intrinsic::loongarch_lsx_vsrari_h:
3658 case Intrinsic::loongarch_lsx_vreplvei_b:
3659 case Intrinsic::loongarch_lasx_xvsat_h:
3660 case Intrinsic::loongarch_lasx_xvsat_hu:
3661 case Intrinsic::loongarch_lasx_xvrotri_h:
3662 case Intrinsic::loongarch_lasx_xvsllwil_w_h:
3663 case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
3664 case Intrinsic::loongarch_lasx_xvsrlri_h:
3665 case Intrinsic::loongarch_lasx_xvsrari_h:
3666 case Intrinsic::loongarch_lasx_xvrepl128vei_b:
3668 case Intrinsic::loongarch_lsx_vsrlni_b_h:
3669 case Intrinsic::loongarch_lsx_vsrani_b_h:
3670 case Intrinsic::loongarch_lsx_vsrlrni_b_h:
3671 case Intrinsic::loongarch_lsx_vsrarni_b_h:
3672 case Intrinsic::loongarch_lsx_vssrlni_b_h:
3673 case Intrinsic::loongarch_lsx_vssrani_b_h:
3674 case Intrinsic::loongarch_lsx_vssrlni_bu_h:
3675 case Intrinsic::loongarch_lsx_vssrani_bu_h:
3676 case Intrinsic::loongarch_lsx_vssrlrni_b_h:
3677 case Intrinsic::loongarch_lsx_vssrarni_b_h:
3678 case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
3679 case Intrinsic::loongarch_lsx_vssrarni_bu_h:
3680 case Intrinsic::loongarch_lasx_xvsrlni_b_h:
3681 case Intrinsic::loongarch_lasx_xvsrani_b_h:
3682 case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
3683 case Intrinsic::loongarch_lasx_xvsrarni_b_h:
3684 case Intrinsic::loongarch_lasx_xvssrlni_b_h:
3685 case Intrinsic::loongarch_lasx_xvssrani_b_h:
3686 case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
3687 case Intrinsic::loongarch_lasx_xvssrani_bu_h:
3688 case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
3689 case Intrinsic::loongarch_lasx_xvssrarni_b_h:
3690 case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
3691 case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
3693 case Intrinsic::loongarch_lsx_vsat_w:
3694 case Intrinsic::loongarch_lsx_vsat_wu:
3695 case Intrinsic::loongarch_lsx_vrotri_w:
3696 case Intrinsic::loongarch_lsx_vsllwil_d_w:
3697 case Intrinsic::loongarch_lsx_vsllwil_du_wu:
3698 case Intrinsic::loongarch_lsx_vsrlri_w:
3699 case Intrinsic::loongarch_lsx_vsrari_w:
3700 case Intrinsic::loongarch_lsx_vslei_bu:
3701 case Intrinsic::loongarch_lsx_vslei_hu:
3702 case Intrinsic::loongarch_lsx_vslei_wu:
3703 case Intrinsic::loongarch_lsx_vslei_du:
3704 case Intrinsic::loongarch_lsx_vslti_bu:
3705 case Intrinsic::loongarch_lsx_vslti_hu:
3706 case Intrinsic::loongarch_lsx_vslti_wu:
3707 case Intrinsic::loongarch_lsx_vslti_du:
3708 case Intrinsic::loongarch_lsx_vbsll_v:
3709 case Intrinsic::loongarch_lsx_vbsrl_v:
3710 case Intrinsic::loongarch_lasx_xvsat_w:
3711 case Intrinsic::loongarch_lasx_xvsat_wu:
3712 case Intrinsic::loongarch_lasx_xvrotri_w:
3713 case Intrinsic::loongarch_lasx_xvsllwil_d_w:
3714 case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
3715 case Intrinsic::loongarch_lasx_xvsrlri_w:
3716 case Intrinsic::loongarch_lasx_xvsrari_w:
3717 case Intrinsic::loongarch_lasx_xvslei_bu:
3718 case Intrinsic::loongarch_lasx_xvslei_hu:
3719 case Intrinsic::loongarch_lasx_xvslei_wu:
3720 case Intrinsic::loongarch_lasx_xvslei_du:
3721 case Intrinsic::loongarch_lasx_xvslti_bu:
3722 case Intrinsic::loongarch_lasx_xvslti_hu:
3723 case Intrinsic::loongarch_lasx_xvslti_wu:
3724 case Intrinsic::loongarch_lasx_xvslti_du:
3725 case Intrinsic::loongarch_lasx_xvbsll_v:
3726 case Intrinsic::loongarch_lasx_xvbsrl_v:
3728 case Intrinsic::loongarch_lsx_vseqi_b:
3729 case Intrinsic::loongarch_lsx_vseqi_h:
3730 case Intrinsic::loongarch_lsx_vseqi_w:
3731 case Intrinsic::loongarch_lsx_vseqi_d:
3732 case Intrinsic::loongarch_lsx_vslei_b:
3733 case Intrinsic::loongarch_lsx_vslei_h:
3734 case Intrinsic::loongarch_lsx_vslei_w:
3735 case Intrinsic::loongarch_lsx_vslei_d:
3736 case Intrinsic::loongarch_lsx_vslti_b:
3737 case Intrinsic::loongarch_lsx_vslti_h:
3738 case Intrinsic::loongarch_lsx_vslti_w:
3739 case Intrinsic::loongarch_lsx_vslti_d:
3740 case Intrinsic::loongarch_lasx_xvseqi_b:
3741 case Intrinsic::loongarch_lasx_xvseqi_h:
3742 case Intrinsic::loongarch_lasx_xvseqi_w:
3743 case Intrinsic::loongarch_lasx_xvseqi_d:
3744 case Intrinsic::loongarch_lasx_xvslei_b:
3745 case Intrinsic::loongarch_lasx_xvslei_h:
3746 case Intrinsic::loongarch_lasx_xvslei_w:
3747 case Intrinsic::loongarch_lasx_xvslei_d:
3748 case Intrinsic::loongarch_lasx_xvslti_b:
3749 case Intrinsic::loongarch_lasx_xvslti_h:
3750 case Intrinsic::loongarch_lasx_xvslti_w:
3751 case Intrinsic::loongarch_lasx_xvslti_d:
3753 case Intrinsic::loongarch_lsx_vsrlni_h_w:
3754 case Intrinsic::loongarch_lsx_vsrani_h_w:
3755 case Intrinsic::loongarch_lsx_vsrlrni_h_w:
3756 case Intrinsic::loongarch_lsx_vsrarni_h_w:
3757 case Intrinsic::loongarch_lsx_vssrlni_h_w:
3758 case Intrinsic::loongarch_lsx_vssrani_h_w:
3759 case Intrinsic::loongarch_lsx_vssrlni_hu_w:
3760 case Intrinsic::loongarch_lsx_vssrani_hu_w:
3761 case Intrinsic::loongarch_lsx_vssrlrni_h_w:
3762 case Intrinsic::loongarch_lsx_vssrarni_h_w:
3763 case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
3764 case Intrinsic::loongarch_lsx_vssrarni_hu_w:
3765 case Intrinsic::loongarch_lsx_vfrstpi_b:
3766 case Intrinsic::loongarch_lsx_vfrstpi_h:
3767 case Intrinsic::loongarch_lasx_xvsrlni_h_w:
3768 case Intrinsic::loongarch_lasx_xvsrani_h_w:
3769 case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
3770 case Intrinsic::loongarch_lasx_xvsrarni_h_w:
3771 case Intrinsic::loongarch_lasx_xvssrlni_h_w:
3772 case Intrinsic::loongarch_lasx_xvssrani_h_w:
3773 case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
3774 case Intrinsic::loongarch_lasx_xvssrani_hu_w:
3775 case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
3776 case Intrinsic::loongarch_lasx_xvssrarni_h_w:
3777 case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
3778 case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
3779 case Intrinsic::loongarch_lasx_xvfrstpi_b:
3780 case Intrinsic::loongarch_lasx_xvfrstpi_h:
3782 case Intrinsic::loongarch_lsx_vsat_d:
3783 case Intrinsic::loongarch_lsx_vsat_du:
3784 case Intrinsic::loongarch_lsx_vrotri_d:
3785 case Intrinsic::loongarch_lsx_vsrlri_d:
3786 case Intrinsic::loongarch_lsx_vsrari_d:
3787 case Intrinsic::loongarch_lasx_xvsat_d:
3788 case Intrinsic::loongarch_lasx_xvsat_du:
3789 case Intrinsic::loongarch_lasx_xvrotri_d:
3790 case Intrinsic::loongarch_lasx_xvsrlri_d:
3791 case Intrinsic::loongarch_lasx_xvsrari_d:
3793 case Intrinsic::loongarch_lsx_vsrlni_w_d:
3794 case Intrinsic::loongarch_lsx_vsrani_w_d:
3795 case Intrinsic::loongarch_lsx_vsrlrni_w_d:
3796 case Intrinsic::loongarch_lsx_vsrarni_w_d:
3797 case Intrinsic::loongarch_lsx_vssrlni_w_d:
3798 case Intrinsic::loongarch_lsx_vssrani_w_d:
3799 case Intrinsic::loongarch_lsx_vssrlni_wu_d:
3800 case Intrinsic::loongarch_lsx_vssrani_wu_d:
3801 case Intrinsic::loongarch_lsx_vssrlrni_w_d:
3802 case Intrinsic::loongarch_lsx_vssrarni_w_d:
3803 case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
3804 case Intrinsic::loongarch_lsx_vssrarni_wu_d:
3805 case Intrinsic::loongarch_lasx_xvsrlni_w_d:
3806 case Intrinsic::loongarch_lasx_xvsrani_w_d:
3807 case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
3808 case Intrinsic::loongarch_lasx_xvsrarni_w_d:
3809 case Intrinsic::loongarch_lasx_xvssrlni_w_d:
3810 case Intrinsic::loongarch_lasx_xvssrani_w_d:
3811 case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
3812 case Intrinsic::loongarch_lasx_xvssrani_wu_d:
3813 case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
3814 case Intrinsic::loongarch_lasx_xvssrarni_w_d:
3815 case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
3816 case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
3818 case Intrinsic::loongarch_lsx_vsrlni_d_q:
3819 case Intrinsic::loongarch_lsx_vsrani_d_q:
3820 case Intrinsic::loongarch_lsx_vsrlrni_d_q:
3821 case Intrinsic::loongarch_lsx_vsrarni_d_q:
3822 case Intrinsic::loongarch_lsx_vssrlni_d_q:
3823 case Intrinsic::loongarch_lsx_vssrani_d_q:
3824 case Intrinsic::loongarch_lsx_vssrlni_du_q:
3825 case Intrinsic::loongarch_lsx_vssrani_du_q:
3826 case Intrinsic::loongarch_lsx_vssrlrni_d_q:
3827 case Intrinsic::loongarch_lsx_vssrarni_d_q:
3828 case Intrinsic::loongarch_lsx_vssrlrni_du_q:
3829 case Intrinsic::loongarch_lsx_vssrarni_du_q:
3830 case Intrinsic::loongarch_lasx_xvsrlni_d_q:
3831 case Intrinsic::loongarch_lasx_xvsrani_d_q:
3832 case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
3833 case Intrinsic::loongarch_lasx_xvsrarni_d_q:
3834 case Intrinsic::loongarch_lasx_xvssrlni_d_q:
3835 case Intrinsic::loongarch_lasx_xvssrani_d_q:
3836 case Intrinsic::loongarch_lasx_xvssrlni_du_q:
3837 case Intrinsic::loongarch_lasx_xvssrani_du_q:
3838 case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
3839 case Intrinsic::loongarch_lasx_xvssrarni_d_q:
3840 case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
3841 case Intrinsic::loongarch_lasx_xvssrarni_du_q:
3843 case Intrinsic::loongarch_lsx_vnori_b:
3844 case Intrinsic::loongarch_lsx_vshuf4i_b:
3845 case Intrinsic::loongarch_lsx_vshuf4i_h:
3846 case Intrinsic::loongarch_lsx_vshuf4i_w:
3847 case Intrinsic::loongarch_lasx_xvnori_b:
3848 case Intrinsic::loongarch_lasx_xvshuf4i_b:
3849 case Intrinsic::loongarch_lasx_xvshuf4i_h:
3850 case Intrinsic::loongarch_lasx_xvshuf4i_w:
3851 case Intrinsic::loongarch_lasx_xvpermi_d:
3853 case Intrinsic::loongarch_lsx_vshuf4i_d:
3854 case Intrinsic::loongarch_lsx_vpermi_w:
3855 case Intrinsic::loongarch_lsx_vbitseli_b:
3856 case Intrinsic::loongarch_lsx_vextrins_b:
3857 case Intrinsic::loongarch_lsx_vextrins_h:
3858 case Intrinsic::loongarch_lsx_vextrins_w:
3859 case Intrinsic::loongarch_lsx_vextrins_d:
3860 case Intrinsic::loongarch_lasx_xvshuf4i_d:
3861 case Intrinsic::loongarch_lasx_xvpermi_w:
3862 case Intrinsic::loongarch_lasx_xvpermi_q:
3863 case Intrinsic::loongarch_lasx_xvbitseli_b:
3864 case Intrinsic::loongarch_lasx_xvextrins_b:
3865 case Intrinsic::loongarch_lasx_xvextrins_h:
3866 case Intrinsic::loongarch_lasx_xvextrins_w:
3867 case Intrinsic::loongarch_lasx_xvextrins_d:
3869 case Intrinsic::loongarch_lsx_vrepli_b:
3870 case Intrinsic::loongarch_lsx_vrepli_h:
3871 case Intrinsic::loongarch_lsx_vrepli_w:
3872 case Intrinsic::loongarch_lsx_vrepli_d:
3873 case Intrinsic::loongarch_lasx_xvrepli_b:
3874 case Intrinsic::loongarch_lasx_xvrepli_h:
3875 case Intrinsic::loongarch_lasx_xvrepli_w:
3876 case Intrinsic::loongarch_lasx_xvrepli_d:
3878 case Intrinsic::loongarch_lsx_vldi:
3879 case Intrinsic::loongarch_lasx_xvldi:
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
3908 case Intrinsic::loongarch_crc_w_b_w:
3909 case Intrinsic::loongarch_crc_w_h_w:
3910 case Intrinsic::loongarch_crc_w_w_w:
3911 case Intrinsic::loongarch_crc_w_d_w:
3912 case Intrinsic::loongarch_crcc_w_b_w:
3913 case Intrinsic::loongarch_crcc_w_h_w:
3914 case Intrinsic::loongarch_crcc_w_w_w:
3915 case Intrinsic::loongarch_crcc_w_d_w:
3917 case Intrinsic::loongarch_csrrd_w:
3918 case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
3925 case Intrinsic::loongarch_csrwr_w:
3926 case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
                       {Chain, Op.getOperand(2),
3934 case Intrinsic::loongarch_csrxchg_w:
3935 case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
                       {Chain, Op.getOperand(2), Op.getOperand(3),
3943 case Intrinsic::loongarch_iocsrrd_d: {
3948#define IOCSRRD_CASE(NAME, NODE) \
3949 case Intrinsic::loongarch_##NAME: { \
3950 return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other}, \
3951 {Chain, Op.getOperand(2)}); \
3957 case Intrinsic::loongarch_cpucfg: {
3959 {Chain,
Op.getOperand(2)});
3961 case Intrinsic::loongarch_lddir_d: {
3962 unsigned Imm =
Op.getConstantOperandVal(3);
3967 case Intrinsic::loongarch_movfcsr2gr: {
3968 if (!Subtarget.hasBasicF())
3970 unsigned Imm =
Op.getConstantOperandVal(2);
3976 case Intrinsic::loongarch_lsx_vld:
3977 case Intrinsic::loongarch_lsx_vldrepl_b:
3978 case Intrinsic::loongarch_lasx_xvld:
3979 case Intrinsic::loongarch_lasx_xvldrepl_b:
3983 case Intrinsic::loongarch_lsx_vldrepl_h:
3984 case Intrinsic::loongarch_lasx_xvldrepl_h:
3988 Op,
"argument out of range or not a multiple of 2", DAG)
3990 case Intrinsic::loongarch_lsx_vldrepl_w:
3991 case Intrinsic::loongarch_lasx_xvldrepl_w:
3995 Op,
"argument out of range or not a multiple of 4", DAG)
3997 case Intrinsic::loongarch_lsx_vldrepl_d:
3998 case Intrinsic::loongarch_lasx_xvldrepl_d:
4002 Op,
"argument out of range or not a multiple of 8", DAG)
4013 return Op.getOperand(0);
4019 MVT GRLenVT = Subtarget.getGRLenVT();
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
4028 switch (IntrinsicEnum) {
4032 case Intrinsic::loongarch_cacop_d:
4033 case Intrinsic::loongarch_cacop_w: {
4034 if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
4036 if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
4045 case Intrinsic::loongarch_dbar: {
4052 case Intrinsic::loongarch_ibar: {
4059 case Intrinsic::loongarch_break: {
4066 case Intrinsic::loongarch_movgr2fcsr: {
4067 if (!Subtarget.hasBasicF())
4077 case Intrinsic::loongarch_syscall: {
4084#define IOCSRWR_CASE(NAME, NODE) \
4085 case Intrinsic::loongarch_##NAME: { \
4086 SDValue Op3 = Op.getOperand(3); \
4087 return Subtarget.is64Bit() \
4088 ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, \
4089 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
4090 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3)) \
4091 : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2, \
4098 case Intrinsic::loongarch_iocsrwr_d: {
4099 return !Subtarget.is64Bit()
4106#define ASRT_LE_GT_CASE(NAME) \
4107 case Intrinsic::loongarch_##NAME: { \
4108 return !Subtarget.is64Bit() \
4109 ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG) \
4114#undef ASRT_LE_GT_CASE
4115 case Intrinsic::loongarch_ldpte_d: {
4116 unsigned Imm =
Op.getConstantOperandVal(3);
4117 return !Subtarget.is64Bit()
4122 case Intrinsic::loongarch_lsx_vst:
4123 case Intrinsic::loongarch_lasx_xvst:
4127 case Intrinsic::loongarch_lasx_xvstelm_b:
4132 case Intrinsic::loongarch_lsx_vstelm_b:
4137 case Intrinsic::loongarch_lasx_xvstelm_h:
4142 Op,
"argument out of range or not a multiple of 2", DAG)
4144 case Intrinsic::loongarch_lsx_vstelm_h:
4149 Op,
"argument out of range or not a multiple of 2", DAG)
4151 case Intrinsic::loongarch_lasx_xvstelm_w:
4156 Op,
"argument out of range or not a multiple of 4", DAG)
4158 case Intrinsic::loongarch_lsx_vstelm_w:
4163 Op,
"argument out of range or not a multiple of 4", DAG)
4165 case Intrinsic::loongarch_lasx_xvstelm_d:
4170 Op,
"argument out of range or not a multiple of 8", DAG)
4172 case Intrinsic::loongarch_lsx_vstelm_d:
4177 Op,
"argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
                                                StringRef ErrorMsg,
                                                bool WithChain = true) {
  Results.push_back(N->getOperand(0));

template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);

  switch (N->getConstantOperandVal(0)) {
4410 case Intrinsic::loongarch_lsx_vpickve2gr_b:
4414 case Intrinsic::loongarch_lsx_vpickve2gr_h:
4415 case Intrinsic::loongarch_lasx_xvpickve2gr_w:
4419 case Intrinsic::loongarch_lsx_vpickve2gr_w:
4423 case Intrinsic::loongarch_lsx_vpickve2gr_bu:
4427 case Intrinsic::loongarch_lsx_vpickve2gr_hu:
4428 case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
4432 case Intrinsic::loongarch_lsx_vpickve2gr_wu:
4436 case Intrinsic::loongarch_lsx_bz_b:
4437 case Intrinsic::loongarch_lsx_bz_h:
4438 case Intrinsic::loongarch_lsx_bz_w:
4439 case Intrinsic::loongarch_lsx_bz_d:
4440 case Intrinsic::loongarch_lasx_xbz_b:
4441 case Intrinsic::loongarch_lasx_xbz_h:
4442 case Intrinsic::loongarch_lasx_xbz_w:
4443 case Intrinsic::loongarch_lasx_xbz_d:
4447 case Intrinsic::loongarch_lsx_bz_v:
4448 case Intrinsic::loongarch_lasx_xbz_v:
4452 case Intrinsic::loongarch_lsx_bnz_b:
4453 case Intrinsic::loongarch_lsx_bnz_h:
4454 case Intrinsic::loongarch_lsx_bnz_w:
4455 case Intrinsic::loongarch_lsx_bnz_d:
4456 case Intrinsic::loongarch_lasx_xbnz_b:
4457 case Intrinsic::loongarch_lasx_xbnz_h:
4458 case Intrinsic::loongarch_lasx_xbnz_w:
4459 case Intrinsic::loongarch_lasx_xbnz_d:
4463 case Intrinsic::loongarch_lsx_bnz_v:
4464 case Intrinsic::loongarch_lasx_xbnz_v:
  assert(N->getValueType(0) == MVT::i128 &&
         "AtomicCmpSwap on types less than 128 should be legal");

  switch (MemOp->getMergedOrdering()) {
    Opcode = LoongArch::PseudoCmpXchg128Acquire;
    Opcode = LoongArch::PseudoCmpXchg128;

  auto CmpVal = DAG.SplitScalar(N->getOperand(2), DL, MVT::i64, MVT::i64);
  auto NewVal = DAG.SplitScalar(N->getOperand(3), DL, MVT::i64, MVT::i64);
  SDValue Ops[] = {N->getOperand(1), CmpVal.first, CmpVal.second,
                   NewVal.first, NewVal.second, N->getOperand(0)};

      Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i64, MVT::Other),
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
        Subtarget.hasDiv32() && VT == MVT::i32
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (Src.getValueType() == MVT::f16)
      Src = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
  case ISD::BITCAST: {
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
    } else if (VT == MVT::i64 && SrcVT == MVT::f64 && !Subtarget.is64Bit()) {
                        DAG.getVTList(MVT::i32, MVT::i32), Src);
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    MVT GRLenVT = Subtarget.getGRLenVT();
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
    switch (N->getConstantOperandVal(1)) {
4663 case Intrinsic::loongarch_movfcsr2gr: {
4664 if (!Subtarget.hasBasicF()) {
4681#define CRC_CASE_EXT_BINARYOP(NAME, NODE) \
4682 case Intrinsic::loongarch_##NAME: { \
4683 SDValue NODE = DAG.getNode( \
4684 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
4685 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
4686 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
4687 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
4688 Results.push_back(NODE.getValue(1)); \
4697#undef CRC_CASE_EXT_BINARYOP
4699#define CRC_CASE_EXT_UNARYOP(NAME, NODE) \
4700 case Intrinsic::loongarch_##NAME: { \
4701 SDValue NODE = DAG.getNode( \
4702 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
4704 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
4705 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
4706 Results.push_back(NODE.getValue(1)); \
4711#undef CRC_CASE_EXT_UNARYOP
4712#define CSR_CASE(ID) \
4713 case Intrinsic::loongarch_##ID: { \
4714 if (!Subtarget.is64Bit()) \
4715 emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64); \
4723 case Intrinsic::loongarch_csrrd_w: {
4737 case Intrinsic::loongarch_csrwr_w: {
4738 unsigned Imm =
N->getConstantOperandVal(3);
4752 case Intrinsic::loongarch_csrxchg_w: {
4753 unsigned Imm =
N->getConstantOperandVal(4);
4768#define IOCSRRD_CASE(NAME, NODE) \
4769 case Intrinsic::loongarch_##NAME: { \
4770 SDValue IOCSRRDResults = \
4771 DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
4772 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
4773 Results.push_back( \
4774 DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0))); \
4775 Results.push_back(IOCSRRDResults.getValue(1)); \
4782 case Intrinsic::loongarch_cpucfg: {
4791 case Intrinsic::loongarch_lddir_d: {
4792 if (!Subtarget.is64Bit()) {
4802 if (Subtarget.is64Bit())
4804 "On LA64, only 64-bit registers can be read.");
4807 "On LA32, only 32-bit registers can be read.");
4809 Results.push_back(
N->getOperand(0));
4820 OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
4828 case ISD::ATOMIC_CMP_SWAP: {
4833 MVT VT =
N->getSimpleValueType(0);
4839 EVT InVT = In.getValueType();
  for (unsigned I = 0; I < MinElts; ++I)
    TruncMask[I] = Scale * I;
4853 unsigned WidenNumElts = 128 / In.getScalarValueSizeInBits();
4854 MVT SVT = In.getSimpleValueType().getScalarType();
4860 "Illegal vector type in truncation");
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
4885 unsigned SMIdx, SMLen;
4891 if (!Subtarget.has32S())
4913 if (SMIdx != 0 || lsb + SMLen > ValTy.getSizeInBits())
4928 if (SMIdx + SMLen > ValTy.getSizeInBits())
4947 NewOperand = FirstOperand;
4950 msb = lsb + SMLen - 1;
4954 if (FirstOperandOpc ==
ISD::SRA || FirstOperandOpc ==
ISD::SRL || lsb == 0)
4967 if (!Subtarget.has32S())
4979 SDValue FirstOperand =
N->getOperand(0);
4981 EVT ValTy =
N->getValueType(0);
4984 unsigned MaskIdx, MaskLen;
4999 if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
5015 switch (Src.getOpcode()) {
5018 return Src.getOperand(0).getValueSizeInBits() ==
Size;
5028 return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
5041 switch (Src.getOpcode()) {
5051 Src.getOpcode(),
DL, SExtVT,
5057 DL, SExtVT, Src.getOperand(0),
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  if (Src.getOpcode() != ISD::SETCC || !Src.hasOneUse())
5078 EVT CmpVT = Src.getOperand(0).getValueType();
5083 else if (Subtarget.has32S() && Subtarget.hasExtLASX() &&
5111 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
5118 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  bool UseLASX = false;
  bool PropagateSExt = false;

  if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse()) {
5164 EVT CmpVT = Src.getOperand(0).getValueType();
5173 SExtVT = MVT::v2i64;
5176 SExtVT = MVT::v4i32;
5178 SExtVT = MVT::v4i64;
5180 PropagateSExt =
true;
5184 SExtVT = MVT::v8i16;
5186 SExtVT = MVT::v8i32;
5188 PropagateSExt =
true;
5192 SExtVT = MVT::v16i8;
5194 SExtVT = MVT::v16i16;
5196 PropagateSExt =
true;
5200 SExtVT = MVT::v32i8;
5208 if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
5209 if (Src.getSimpleValueType() == MVT::v32i8) {
5217 }
else if (UseLASX) {
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  unsigned ValBits = ValTy.getSizeInBits();
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;

  bool SwapAndRetried = false;
5246 if (!Subtarget.has32S())
5252 if (ValBits != 32 && ValBits != 64)
5267 MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
5270 (MaskIdx0 + MaskLen0 <= ValBits)) {
5291 MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
5292 (MaskIdx0 + MaskLen0 <= ValBits)) {
5309 (MaskIdx0 + MaskLen0 <= 64) &&
5317 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
5318 : (MaskIdx0 + MaskLen0 - 1),
5334 (MaskIdx0 + MaskLen0 <= ValBits)) {
5357 DAG.
getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
5358 : (MaskIdx0 + MaskLen0 - 1),
5373 unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&

      N1.getOperand(0).getOpcode() == ISD::SHL &&
5415 if (!SwapAndRetried) {
5417 SwapAndRetried =
true;
5421 SwapAndRetried =
false;
5447 if (!SwapAndRetried) {
5449 SwapAndRetried =
true;
5459 switch (V.getNode()->getOpcode()) {
5471 if ((TypeNode->
getVT() == MVT::i8) || (TypeNode->
getVT() == MVT::i16)) {
5479 if ((TypeNode->
getVT() == MVT::i8) || (TypeNode->
getVT() == MVT::i16)) {
  SDNode *AndNode = N->getOperand(0).getNode();
  SDValue CmpInputValue = N->getOperand(1);

    AndInputValue1 = AndInputValue1.getOperand(0);
  if (AndInputValue2 != CmpInputValue)

                               TruncInputValue1, TruncInputValue2);
      DAG.getSetCC(SDLoc(N), N->getValueType(0), NewAnd, TruncInputValue2, CC);
      LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
    ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;

                     N->getOperand(0), LHS, RHS, CC, N->getOperand(4));

  EVT VT = N->getValueType(0);
5736 if (TrueV == FalseV)
5768 {LHS, RHS, CC, TrueV, FalseV});
template <unsigned N>
                                   bool IsSigned = false) {
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
          IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),

  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
5837 ": argument out of range.");
5847template <
unsigned N>
5850 EVT ResTy =
Node->getValueType(0);
5855 ": argument out of range.");
5864template <
unsigned N>
5867 EVT ResTy =
Node->getValueType(0);
5872 ": argument out of range.");
  switch (N->getConstantOperandVal(0)) {
5889 case Intrinsic::loongarch_lsx_vadd_b:
5890 case Intrinsic::loongarch_lsx_vadd_h:
5891 case Intrinsic::loongarch_lsx_vadd_w:
5892 case Intrinsic::loongarch_lsx_vadd_d:
5893 case Intrinsic::loongarch_lasx_xvadd_b:
5894 case Intrinsic::loongarch_lasx_xvadd_h:
5895 case Intrinsic::loongarch_lasx_xvadd_w:
5896 case Intrinsic::loongarch_lasx_xvadd_d:
5899 case Intrinsic::loongarch_lsx_vaddi_bu:
5900 case Intrinsic::loongarch_lsx_vaddi_hu:
5901 case Intrinsic::loongarch_lsx_vaddi_wu:
5902 case Intrinsic::loongarch_lsx_vaddi_du:
5903 case Intrinsic::loongarch_lasx_xvaddi_bu:
5904 case Intrinsic::loongarch_lasx_xvaddi_hu:
5905 case Intrinsic::loongarch_lasx_xvaddi_wu:
5906 case Intrinsic::loongarch_lasx_xvaddi_du:
5909 case Intrinsic::loongarch_lsx_vsub_b:
5910 case Intrinsic::loongarch_lsx_vsub_h:
5911 case Intrinsic::loongarch_lsx_vsub_w:
5912 case Intrinsic::loongarch_lsx_vsub_d:
5913 case Intrinsic::loongarch_lasx_xvsub_b:
5914 case Intrinsic::loongarch_lasx_xvsub_h:
5915 case Intrinsic::loongarch_lasx_xvsub_w:
5916 case Intrinsic::loongarch_lasx_xvsub_d:
5919 case Intrinsic::loongarch_lsx_vsubi_bu:
5920 case Intrinsic::loongarch_lsx_vsubi_hu:
5921 case Intrinsic::loongarch_lsx_vsubi_wu:
5922 case Intrinsic::loongarch_lsx_vsubi_du:
5923 case Intrinsic::loongarch_lasx_xvsubi_bu:
5924 case Intrinsic::loongarch_lasx_xvsubi_hu:
5925 case Intrinsic::loongarch_lasx_xvsubi_wu:
5926 case Intrinsic::loongarch_lasx_xvsubi_du:
5929 case Intrinsic::loongarch_lsx_vneg_b:
5930 case Intrinsic::loongarch_lsx_vneg_h:
5931 case Intrinsic::loongarch_lsx_vneg_w:
5932 case Intrinsic::loongarch_lsx_vneg_d:
5933 case Intrinsic::loongarch_lasx_xvneg_b:
5934 case Intrinsic::loongarch_lasx_xvneg_h:
5935 case Intrinsic::loongarch_lasx_xvneg_w:
5936 case Intrinsic::loongarch_lasx_xvneg_d:
                       APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
                   SDLoc(N), N->getValueType(0)),
5944 case Intrinsic::loongarch_lsx_vmax_b:
5945 case Intrinsic::loongarch_lsx_vmax_h:
5946 case Intrinsic::loongarch_lsx_vmax_w:
5947 case Intrinsic::loongarch_lsx_vmax_d:
5948 case Intrinsic::loongarch_lasx_xvmax_b:
5949 case Intrinsic::loongarch_lasx_xvmax_h:
5950 case Intrinsic::loongarch_lasx_xvmax_w:
5951 case Intrinsic::loongarch_lasx_xvmax_d:
5954 case Intrinsic::loongarch_lsx_vmax_bu:
5955 case Intrinsic::loongarch_lsx_vmax_hu:
5956 case Intrinsic::loongarch_lsx_vmax_wu:
5957 case Intrinsic::loongarch_lsx_vmax_du:
5958 case Intrinsic::loongarch_lasx_xvmax_bu:
5959 case Intrinsic::loongarch_lasx_xvmax_hu:
5960 case Intrinsic::loongarch_lasx_xvmax_wu:
5961 case Intrinsic::loongarch_lasx_xvmax_du:
5964 case Intrinsic::loongarch_lsx_vmaxi_b:
5965 case Intrinsic::loongarch_lsx_vmaxi_h:
5966 case Intrinsic::loongarch_lsx_vmaxi_w:
5967 case Intrinsic::loongarch_lsx_vmaxi_d:
5968 case Intrinsic::loongarch_lasx_xvmaxi_b:
5969 case Intrinsic::loongarch_lasx_xvmaxi_h:
5970 case Intrinsic::loongarch_lasx_xvmaxi_w:
5971 case Intrinsic::loongarch_lasx_xvmaxi_d:
5974 case Intrinsic::loongarch_lsx_vmaxi_bu:
5975 case Intrinsic::loongarch_lsx_vmaxi_hu:
5976 case Intrinsic::loongarch_lsx_vmaxi_wu:
5977 case Intrinsic::loongarch_lsx_vmaxi_du:
5978 case Intrinsic::loongarch_lasx_xvmaxi_bu:
5979 case Intrinsic::loongarch_lasx_xvmaxi_hu:
5980 case Intrinsic::loongarch_lasx_xvmaxi_wu:
5981 case Intrinsic::loongarch_lasx_xvmaxi_du:
5984 case Intrinsic::loongarch_lsx_vmin_b:
5985 case Intrinsic::loongarch_lsx_vmin_h:
5986 case Intrinsic::loongarch_lsx_vmin_w:
5987 case Intrinsic::loongarch_lsx_vmin_d:
5988 case Intrinsic::loongarch_lasx_xvmin_b:
5989 case Intrinsic::loongarch_lasx_xvmin_h:
5990 case Intrinsic::loongarch_lasx_xvmin_w:
5991 case Intrinsic::loongarch_lasx_xvmin_d:
5994 case Intrinsic::loongarch_lsx_vmin_bu:
5995 case Intrinsic::loongarch_lsx_vmin_hu:
5996 case Intrinsic::loongarch_lsx_vmin_wu:
5997 case Intrinsic::loongarch_lsx_vmin_du:
5998 case Intrinsic::loongarch_lasx_xvmin_bu:
5999 case Intrinsic::loongarch_lasx_xvmin_hu:
6000 case Intrinsic::loongarch_lasx_xvmin_wu:
6001 case Intrinsic::loongarch_lasx_xvmin_du:
6004 case Intrinsic::loongarch_lsx_vmini_b:
6005 case Intrinsic::loongarch_lsx_vmini_h:
6006 case Intrinsic::loongarch_lsx_vmini_w:
6007 case Intrinsic::loongarch_lsx_vmini_d:
6008 case Intrinsic::loongarch_lasx_xvmini_b:
6009 case Intrinsic::loongarch_lasx_xvmini_h:
6010 case Intrinsic::loongarch_lasx_xvmini_w:
6011 case Intrinsic::loongarch_lasx_xvmini_d:
6014 case Intrinsic::loongarch_lsx_vmini_bu:
6015 case Intrinsic::loongarch_lsx_vmini_hu:
6016 case Intrinsic::loongarch_lsx_vmini_wu:
6017 case Intrinsic::loongarch_lsx_vmini_du:
6018 case Intrinsic::loongarch_lasx_xvmini_bu:
6019 case Intrinsic::loongarch_lasx_xvmini_hu:
6020 case Intrinsic::loongarch_lasx_xvmini_wu:
6021 case Intrinsic::loongarch_lasx_xvmini_du:
6024 case Intrinsic::loongarch_lsx_vmul_b:
6025 case Intrinsic::loongarch_lsx_vmul_h:
6026 case Intrinsic::loongarch_lsx_vmul_w:
6027 case Intrinsic::loongarch_lsx_vmul_d:
6028 case Intrinsic::loongarch_lasx_xvmul_b:
6029 case Intrinsic::loongarch_lasx_xvmul_h:
6030 case Intrinsic::loongarch_lasx_xvmul_w:
6031 case Intrinsic::loongarch_lasx_xvmul_d:
6034 case Intrinsic::loongarch_lsx_vmadd_b:
6035 case Intrinsic::loongarch_lsx_vmadd_h:
6036 case Intrinsic::loongarch_lsx_vmadd_w:
6037 case Intrinsic::loongarch_lsx_vmadd_d:
6038 case Intrinsic::loongarch_lasx_xvmadd_b:
6039 case Intrinsic::loongarch_lasx_xvmadd_h:
6040 case Intrinsic::loongarch_lasx_xvmadd_w:
6041 case Intrinsic::loongarch_lasx_xvmadd_d: {
EVT ResTy = N->getValueType(0);
6047 case Intrinsic::loongarch_lsx_vmsub_b:
6048 case Intrinsic::loongarch_lsx_vmsub_h:
6049 case Intrinsic::loongarch_lsx_vmsub_w:
6050 case Intrinsic::loongarch_lsx_vmsub_d:
6051 case Intrinsic::loongarch_lasx_xvmsub_b:
6052 case Intrinsic::loongarch_lasx_xvmsub_h:
6053 case Intrinsic::loongarch_lasx_xvmsub_w:
6054 case Intrinsic::loongarch_lasx_xvmsub_d: {
EVT ResTy = N->getValueType(0);
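// --- Editor's sketch of the element-wise semantics the [x]vmadd/[x]vmsub
// combines above express: accumulator plus (or minus) the lane-wise product.
// Plain C++; unsigned arithmetic models the wrapping integer vector lanes.
#include <cstdint>
#include <vector>

void mulAccumulate(std::vector<uint32_t> &Acc, const std::vector<uint32_t> &A,
                   const std::vector<uint32_t> &B, bool Subtract) {
  for (size_t I = 0; I < Acc.size(); ++I)
    Acc[I] = Subtract ? Acc[I] - A[I] * B[I] : Acc[I] + A[I] * B[I];
}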
6060 case Intrinsic::loongarch_lsx_vdiv_b:
6061 case Intrinsic::loongarch_lsx_vdiv_h:
6062 case Intrinsic::loongarch_lsx_vdiv_w:
6063 case Intrinsic::loongarch_lsx_vdiv_d:
6064 case Intrinsic::loongarch_lasx_xvdiv_b:
6065 case Intrinsic::loongarch_lasx_xvdiv_h:
6066 case Intrinsic::loongarch_lasx_xvdiv_w:
6067 case Intrinsic::loongarch_lasx_xvdiv_d:
6070 case Intrinsic::loongarch_lsx_vdiv_bu:
6071 case Intrinsic::loongarch_lsx_vdiv_hu:
6072 case Intrinsic::loongarch_lsx_vdiv_wu:
6073 case Intrinsic::loongarch_lsx_vdiv_du:
6074 case Intrinsic::loongarch_lasx_xvdiv_bu:
6075 case Intrinsic::loongarch_lasx_xvdiv_hu:
6076 case Intrinsic::loongarch_lasx_xvdiv_wu:
6077 case Intrinsic::loongarch_lasx_xvdiv_du:
6080 case Intrinsic::loongarch_lsx_vmod_b:
6081 case Intrinsic::loongarch_lsx_vmod_h:
6082 case Intrinsic::loongarch_lsx_vmod_w:
6083 case Intrinsic::loongarch_lsx_vmod_d:
6084 case Intrinsic::loongarch_lasx_xvmod_b:
6085 case Intrinsic::loongarch_lasx_xvmod_h:
6086 case Intrinsic::loongarch_lasx_xvmod_w:
6087 case Intrinsic::loongarch_lasx_xvmod_d:
6090 case Intrinsic::loongarch_lsx_vmod_bu:
6091 case Intrinsic::loongarch_lsx_vmod_hu:
6092 case Intrinsic::loongarch_lsx_vmod_wu:
6093 case Intrinsic::loongarch_lsx_vmod_du:
6094 case Intrinsic::loongarch_lasx_xvmod_bu:
6095 case Intrinsic::loongarch_lasx_xvmod_hu:
6096 case Intrinsic::loongarch_lasx_xvmod_wu:
6097 case Intrinsic::loongarch_lasx_xvmod_du:
6100 case Intrinsic::loongarch_lsx_vand_v:
6101 case Intrinsic::loongarch_lasx_xvand_v:
6104 case Intrinsic::loongarch_lsx_vor_v:
6105 case Intrinsic::loongarch_lasx_xvor_v:
6108 case Intrinsic::loongarch_lsx_vxor_v:
6109 case Intrinsic::loongarch_lasx_xvxor_v:
6112 case Intrinsic::loongarch_lsx_vnor_v:
6113 case Intrinsic::loongarch_lasx_xvnor_v: {
6118 case Intrinsic::loongarch_lsx_vandi_b:
6119 case Intrinsic::loongarch_lasx_xvandi_b:
6122 case Intrinsic::loongarch_lsx_vori_b:
6123 case Intrinsic::loongarch_lasx_xvori_b:
6126 case Intrinsic::loongarch_lsx_vxori_b:
6127 case Intrinsic::loongarch_lasx_xvxori_b:
6130 case Intrinsic::loongarch_lsx_vsll_b:
6131 case Intrinsic::loongarch_lsx_vsll_h:
6132 case Intrinsic::loongarch_lsx_vsll_w:
6133 case Intrinsic::loongarch_lsx_vsll_d:
6134 case Intrinsic::loongarch_lasx_xvsll_b:
6135 case Intrinsic::loongarch_lasx_xvsll_h:
6136 case Intrinsic::loongarch_lasx_xvsll_w:
6137 case Intrinsic::loongarch_lasx_xvsll_d:
6140 case Intrinsic::loongarch_lsx_vslli_b:
6141 case Intrinsic::loongarch_lasx_xvslli_b:
6144 case Intrinsic::loongarch_lsx_vslli_h:
6145 case Intrinsic::loongarch_lasx_xvslli_h:
6148 case Intrinsic::loongarch_lsx_vslli_w:
6149 case Intrinsic::loongarch_lasx_xvslli_w:
6152 case Intrinsic::loongarch_lsx_vslli_d:
6153 case Intrinsic::loongarch_lasx_xvslli_d:
6156 case Intrinsic::loongarch_lsx_vsrl_b:
6157 case Intrinsic::loongarch_lsx_vsrl_h:
6158 case Intrinsic::loongarch_lsx_vsrl_w:
6159 case Intrinsic::loongarch_lsx_vsrl_d:
6160 case Intrinsic::loongarch_lasx_xvsrl_b:
6161 case Intrinsic::loongarch_lasx_xvsrl_h:
6162 case Intrinsic::loongarch_lasx_xvsrl_w:
6163 case Intrinsic::loongarch_lasx_xvsrl_d:
6166 case Intrinsic::loongarch_lsx_vsrli_b:
6167 case Intrinsic::loongarch_lasx_xvsrli_b:
6170 case Intrinsic::loongarch_lsx_vsrli_h:
6171 case Intrinsic::loongarch_lasx_xvsrli_h:
6174 case Intrinsic::loongarch_lsx_vsrli_w:
6175 case Intrinsic::loongarch_lasx_xvsrli_w:
6178 case Intrinsic::loongarch_lsx_vsrli_d:
6179 case Intrinsic::loongarch_lasx_xvsrli_d:
6182 case Intrinsic::loongarch_lsx_vsra_b:
6183 case Intrinsic::loongarch_lsx_vsra_h:
6184 case Intrinsic::loongarch_lsx_vsra_w:
6185 case Intrinsic::loongarch_lsx_vsra_d:
6186 case Intrinsic::loongarch_lasx_xvsra_b:
6187 case Intrinsic::loongarch_lasx_xvsra_h:
6188 case Intrinsic::loongarch_lasx_xvsra_w:
6189 case Intrinsic::loongarch_lasx_xvsra_d:
6192 case Intrinsic::loongarch_lsx_vsrai_b:
6193 case Intrinsic::loongarch_lasx_xvsrai_b:
6196 case Intrinsic::loongarch_lsx_vsrai_h:
6197 case Intrinsic::loongarch_lasx_xvsrai_h:
6200 case Intrinsic::loongarch_lsx_vsrai_w:
6201 case Intrinsic::loongarch_lasx_xvsrai_w:
6204 case Intrinsic::loongarch_lsx_vsrai_d:
6205 case Intrinsic::loongarch_lasx_xvsrai_d:
6208 case Intrinsic::loongarch_lsx_vclz_b:
6209 case Intrinsic::loongarch_lsx_vclz_h:
6210 case Intrinsic::loongarch_lsx_vclz_w:
6211 case Intrinsic::loongarch_lsx_vclz_d:
6212 case Intrinsic::loongarch_lasx_xvclz_b:
6213 case Intrinsic::loongarch_lasx_xvclz_h:
6214 case Intrinsic::loongarch_lasx_xvclz_w:
6215 case Intrinsic::loongarch_lasx_xvclz_d:
6217 case Intrinsic::loongarch_lsx_vpcnt_b:
6218 case Intrinsic::loongarch_lsx_vpcnt_h:
6219 case Intrinsic::loongarch_lsx_vpcnt_w:
6220 case Intrinsic::loongarch_lsx_vpcnt_d:
6221 case Intrinsic::loongarch_lasx_xvpcnt_b:
6222 case Intrinsic::loongarch_lasx_xvpcnt_h:
6223 case Intrinsic::loongarch_lasx_xvpcnt_w:
6224 case Intrinsic::loongarch_lasx_xvpcnt_d:
6226 case Intrinsic::loongarch_lsx_vbitclr_b:
6227 case Intrinsic::loongarch_lsx_vbitclr_h:
6228 case Intrinsic::loongarch_lsx_vbitclr_w:
6229 case Intrinsic::loongarch_lsx_vbitclr_d:
6230 case Intrinsic::loongarch_lasx_xvbitclr_b:
6231 case Intrinsic::loongarch_lasx_xvbitclr_h:
6232 case Intrinsic::loongarch_lasx_xvbitclr_w:
6233 case Intrinsic::loongarch_lasx_xvbitclr_d:
6235 case Intrinsic::loongarch_lsx_vbitclri_b:
6236 case Intrinsic::loongarch_lasx_xvbitclri_b:
6238 case Intrinsic::loongarch_lsx_vbitclri_h:
6239 case Intrinsic::loongarch_lasx_xvbitclri_h:
6241 case Intrinsic::loongarch_lsx_vbitclri_w:
6242 case Intrinsic::loongarch_lasx_xvbitclri_w:
6244 case Intrinsic::loongarch_lsx_vbitclri_d:
6245 case Intrinsic::loongarch_lasx_xvbitclri_d:
6247 case Intrinsic::loongarch_lsx_vbitset_b:
6248 case Intrinsic::loongarch_lsx_vbitset_h:
6249 case Intrinsic::loongarch_lsx_vbitset_w:
6250 case Intrinsic::loongarch_lsx_vbitset_d:
6251 case Intrinsic::loongarch_lasx_xvbitset_b:
6252 case Intrinsic::loongarch_lasx_xvbitset_h:
6253 case Intrinsic::loongarch_lasx_xvbitset_w:
6254 case Intrinsic::loongarch_lasx_xvbitset_d: {
EVT VecTy = N->getValueType(0);
6261 case Intrinsic::loongarch_lsx_vbitseti_b:
6262 case Intrinsic::loongarch_lasx_xvbitseti_b:
6264 case Intrinsic::loongarch_lsx_vbitseti_h:
6265 case Intrinsic::loongarch_lasx_xvbitseti_h:
6267 case Intrinsic::loongarch_lsx_vbitseti_w:
6268 case Intrinsic::loongarch_lasx_xvbitseti_w:
6270 case Intrinsic::loongarch_lsx_vbitseti_d:
6271 case Intrinsic::loongarch_lasx_xvbitseti_d:
6273 case Intrinsic::loongarch_lsx_vbitrev_b:
6274 case Intrinsic::loongarch_lsx_vbitrev_h:
6275 case Intrinsic::loongarch_lsx_vbitrev_w:
6276 case Intrinsic::loongarch_lsx_vbitrev_d:
6277 case Intrinsic::loongarch_lasx_xvbitrev_b:
6278 case Intrinsic::loongarch_lasx_xvbitrev_h:
6279 case Intrinsic::loongarch_lasx_xvbitrev_w:
6280 case Intrinsic::loongarch_lasx_xvbitrev_d: {
EVT VecTy = N->getValueType(0);
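// --- Editor's sketch of the per-element semantics behind the [x]vbitclr,
// [x]vbitset and [x]vbitrev combines above: the second operand selects a bit
// position modulo the element width, and that bit is cleared, set or flipped.
#include <cstdint>

uint32_t bitClearLane(uint32_t X, uint32_t B) { return X & ~(1u << (B % 32)); }
uint32_t bitSetLane(uint32_t X, uint32_t B)   { return X |  (1u << (B % 32)); }
uint32_t bitRevLane(uint32_t X, uint32_t B)   { return X ^  (1u << (B % 32)); }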
6287 case Intrinsic::loongarch_lsx_vbitrevi_b:
6288 case Intrinsic::loongarch_lasx_xvbitrevi_b:
6290 case Intrinsic::loongarch_lsx_vbitrevi_h:
6291 case Intrinsic::loongarch_lasx_xvbitrevi_h:
6293 case Intrinsic::loongarch_lsx_vbitrevi_w:
6294 case Intrinsic::loongarch_lasx_xvbitrevi_w:
6296 case Intrinsic::loongarch_lsx_vbitrevi_d:
6297 case Intrinsic::loongarch_lasx_xvbitrevi_d:
6299 case Intrinsic::loongarch_lsx_vfadd_s:
6300 case Intrinsic::loongarch_lsx_vfadd_d:
6301 case Intrinsic::loongarch_lasx_xvfadd_s:
6302 case Intrinsic::loongarch_lasx_xvfadd_d:
6305 case Intrinsic::loongarch_lsx_vfsub_s:
6306 case Intrinsic::loongarch_lsx_vfsub_d:
6307 case Intrinsic::loongarch_lasx_xvfsub_s:
6308 case Intrinsic::loongarch_lasx_xvfsub_d:
6311 case Intrinsic::loongarch_lsx_vfmul_s:
6312 case Intrinsic::loongarch_lsx_vfmul_d:
6313 case Intrinsic::loongarch_lasx_xvfmul_s:
6314 case Intrinsic::loongarch_lasx_xvfmul_d:
6317 case Intrinsic::loongarch_lsx_vfdiv_s:
6318 case Intrinsic::loongarch_lsx_vfdiv_d:
6319 case Intrinsic::loongarch_lasx_xvfdiv_s:
6320 case Intrinsic::loongarch_lasx_xvfdiv_d:
6323 case Intrinsic::loongarch_lsx_vfmadd_s:
6324 case Intrinsic::loongarch_lsx_vfmadd_d:
6325 case Intrinsic::loongarch_lasx_xvfmadd_s:
6326 case Intrinsic::loongarch_lasx_xvfmadd_d:
N->getOperand(2), N->getOperand(3));
6329 case Intrinsic::loongarch_lsx_vinsgr2vr_b:
N->getOperand(1), N->getOperand(2),
6333 case Intrinsic::loongarch_lsx_vinsgr2vr_h:
6334 case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
N->getOperand(1), N->getOperand(2),
6338 case Intrinsic::loongarch_lsx_vinsgr2vr_w:
6339 case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
N->getOperand(1), N->getOperand(2),
6343 case Intrinsic::loongarch_lsx_vinsgr2vr_d:
N->getOperand(1), N->getOperand(2),
6347 case Intrinsic::loongarch_lsx_vreplgr2vr_b:
6348 case Intrinsic::loongarch_lsx_vreplgr2vr_h:
6349 case Intrinsic::loongarch_lsx_vreplgr2vr_w:
6350 case Intrinsic::loongarch_lsx_vreplgr2vr_d:
6351 case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
6352 case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
6353 case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
6354 case Intrinsic::loongarch_lasx_xvreplgr2vr_d:
6358 case Intrinsic::loongarch_lsx_vreplve_b:
6359 case Intrinsic::loongarch_lsx_vreplve_h:
6360 case Intrinsic::loongarch_lsx_vreplve_w:
6361 case Intrinsic::loongarch_lsx_vreplve_d:
6362 case Intrinsic::loongarch_lasx_xvreplve_b:
6363 case Intrinsic::loongarch_lasx_xvreplve_h:
6364 case Intrinsic::loongarch_lasx_xvreplve_w:
6365 case Intrinsic::loongarch_lasx_xvreplve_d:
6395 "Unexpected value type!");
MVT VT = N->getSimpleValueType(0);
APInt V = C->getValueAPF().bitcastToAPInt();
MVT EltVT = N->getSimpleValueType(0);
switch (N->getOpcode()) {
MF->insert(It, BreakMBB);
SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
6557 MBB->addSuccessor(BreakMBB);
6558 MBB->addSuccessor(SinkMBB);
6564 BreakMBB->addSuccessor(SinkMBB);
switch (MI.getOpcode()) {
6579 case LoongArch::PseudoVBZ:
6580 CondOpc = LoongArch::VSETEQZ_V;
6582 case LoongArch::PseudoVBZ_B:
6583 CondOpc = LoongArch::VSETANYEQZ_B;
6585 case LoongArch::PseudoVBZ_H:
6586 CondOpc = LoongArch::VSETANYEQZ_H;
6588 case LoongArch::PseudoVBZ_W:
6589 CondOpc = LoongArch::VSETANYEQZ_W;
6591 case LoongArch::PseudoVBZ_D:
6592 CondOpc = LoongArch::VSETANYEQZ_D;
6594 case LoongArch::PseudoVBNZ:
6595 CondOpc = LoongArch::VSETNEZ_V;
6597 case LoongArch::PseudoVBNZ_B:
6598 CondOpc = LoongArch::VSETALLNEZ_B;
6600 case LoongArch::PseudoVBNZ_H:
6601 CondOpc = LoongArch::VSETALLNEZ_H;
6603 case LoongArch::PseudoVBNZ_W:
6604 CondOpc = LoongArch::VSETALLNEZ_W;
6606 case LoongArch::PseudoVBNZ_D:
6607 CondOpc = LoongArch::VSETALLNEZ_D;
6609 case LoongArch::PseudoXVBZ:
6610 CondOpc = LoongArch::XVSETEQZ_V;
6612 case LoongArch::PseudoXVBZ_B:
6613 CondOpc = LoongArch::XVSETANYEQZ_B;
6615 case LoongArch::PseudoXVBZ_H:
6616 CondOpc = LoongArch::XVSETANYEQZ_H;
6618 case LoongArch::PseudoXVBZ_W:
6619 CondOpc = LoongArch::XVSETANYEQZ_W;
6621 case LoongArch::PseudoXVBZ_D:
6622 CondOpc = LoongArch::XVSETANYEQZ_D;
6624 case LoongArch::PseudoXVBNZ:
6625 CondOpc = LoongArch::XVSETNEZ_V;
6627 case LoongArch::PseudoXVBNZ_B:
6628 CondOpc = LoongArch::XVSETALLNEZ_B;
6630 case LoongArch::PseudoXVBNZ_H:
6631 CondOpc = LoongArch::XVSETALLNEZ_H;
6633 case LoongArch::PseudoXVBNZ_W:
6634 CondOpc = LoongArch::XVSETALLNEZ_W;
6636 case LoongArch::PseudoXVBNZ_D:
6637 CondOpc = LoongArch::XVSETALLNEZ_D;
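// --- Editor's sketch (plain C++) of the conditions the vset*/xvset* opcodes
// selected above compute over the vector lanes: "whole vector is zero",
// "some lane is zero" and "every lane is non-zero".
#include <cstdint>
#include <vector>

bool wholeVectorIsZero(const std::vector<uint8_t> &Lanes) {
  for (uint8_t L : Lanes)
    if (L != 0)
      return false;
  return true;
}
bool anyLaneIsZero(const std::vector<uint8_t> &Lanes) {
  for (uint8_t L : Lanes)
    if (L == 0)
      return true;
  return false;
}
bool allLanesNonZero(const std::vector<uint8_t> &Lanes) {
  return !anyLaneIsZero(Lanes);
}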
6652 F->insert(It, FalseBB);
6653 F->insert(It, TrueBB);
6654 F->insert(It, SinkBB);
SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
6686 MI.getOperand(0).getReg())
6693 MI.eraseFromParent();
6701 unsigned BroadcastOp;
switch (MI.getOpcode()) {
6706 case LoongArch::PseudoXVINSGR2VR_B:
6708 BroadcastOp = LoongArch::XVREPLGR2VR_B;
6709 InsOp = LoongArch::XVEXTRINS_B;
6711 case LoongArch::PseudoXVINSGR2VR_H:
6713 BroadcastOp = LoongArch::XVREPLGR2VR_H;
6714 InsOp = LoongArch::XVEXTRINS_H;
unsigned Idx = MI.getOperand(3).getImm();
if (XSrc.isVirtual() && MRI.getVRegDef(XSrc)->isImplicitDef() &&
  Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
  Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
      .addReg(XSrc, 0, LoongArch::sub_128);
          TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
                                 : LoongArch::VINSGR2VR_B),
      .addImm(LoongArch::sub_128);
Register ScratchReg1 = MRI.createVirtualRegister(RC);
Register ScratchReg2 = MRI.createVirtualRegister(RC);
BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
    .addImm(Idx >= HalfSize ? 48 : 18);
    .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
6764 MI.eraseFromParent();
6771 assert(Subtarget.hasExtLSX());
Register ScratchReg1 = MRI.createVirtualRegister(RC);
Register ScratchReg2 = MRI.createVirtualRegister(RC);
Register ScratchReg3 = MRI.createVirtualRegister(RC);
        TII->get(Subtarget.is64Bit() ? LoongArch::VINSGR2VR_D
                                     : LoongArch::VINSGR2VR_W),
        TII->get(Subtarget.is64Bit() ? LoongArch::VPCNT_D : LoongArch::VPCNT_W),
        TII->get(Subtarget.is64Bit() ? LoongArch::VPICKVE2GR_D
                                     : LoongArch::VPICKVE2GR_W),
6802 MI.eraseFromParent();
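// --- Editor's note: the PseudoCTPOP expansion above round-trips the scalar
// through a vector register (insert, vpcnt.{w,d}, extract); the net effect is
// an ordinary population count. A portable C++ sketch of that result:
#include <cstdint>

unsigned popcount64(uint64_t X) {
  unsigned N = 0;
  while (X) {
    X &= X - 1; // clear the lowest set bit each iteration
    ++N;
  }
  return N;
}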
6816 unsigned EleBits = 8;
6817 unsigned NotOpc = 0;
switch (MI.getOpcode()) {
6823 case LoongArch::PseudoVMSKLTZ_B:
6824 MskOpc = LoongArch::VMSKLTZ_B;
6826 case LoongArch::PseudoVMSKLTZ_H:
6827 MskOpc = LoongArch::VMSKLTZ_H;
6830 case LoongArch::PseudoVMSKLTZ_W:
6831 MskOpc = LoongArch::VMSKLTZ_W;
6834 case LoongArch::PseudoVMSKLTZ_D:
6835 MskOpc = LoongArch::VMSKLTZ_D;
6838 case LoongArch::PseudoVMSKGEZ_B:
6839 MskOpc = LoongArch::VMSKGEZ_B;
6841 case LoongArch::PseudoVMSKEQZ_B:
6842 MskOpc = LoongArch::VMSKNZ_B;
6843 NotOpc = LoongArch::VNOR_V;
6845 case LoongArch::PseudoVMSKNEZ_B:
6846 MskOpc = LoongArch::VMSKNZ_B;
6848 case LoongArch::PseudoXVMSKLTZ_B:
6849 MskOpc = LoongArch::XVMSKLTZ_B;
6850 RC = &LoongArch::LASX256RegClass;
6852 case LoongArch::PseudoXVMSKLTZ_H:
6853 MskOpc = LoongArch::XVMSKLTZ_H;
6854 RC = &LoongArch::LASX256RegClass;
6857 case LoongArch::PseudoXVMSKLTZ_W:
6858 MskOpc = LoongArch::XVMSKLTZ_W;
6859 RC = &LoongArch::LASX256RegClass;
6862 case LoongArch::PseudoXVMSKLTZ_D:
6863 MskOpc = LoongArch::XVMSKLTZ_D;
6864 RC = &LoongArch::LASX256RegClass;
6867 case LoongArch::PseudoXVMSKGEZ_B:
6868 MskOpc = LoongArch::XVMSKGEZ_B;
6869 RC = &LoongArch::LASX256RegClass;
6871 case LoongArch::PseudoXVMSKEQZ_B:
6872 MskOpc = LoongArch::XVMSKNZ_B;
6873 NotOpc = LoongArch::XVNOR_V;
6874 RC = &LoongArch::LASX256RegClass;
6876 case LoongArch::PseudoXVMSKNEZ_B:
6877 MskOpc = LoongArch::XVMSKNZ_B;
6878 RC = &LoongArch::LASX256RegClass;
if (TRI->getRegSizeInBits(*RC) > 128) {
  Register Lo = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
  Register Hi = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
          TII->get(Subtarget.is64Bit() ? LoongArch::BSTRINS_D
                                       : LoongArch::BSTRINS_W),
      .addImm(256 / EleBits - 1)
6916 MI.eraseFromParent();
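// --- Editor's sketch of what the [x]vmsk* expansion above produces: one bit
// per lane (here the lane's sign bit, as for vmskltz), packed into a scalar
// mask; for 256-bit vectors the low and high half-masks are concatenated,
// which is what the BSTRINS above does. Plain C++, illustrative only.
#include <cstdint>
#include <vector>

uint32_t signBitMask(const std::vector<int8_t> &Lanes) {
  uint32_t Mask = 0;
  for (size_t I = 0; I < Lanes.size() && I < 32; ++I)
    if (Lanes[I] < 0)
      Mask |= (1u << I); // bit I set when lane I is negative
  return Mask;
}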
assert(MI.getOpcode() == LoongArch::SplitPairF64Pseudo &&
       "Unexpected instruction");
MI.eraseFromParent();
assert(MI.getOpcode() == LoongArch::BuildPairF64Pseudo &&
       "Unexpected instruction");
Register TmpReg = MRI.createVirtualRegister(&LoongArch::FPR64RegClass);
6960 MI.eraseFromParent();
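// --- Editor's sketch (std::bit_cast, C++20; not the actual move-to-FPR
// instruction pair) of the BuildPairF64 semantics above: the low and high
// 32-bit halves form the 64-bit pattern of the resulting double.
#include <bit>
#include <cstdint>

double buildPairF64(uint32_t Lo, uint32_t Hi) {
  uint64_t Bits = (uint64_t(Hi) << 32) | Lo;
  return std::bit_cast<double>(Bits);
}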
switch (MI.getOpcode()) {
6968 case LoongArch::Select_GPR_Using_CC_GPR:
if (MI.getOperand(2).isReg())
  RHS = MI.getOperand(2).getReg();
auto CC = static_cast<unsigned>(MI.getOperand(3).getImm());
SelectDests.insert(MI.getOperand(0).getReg());
     SequenceMBBI != E; ++SequenceMBBI) {
  if (SequenceMBBI->isDebugInstr())
  if (SequenceMBBI->getOperand(1).getReg() != LHS ||
      !SequenceMBBI->getOperand(2).isReg() ||
      SequenceMBBI->getOperand(2).getReg() != RHS ||
      SequenceMBBI->getOperand(3).getImm() != CC ||
      SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
      SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
  LastSelectPseudo = &*SequenceMBBI;
  SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
  if (SequenceMBBI->hasUnmodeledSideEffects() ||
      SequenceMBBI->mayLoadOrStore() ||
      SequenceMBBI->usesCustomInsertionHook())
    return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
F->insert(I, IfFalseMBB);
F->insert(I, TailMBB);
unsigned CallFrameSize = TII.getCallFrameSizeAt(*LastSelectPseudo);
TailMBB->push_back(DebugInstr->removeFromParent());
TailMBB->splice(TailMBB->end(), HeadMBB,
if (MI.getOperand(2).isImm())
auto SelectMBBI = MI.getIterator();
auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7089 while (SelectMBBI != SelectEnd) {
7090 auto Next = std::next(SelectMBBI);
        TII.get(LoongArch::PHI), SelectMBBI->getOperand(0).getReg())
    .addReg(SelectMBBI->getOperand(4).getReg())
    .addReg(SelectMBBI->getOperand(5).getReg())
7104 F->getProperties().resetNoPHIs();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
switch (MI.getOpcode()) {
7116 case LoongArch::DIV_W:
7117 case LoongArch::DIV_WU:
7118 case LoongArch::MOD_W:
7119 case LoongArch::MOD_WU:
7120 case LoongArch::DIV_D:
7121 case LoongArch::DIV_DU:
7122 case LoongArch::MOD_D:
7123 case LoongArch::MOD_DU:
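// --- Editor's sketch of the guard insertDivByZeroTrap adds around the DIV/MOD
// opcodes listed above when -loongarch-check-zero-division is enabled: the
// divisor is tested and a trap is taken if it is zero. Plain C++; the
// __builtin_trap call is a GCC/Clang builtin standing in for the BNE-over-BREAK
// sequence in the emitted machine code.
#include <cstdint>

int64_t checkedSDiv(int64_t A, int64_t B) {
  if (B == 0)
    __builtin_trap();
  return A / B;
}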
case LoongArch::WRFCSR: {
          LoongArch::FCSR0 + MI.getOperand(0).getImm())
      .addReg(MI.getOperand(1).getReg());
  MI.eraseFromParent();
case LoongArch::RDFCSR: {
  MachineInstr *ReadFCSR =
              MI.getOperand(0).getReg())
          .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
7139 MI.eraseFromParent();
7142 case LoongArch::Select_GPR_Using_CC_GPR:
7144 case LoongArch::BuildPairF64Pseudo:
7146 case LoongArch::SplitPairF64Pseudo:
7148 case LoongArch::PseudoVBZ:
7149 case LoongArch::PseudoVBZ_B:
7150 case LoongArch::PseudoVBZ_H:
7151 case LoongArch::PseudoVBZ_W:
7152 case LoongArch::PseudoVBZ_D:
7153 case LoongArch::PseudoVBNZ:
7154 case LoongArch::PseudoVBNZ_B:
7155 case LoongArch::PseudoVBNZ_H:
7156 case LoongArch::PseudoVBNZ_W:
7157 case LoongArch::PseudoVBNZ_D:
7158 case LoongArch::PseudoXVBZ:
7159 case LoongArch::PseudoXVBZ_B:
7160 case LoongArch::PseudoXVBZ_H:
7161 case LoongArch::PseudoXVBZ_W:
7162 case LoongArch::PseudoXVBZ_D:
7163 case LoongArch::PseudoXVBNZ:
7164 case LoongArch::PseudoXVBNZ_B:
7165 case LoongArch::PseudoXVBNZ_H:
7166 case LoongArch::PseudoXVBNZ_W:
7167 case LoongArch::PseudoXVBNZ_D:
7169 case LoongArch::PseudoXVINSGR2VR_B:
7170 case LoongArch::PseudoXVINSGR2VR_H:
7172 case LoongArch::PseudoCTPOP:
7174 case LoongArch::PseudoVMSKLTZ_B:
7175 case LoongArch::PseudoVMSKLTZ_H:
7176 case LoongArch::PseudoVMSKLTZ_W:
7177 case LoongArch::PseudoVMSKLTZ_D:
7178 case LoongArch::PseudoVMSKGEZ_B:
7179 case LoongArch::PseudoVMSKEQZ_B:
7180 case LoongArch::PseudoVMSKNEZ_B:
7181 case LoongArch::PseudoXVMSKLTZ_B:
7182 case LoongArch::PseudoXVMSKLTZ_H:
7183 case LoongArch::PseudoXVMSKLTZ_W:
7184 case LoongArch::PseudoXVMSKLTZ_D:
7185 case LoongArch::PseudoXVMSKGEZ_B:
7186 case LoongArch::PseudoXVMSKEQZ_B:
7187 case LoongArch::PseudoXVMSKNEZ_B:
7189 case TargetOpcode::STATEPOINT:
MI.addOperand(*MI.getMF(),
              LoongArch::R1, true,
if (!Subtarget.is64Bit())
    unsigned *Fast) const {
7209 if (!Subtarget.hasUAL())
7223#define NODE_NAME_CASE(node) \
7224 case LoongArchISD::node: \
7225 return "LoongArchISD::" #node;
7330#undef NODE_NAME_CASE
7343 LoongArch::R7, LoongArch::R8, LoongArch::R9,
7344 LoongArch::R10, LoongArch::R11};
7348 LoongArch::F3, LoongArch::F4, LoongArch::F5,
7349 LoongArch::F6, LoongArch::F7};
7352 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
7353 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
7356 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
7357 LoongArch::VR6, LoongArch::VR7};
7360 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
7361 LoongArch::XR6, LoongArch::XR7};
    unsigned ValNo2, MVT ValVT2, MVT LocVT2,
7369 unsigned GRLenInBytes = GRLen / 8;
7380 State.AllocateStack(GRLenInBytes, StackAlign),
    ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
    ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
    unsigned ValNo, MVT ValVT,
unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
if (IsRet && ValNo > 1)
bool UseGPRForFloat = true;
UseGPRForFloat = ArgFlags.isVarArg();
unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
    DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
  unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
  if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
State.getPendingArgFlags();
       "PendingLocs and PendingArgFlags out of sync");
UseGPRForFloat = true;
if (UseGPRForFloat && ValVT == MVT::f32) {
} else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
} else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
  assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    PendingLocs.size() <= 2) {
  assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
  PendingLocs.clear();
  PendingArgFlags.clear();
unsigned StoreSizeBytes = GRLen / 8;
if (ValVT == MVT::f32 && !UseGPRForFloat) {
} else if (ValVT == MVT::f64 && !UseGPRForFloat) {
  UseGPRForFloat = false;
  StoreSizeBytes = 16;
  StackAlign = Align(16);
  UseGPRForFloat = false;
  StoreSizeBytes = 32;
  StackAlign = Align(32);
    Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
if (!PendingLocs.empty()) {
  assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
  for (auto &It : PendingLocs) {
    It.convertToReg(Reg);
  PendingLocs.clear();
  PendingArgFlags.clear();
assert((!UseGPRForFloat || LocVT == GRLenVT) &&
       "Expected a GRLenVT at this stage");
void LoongArchTargetLowering::analyzeInputArgs(
    LoongArchCCAssignFn Fn) const {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT ArgVT = Ins[i].VT;
    Type *ArgTy = nullptr;
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
        CCInfo, IsRet, ArgTy)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
void LoongArchTargetLowering::analyzeOutputArgs(
    CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT ArgVT = Outs[i].VT;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
        CCInfo, IsRet, OrigTy)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
if (In.isOrigArg()) {
  if ((BitWidth <= 32 && In.Flags.isSExt()) ||
      (BitWidth < 32 && In.Flags.isZExt())) {
Register LoVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
Register HiVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7757 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7761 LoongArch::R23, LoongArch::R24, LoongArch::R25,
7762 LoongArch::R26, LoongArch::R27, LoongArch::R28,
7763 LoongArch::R29, LoongArch::R30, LoongArch::R31};
7770 if (LocVT == MVT::f32) {
7773 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
7774 LoongArch::F26, LoongArch::F27};
7781 if (LocVT == MVT::f64) {
7784 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
7785 LoongArch::F30_64, LoongArch::F31_64};
7815 "GHC calling convention requires the F and D extensions");
7819 MVT GRLenVT = Subtarget.getGRLenVT();
7820 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
7822 std::vector<SDValue> OutChains;
analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
7850 unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
7851 unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
7852 assert(ArgPartOffset == 0);
7853 while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
7855 unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
7879 int VaArgOffset, VarArgsSaveSize;
if (ArgRegs.size() == Idx) {
  VarArgsSaveSize = 0;
  VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
  VaArgOffset = -VarArgsSaveSize;
LoongArchFI->setVarArgsFrameIndex(FI);
  VarArgsSaveSize += GRLenInBytes;
for (unsigned I = Idx; I < ArgRegs.size();
     ++I, VaArgOffset += GRLenInBytes) {
  const Register Reg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(ArgRegs[I], Reg);
      ->setValue((Value *)nullptr);
7919 OutChains.push_back(Store);
7921 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
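// --- Editor's sketch of the var-args save-area bookkeeping above, under the
// assumption of eight integer argument registers (a0-a7) of GRLenInBytes bytes
// each: the registers not consumed by fixed arguments are spilled at a negative
// offset from the frame. Hypothetical helper names, plain C++.
#include <cstdint>

struct VarArgArea {
  int Offset;        // offset of the save area relative to the frame
  unsigned SaveSize; // bytes reserved for unnamed register arguments
};

VarArgArea computeVarArgArea(unsigned FirstUnusedArgReg,
                             unsigned GRLenInBytes = 8,
                             unsigned NumArgRegs = 8) {
  unsigned SaveSize = GRLenInBytes * (NumArgRegs - FirstUnusedArgReg);
  return {-static_cast<int>(SaveSize), SaveSize};
}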
7926 if (!OutChains.empty()) {
7927 OutChains.push_back(Chain);
if (N->getNumValues() != 1)
if (!N->hasNUsesOfValue(1, 0))
SDNode *Copy = *N->user_begin();
if (Copy->getGluedNode())
bool HasRet = false;
7967 Chain = Copy->getOperand(0);
7972bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
7976 auto CalleeCC = CLI.CallConv;
7977 auto &Outs = CLI.Outs;
7979 auto CallerCC = Caller.getCallingConv();
for (auto &VA : ArgLocs)
auto IsCallerStructRet = Caller.hasStructRetAttr();
auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
if (IsCallerStructRet || IsCalleeStructRet)
for (auto &Arg : Outs)
  if (Arg.Flags.isByVal())
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (CalleeCC != CallerCC) {
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8033 MVT GRLenVT = Subtarget.getGRLenVT();
analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
       "site marked musttail");
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
  if (!Flags.isByVal())
  unsigned Size = Flags.getByValSize();
  Align Alignment = Flags.getNonZeroByValAlign();
  Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
                        false, nullptr, std::nullopt,
for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
  SDValue ArgValue = OutVals[OutIdx];
                     DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
  if (!StackPtr.getNode())
  RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8141 unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
8142 unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
8143 assert(ArgPartOffset == 0);
8148 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
8149 SDValue PartValue = OutVals[OutIdx + 1];
8150 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
for (const auto &Part : Parts) {
  SDValue PartValue = Part.first;
  SDValue PartOffset = Part.second;
ArgValue = SpillSlot;
if (Flags.isByVal())
  ArgValue = ByValArgs[j++];
assert(!IsTailCall && "Tail call not allowed if stack is used "
                      "for passing parameters");
if (!StackPtr.getNode())
if (!MemOpChains.empty())
for (auto &Reg : RegsToPass) {
  Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
Ops.push_back(Chain);
Ops.push_back(Callee);
for (auto &Reg : RegsToPass)
  Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(Glue);
assert(Subtarget.is64Bit() && "Medium code model requires LA64");
assert(Subtarget.is64Bit() && "Large code model requires LA64");
analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
  auto &VA = RVLocs[i];
  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
    assert(VA.needsCustom());
                           RetValue, RetValue2);
    const Type *RetTy) const {
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
                   Outs[i].Flags, CCInfo, true, nullptr))
for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
  SDValue Val = OutVals[OutIdx];
                    DAG.getVTList(MVT::i32, MVT::i32), Val);
  Register RegHi = RVLocs[++i].getLocReg();
8401 if (!Subtarget.hasExtLSX())
8404 if (VT == MVT::f32) {
8405 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
8406 return (masked == 0x3e000000 || masked == 0x40000000);
8409 if (VT == MVT::f64) {
8410 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
8411 return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
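// --- Editor's note: a standalone restatement of the masks used by the
// isFPImmVLDILegal check above, handy for experimenting with which constants
// pass it (assumes an LSX-capable subtarget; std::bit_cast needs C++20).
#include <bit>
#include <cstdint>

bool f32PassesVLDIMask(float F) {
  uint32_t Masked = std::bit_cast<uint32_t>(F) & 0x7e07ffffu;
  return Masked == 0x3e000000u || Masked == 0x40000000u;
}
bool f64PassesVLDIMask(double D) {
  uint64_t Masked = std::bit_cast<uint64_t>(D) & 0x7fc0ffffffffffffull;
  return Masked == 0x3fc0000000000000ull || Masked == 0x4000000000000000ull;
}
// For example, f32PassesVLDIMask(2.0f) is true: the bit pattern 0x40000000 is
// left unchanged by the mask and matches the second accepted value.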
bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                           bool ForCodeSize) const {
  if (VT == MVT::f32 && !Subtarget.hasBasicF())
  if (VT == MVT::f64 && !Subtarget.hasBasicD())
  return (Imm.isZero() || Imm.isExactlyValue(1.0) ||
          isFPImmVLDILegal(Imm, VT));
bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
  Type *Ty = I->getOperand(0)->getType();
  unsigned Size = Ty->getIntegerBitWidth();
8474 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
8475 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
8476 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
8477 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
8479 Info.memVT = MVT::i32;
Info.ptrVal = I.getArgOperand(0);
Info.align = Align(4);
8499 "Unable to expand");
8500 unsigned MinWordSize = 4;
Value *AlignedAddr = Builder.CreateIntrinsic(
    Intrinsic::ptrmask, {PtrTy, IntTy},
    {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
Value *ShiftAmt = Builder.CreateShl(PtrLSB, 3);
ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
Value *Mask = Builder.CreateShl(
    ConstantInt::get(WordType,
Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
Value *ValOperand_Shifted =
    Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), WordType),
                      ShiftAmt, "ValOperand_Shifted");
NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
NewOperand = ValOperand_Shifted;
Builder.CreateAtomicRMW(Op, AlignedAddr, NewOperand, Align(MinWordSize),
Value *Shift = Builder.CreateLShr(NewAI, ShiftAmt, "shifted");
Value *Trunc = Builder.CreateTrunc(Shift, ValueType, "extracted");
8559 if (Subtarget.hasLAM_BH() && Subtarget.is64Bit() &&
8567 if (Subtarget.hasLAMCAS()) {
8589 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
8591 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
8593 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
8595 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
8597 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
8599 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
8601 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
8603 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
8613 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
8615 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
8617 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
8619 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
8621 return Intrinsic::loongarch_masked_atomicrmw_umax_i32;
8623 return Intrinsic::loongarch_masked_atomicrmw_umin_i32;
8625 return Intrinsic::loongarch_masked_atomicrmw_max_i32;
8627 return Intrinsic::loongarch_masked_atomicrmw_min_i32;
8639 if (Subtarget.hasLAMCAS())
8651 unsigned GRLen = Subtarget.getGRLen();
Value *FailureOrdering =
    Builder.getIntN(Subtarget.getGRLen(), static_cast<uint64_t>(FailOrd));
8655 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i32;
8657 CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
8658 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8659 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8660 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8663 Value *Result = Builder.CreateIntrinsic(
8664 CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
8666 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
                  Builder.CreateNot(Mask, "Inv_Mask"),
8689 unsigned GRLen = Subtarget.getGRLen();
8698 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8699 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8700 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8716 Builder.CreateSub(Builder.getIntN(GRLen, GRLen - ValWidth), ShiftAmt);
8717 Result = Builder.CreateCall(LlwOpScwLoop,
8718 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8721 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8725 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
    const Constant *PersonalityFn) const {
  return LoongArch::R4;
    const Constant *PersonalityFn) const {
  return LoongArch::R5;
  int RefinementSteps = VT.getScalarType() == MVT::f64 ? 2 : 1;
  return RefinementSteps;
    int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const {
  if (Subtarget.hasFrecipe()) {
8778 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
8779 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
8780 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
8781 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
8782 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
    int &RefinementSteps) const {
  if (Subtarget.hasFrecipe()) {
8806 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
8807 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
8808 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
8809 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
8810 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC" || Constraint == "ZB")
8873 return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
8880std::pair<unsigned, const TargetRegisterClass *>
8881LoongArchTargetLowering::getRegForInlineAsmConstraint(
if (Constraint.size() == 1) {
8886 switch (Constraint[0]) {
8891 return std::make_pair(0U, &LoongArch::GPRRegClass);
8893 return std::make_pair(0U, &LoongArch::GPRNoR0R1RegClass);
8895 if (Subtarget.hasBasicF() && VT == MVT::f32)
8896 return std::make_pair(0U, &LoongArch::FPR32RegClass);
8897 if (Subtarget.hasBasicD() && VT == MVT::f64)
8898 return std::make_pair(0U, &LoongArch::FPR64RegClass);
8899 if (Subtarget.hasExtLSX() &&
8900 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
8901 return std::make_pair(0U, &LoongArch::LSX128RegClass);
8902 if (Subtarget.hasExtLASX() &&
8903 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
8904 return std::make_pair(0U, &LoongArch::LASX256RegClass);
bool IsFP = Constraint[2] == 'f';
std::pair<StringRef, StringRef> Temp = Constraint.split('$');
std::pair<unsigned, const TargetRegisterClass *> R;
unsigned RegNo = R.first;
8932 if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
8933 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
8934 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
8935 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
8945void LoongArchTargetLowering::LowerAsmOperandForConstraint(
if (Constraint.size() == 1) {
  switch (Constraint[0]) {
    uint64_t CVal = C->getSExtValue();
                                   Subtarget.getGRLenVT()));
    uint64_t CVal = C->getSExtValue();
                                   Subtarget.getGRLenVT()));
    if (C->getZExtValue() == 0)
    uint64_t CVal = C->getZExtValue();
8992#define GET_REGISTER_MATCHER
8993#include "LoongArchGenAsmMatcher.inc"
8999 std::string NewRegName = Name.second.str();
9005 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
if (!ReservedRegs.test(Reg))
9023 const APInt &Imm = ConstNode->getAPIntValue();
9025 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
9026 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
9029 if (ConstNode->hasOneUse() &&
9030 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
9031 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
9037 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
9038 unsigned Shifts = Imm.countr_zero();
9044 APInt ImmPop = Imm.ashr(Shifts);
9045 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
9050 if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
9051 (ImmSmall - Imm).isPowerOf2())
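// --- Editor's note: worked examples of the constant shapes the
// decomposeMulByConstant heuristic above accepts, where a multiply is better
// expressed as shifts plus an add or subtract. Plain C++ sketch.
#include <cstdint>

uint64_t mulBy9(uint64_t X)  { return (X << 3) + X; }        // Imm - 1 is a power of 2
uint64_t mulBy7(uint64_t X)  { return (X << 3) - X; }        // Imm + 1 is a power of 2
uint64_t mulBy40(uint64_t X) { return ((X << 2) + X) << 3; } // Imm >> ctz(Imm) == 5, then shift back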
    Type *Ty, unsigned AS,
9116 EVT MemVT = LD->getMemoryVT();
9117 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
9128 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
if (Y.getValueType().isVector())
    Type *Ty, bool IsSigned) const {
  if (Subtarget.is64Bit() && Ty->isIntegerTy(32))
  if (Subtarget.isSoftFPABI() && (Type.isFloatingPoint() && !Type.isVector() &&
                                  Type.getSizeInBits() < Subtarget.getGRLen()))
    Align &PrefAlign) const {
  if (Subtarget.is64Bit()) {
    PrefAlign = Align(8);
    PrefAlign = Align(4);
bool LoongArchTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
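// --- Editor's sketch of the f16/bf16-in-f32 register-part handling above: the
// 16-bit pattern is widened to 32 bits (the upper bits carry no meaning) and
// reinterpreted as f32 when split into parts, and the integer pattern is
// truncated back when the parts are joined. Plain C++ with std::bit_cast
// (C++20); not the SelectionDAG nodes themselves.
#include <bit>
#include <cstdint>

float halfBitsToF32Part(uint16_t HalfBits) {
  return std::bit_cast<float>(uint32_t(HalfBits)); // widen, then bitcast
}
uint16_t f32PartToHalfBits(float Part) {
  return uint16_t(std::bit_cast<uint32_t>(Part)); // truncate back to 16 bits
}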
9239 if (VT == MVT::f16 && Subtarget.hasBasicF())
9245unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
9248 if (VT == MVT::f16 && Subtarget.hasBasicF())
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned Opc = Op.getOpcode();
  MVT SrcVT = Src.getSimpleValueType();
  if (OriginalDemandedBits.countr_zero() >= NumElts)
  APInt KnownUndef, KnownZero;
  if (KnownSrc.One[SrcBits - 1])
  else if (KnownSrc.Zero[SrcBits - 1])
          Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
static MCRegister MatchRegisterName(StringRef Name)
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static MachineBasicBlock * emitSelectPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode)
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
const MCPhysReg ArgFPR32s[]
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 128-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKEV (if possible).
static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static SDValue unpackF64OnLA32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static bool fitsRegularPattern(typename SmallVectorImpl< ValType >::const_iterator Begin, unsigned CheckStride, typename SmallVectorImpl< ValType >::const_iterator End, ValType ExpectedIndex, unsigned ExpectedIndexStride)
Determine whether a range fits a regular pattern of values.
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE as byte rotate (if possible).
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet, Type *OrigTy)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static SDValue performSPLIT_PAIR_F64Combine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static MachineBasicBlock * emitSplitPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue performSETCC_BITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp, const SDLoc &DL, SelectionDAG &DAG)
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, unsigned Depth)
static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as shift (if possible).
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPERM (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static MachineBasicBlock * emitPseudoVMSKCOND(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as ZERO_EXTEND Or ANY_EXTEND (if possible).
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
const MCPhysReg ArgFPR64s[]
static MachineBasicBlock * emitPseudoCTPOP(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT, SDValue Src, const SDLoc &DL)
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static void fillVector(ArrayRef< SDValue > Ops, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, SDValue &Vector, EVT ResTy)
static SDValue performEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, EVT ResTy, unsigned first)
static bool isSelectPseudo(MachineInstr &MI)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void computeZeroableShuffleElements(ArrayRef< int > Mask, SDValue V1, SDValue V2, APInt &KnownUndef, APInt &KnownZero)
Compute whether each element of a shuffle is zeroable.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue widenShuffleMask(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
static void replaceCMP_XCHG_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Shuffle vectors by lane to generate more optimized instructions.
#define IOCSRRD_CASE(NAME, NODE)
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2, ArrayRef< int > Mask)
Attempts to match vector shuffle as byte rotation.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode, unsigned ScalarSizeInBits, ArrayRef< int > Mask, int MaskOffset, const APInt &Zeroable)
Attempts to match a shuffle mask against the VBSLL, VBSRL, VSLLI and VSRLI instruction.
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE as lane permute and then shuffle (if possible).
static SDValue performVMSKLTZCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT, ArrayRef< int > Mask, SmallVectorImpl< int > &RepeatedMask)
Test whether a shuffle mask is equivalent within each sub-lane.
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF.
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode)
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue, bool AllowSymbol=false)
static bool isSequentialOrUndefInRange(ArrayRef< int > Mask, unsigned Pos, unsigned Size, int Low, int Step=1)
Return true if every element in Mask, beginning from position Pos and ending in Pos + Size,...
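A minimal sketch of the check this helper performs (the real implementation in LoongArchISelLowering.cpp may differ in details; the name here is illustrative):

#include "llvm/ADT/ArrayRef.h"
using namespace llvm;

// Sketch: every mask element in [Pos, Pos + Size) is either undef (-1) or
// matches the arithmetic sequence Low, Low + Step, Low + 2*Step, ...
static bool isSequentialOrUndefInRangeSketch(ArrayRef<int> Mask, unsigned Pos,
                                             unsigned Size, int Low,
                                             int Step = 1) {
  for (unsigned I = Pos, E = Pos + Size; I != E; ++I, Low += Step)
    if (Mask[I] != -1 && Mask[I] != Low)
      return false;
  return true;
}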
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
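The APInt helpers above cover the bit-level reasoning used when analyzing immediates and shuffle masks; a small self-contained sketch of these calls (values chosen only for illustration):

#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintSketch() {
  APInt Ones = APInt::getAllOnes(16);   // 0xFFFF
  APInt Wide = Ones.zext(32);           // zero-extend to 32 bits
  APInt Byte = Wide.trunc(8);           // 0xFF
  APInt Bit = APInt::getZero(32);
  Bit.setBit(5);                        // only bit 5 set
  (void)Bit.countr_zero();              // 5
  (void)Byte.isAllOnes();               // true
  (void)Bit.isSubsetOf(Wide);           // true: bit 5 is also set in Wide
  (void)Wide.lshr(4);                   // logical shift right by 4
}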
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
Value * getPointerOperand()
bool isFloatingPointOperation() const
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
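These AtomicRMWInst accessors are what an IR-level expansion hook typically inspects; a hedged sketch of such a decision (the width threshold and helper name are assumptions, not this backend's actual policy):

#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool needsMaskedExpansionSketch(const AtomicRMWInst *AI,
                                       unsigned NativeBits) {
  if (AI->isFloatingPointOperation())
    return false;                              // FP atomics not considered here
  unsigned Bits = AI->getType()->getPrimitiveSizeInBits().getFixedValue();
  switch (AI->getOperation()) {
  case AtomicRMWInst::Min:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::UMax:
    return Bits < NativeBits;                  // sub-word min/max needs a masked loop
  default:
    return false;
  }
}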
LLVM Basic Block Representation.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
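A hedged sketch of how these CCState calls fit together when analyzing formal arguments; CC_Sketch stands in for the target's real CCAssignFn and the surrounding lowering code is omitted:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static void analyzeFormalsSketch(CallingConv::ID CallConv, bool IsVarArg,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 MachineFunction &MF, SelectionDAG &DAG,
                                 CCAssignFn CC_Sketch) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sketch);
  for (CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // Incoming value lives in VA.getLocReg(); copy it into a virtual register.
    } else {
      // Incoming value lives on the stack at VA.getLocMemOffset();
      // create a fixed stack object and load from it.
    }
  }
}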
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getGRLen() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
void emitExpandAtomicRMW(AtomicRMWInst *AI) const override
Perform an atomicrmw expansion in a target-specific way.
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
Wrapper class representing physical registers. Should be passed by value.
bool hasFeature(unsigned Feature) const
static MVT getFloatingPointVT(unsigned BitWidth)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
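A brief sketch of the MVT queries listed above (the header path for MVT moved between LLVM releases, so the include is approximate):

#include "llvm/CodeGenTypes/MachineValueType.h" // "llvm/Support/MachineValueType.h" in older trees
using namespace llvm;

void mvtSketch() {
  MVT V4I32 = MVT::getVectorVT(MVT::i32, 4);
  (void)V4I32.is128BitVector();                   // true: 4 x 32 bits
  (void)V4I32.getVectorElementType();             // MVT::i32
  (void)V4I32.getVectorNumElements();             // 4
  MVT Half = V4I32.getHalfNumVectorElementsVT();  // v2i32
  (void)Half.getSizeInBits();                     // 64
  (void)V4I32.changeVectorElementTypeToInteger(); // already integer: v4i32
}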
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void push_back(MachineInstr *MI)
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
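These builder calls are normally chained when a pseudo instruction is expanded inside a MachineBasicBlock; a hedged sketch (the instruction description and registers are placeholders):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

static void emitCondBranchSketch(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator InsertPt,
                                 const DebugLoc &DL, const MCInstrDesc &Desc,
                                 Register CondReg, MachineBasicBlock *Target) {
  BuildMI(MBB, InsertPt, DL, Desc)
      .addReg(CondReg, getKillRegState(true)) // condition register, last use
      .addMBB(Target);                        // branch destination block
}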
Representation of each machine instruction.
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
EVT getMemoryVT() const
Return the type of the in-memory value.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
size_t use_size() const
Return the number of uses of this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-...
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
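A hedged sketch of composing DAG nodes with the creation helpers listed here, building (X << 1) | 1 for an integer value type VT:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue buildShiftOrSketch(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                  SDValue X) {
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue Amt = DAG.getShiftAmountConstant(1, VT, DL);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X, Amt);
  return DAG.getNode(ISD::OR, DL, VT, Shl, One);
}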
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
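A short sketch of the known-bits queries above, checking that the low NumLowBits of an operand are provably zero (the helper name is illustrative):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool lowBitsKnownZeroSketch(SelectionDAG &DAG, SDValue Op,
                                   unsigned NumLowBits) {
  APInt LowMask =
      APInt::getLowBitsSet(Op.getScalarValueSizeInBits(), NumLowBits);
  // DAG.MaskedValueIsZero(Op, LowMask) asks the same question in one call.
  KnownBits Known = DAG.computeKnownBits(Op);
  return LowMask.isSubsetOf(Known.Zero);
}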
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
ArrayRef< int > getMask() const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
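A minimal sketch of the small-container usage implied by the entries above:

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void smallContainersSketch() {
  SmallVector<int, 8> Mask;   // stays on the stack up to 8 elements
  Mask.assign(4, -1);         // four undef lanes
  Mask.push_back(0);

  SmallSet<unsigned, 4> SeenRegs;
  SeenRegs.insert(5);
  (void)SeenRegs.count(5);    // 1: the register was already recorded
}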
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
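A small sketch of the StringRef operations listed above (the strings are arbitrary):

#include "llvm/ADT/StringRef.h"
using namespace llvm;

void stringRefSketch() {
  StringRef Reg("$r4");
  (void)Reg.starts_with("$");                         // true
  auto [Lhs, Rhs] = StringRef("lsx.vadd").split('.'); // {"lsx", "vadd"}
  (void)Lhs.size();                                   // 3
  (void)Rhs;
}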
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
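These configuration calls live in a TargetLowering-derived constructor; a hedged fragment showing the shape of such calls (the opcodes and types are illustrative, not a copy of this backend's actual list):

// Inside a constructor of a class derived from TargetLowering:
setOperationAction(ISD::SELECT, MVT::i64, Custom);          // route to LowerOperation
setOperationAction(ISD::ROTR, MVT::i32, Expand);            // no native support assumed
setTruncStoreAction(MVT::f64, MVT::f32, Expand);            // f64 -> f32 truncating store
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); // no extending f32 load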
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
ABI getTargetABI(StringRef ABIName)
InstSeq generateInstSeq(int64_t Val)
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Kill
The last use of a register.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Try to transform a shuffle mask by replacing elements with the scaled index for an equivalent mask of...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
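A self-contained sketch of the integer predicates above, with immediate ranges typical of RISC-style encodings (the specific constants are illustrative):

#include "llvm/Support/MathExtras.h"
using namespace llvm;

void mathExtrasSketch() {
  (void)isInt<12>(-2048);           // true: fits a signed 12-bit immediate
  (void)isUInt<12>(4096);           // false: one past the unsigned 12-bit range
  (void)isPowerOf2_64(64);          // true
  (void)Log2_64(64);                // 6
  (void)isMask_64(0x00FF);          // true: contiguous ones from bit 0
  (void)isShiftedMask_64(0x0FF0);   // true: contiguous ones, shifted up
  (void)isShiftedInt<10, 2>(-2048); // true: -512 shifted left by 2
}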
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
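A hedged sketch of the casting idiom these utilities support, as used when matching a constant operand of a DAG node (the helper name and the 12-bit range are assumptions):

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static bool isSmallImmediateSketch(SDValue Op) {
  // dyn_cast returns null when Op is not a ConstantSDNode; cast would assert.
  if (auto *C = dyn_cast<ConstantSDNode>(Op))
    return isInt<12>(C->getSExtValue());
  return false;
}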
AtomicOrdering
Atomic ordering for LLVM's memory model.
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
constexpr unsigned BitWidth
std::string join_items(Sep Separator, Args &&... Items)
Joins the strings in the parameter pack Items, adding Separator between the elements....
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Align getNonZeroOrigAlign() const
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
bool isBeforeLegalize() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)