#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc("Trap on integer division by zero."),
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};
  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.is64Bit()) {
  if (!Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD())
  if (Subtarget.hasBasicF()) {
  if (Subtarget.is64Bit())
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit())
  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
         {MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
          MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
  if (Subtarget.hasExtLSX()) {
  if (Subtarget.hasExtLASX())
  if (Subtarget.hasLAMCAS())
  if (Subtarget.hasSCQ()) {
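
  // LowerOperation (below) routes each custom-legalized opcode to a dedicated
  // helper (lowerGlobalAddress, lowerVECTOR_SHUFFLE, ...), so this file is
  // organized as roughly one helper per custom-lowered ISD opcode.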
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::EH_DWARF_CFA:
    return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:
    return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:
    return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:
    return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:
    return lowerWRITE_REGISTER(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS:
    return lowerCONCAT_VECTORS(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BITREVERSE:
    return lowerBITREVERSE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return lowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::PREFETCH:
    return lowerPREFETCH(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return lowerBRCOND(Op, DAG);
  case ISD::FP_TO_FP16:
    return lowerFP_TO_FP16(Op, DAG);
  case ISD::FP16_TO_FP:
    return lowerFP16_TO_FP(Op, DAG);
  case ISD::FP_TO_BF16:
    return lowerFP_TO_BF16(Op, DAG);
  case ISD::BF16_TO_FP:
    return lowerBF16_TO_FP(Op, DAG);
  case ISD::VECREDUCE_ADD:
    return lowerVECREDUCE_ADD(Op, DAG);
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
    return lowerVECREDUCE(Op, DAG);
  MVT OpVT = Op.getSimpleValueType();
  unsigned LegalVecSize = 128;
  bool isLASX256Vector =
  if (isLASX256Vector) {
  for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {
  if (isLASX256Vector) {

  MVT OpVT = Op.getSimpleValueType();
  for (int i = NumEles; i > 1; i /= 2) {
    Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val);
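  // The reduction loops above work in log2(NumEles) steps: each step shuffles
  // the upper half of the live elements onto the lower half and combines the
  // two with the reduction opcode, e.g. for a v4i32 add (sketch):
  //   [a b c d] -> [a+c b+d x x] -> [a+b+c+d x x x]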
  unsigned IsData = Op.getConstantOperandVal(4);
  return Op.getOperand(0);
  if (LHS == LHS2 && RHS == RHS2) {
  } else if (LHS == RHS2 && RHS == LHS2) {

  MVT VT = N->getSimpleValueType(0);
  if (~TrueVal == FalseVal) {

  unsigned SelOpNo = 0;
  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;
  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
    std::swap(NewConstOps[0], NewConstOps[1]);
  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);
  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
    ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);
  int64_t C = RHSC->getSExtValue();

  MVT VT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Op.hasOneUse()) {
    unsigned UseOpc = Op->user_begin()->getOpcode();
      SDNode *BinOp = *Op->user_begin();
      return lowerSELECT(NewSel, DAG);
  if (TrueVal - 1 == FalseVal)
  if (TrueVal + 1 == FalseVal)
      RHS == TrueV && LHS == FalseV) {

  MVT GRLenVT = Subtarget.getGRLenVT();
                     Op.getOperand(0), LHS, RHS, TargetCC,
                     Op.getOperand(0), CondV, Op.getOperand(2));
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
  MVT OpVT = Op.getSimpleValueType();

  EVT ResTy = Op->getValueType(0);
  for (unsigned int i = 0; i < NewEltNum; i++) {
    unsigned RevOp = (ResTy == MVT::v16i8 || ResTy == MVT::v32i8)
  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
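  // The nested loops above build the shuffle mask that re-reverses element
  // order after a wide bit-reverse: for each new (wide) element i, the
  // original sub-elements are emitted high-to-low, e.g. with four
  // sub-elements per element the mask starts <3, 2, 1, 0, 7, 6, 5, 4, ...>.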
  if (EltBits > 32 || EltBits == 1)

                                int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
    for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
      if (!(Mask[i] == -1 || Mask[i] == Low))

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
    Scale = ByteShift ? Scale / 2 : Scale;
    return (int)ShiftAmt;

  unsigned MaxWidth = 128;
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
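  // CheckZeros/MatchShift together search every (Scale, Shift, Left)
  // combination: CheckZeros confirms the lanes a shift would vacate are
  // zeroable, and MatchShift confirms the surviving lanes form the expected
  // contiguous run. E.g. (sketch, Z = zeroable) the v4i32 mask <Z, 0, Z, 2>
  // matches Scale = 2, Shift = 1, Left = true: a one-element left shift
  // within each 64-bit group.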
                                const APInt &Zeroable) {
  int Size = Mask.size();
                                            Mask, Size, Zeroable);
         "Illegal integer vector type");

template <typename ValType>
                          unsigned CheckStride,
                          ValType ExpectedIndex, unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
  for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)

  int Size = Mask.size();
  int ScalarSizeInBits = VectorSizeInBits / Size;
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
  (void)ScalarSizeInBits;
  for (int i = 0; i < Size; ++i) {
    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {

  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == -1 || Mask[i] >= 0);
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)

  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;
  for (int i = 0; i < NumElts; ++i) {
    int M = RepeatedMask[i];
    assert((M == -1 || (0 <= M && M < (2 * NumElts))) &&
           "Unexpected mask index.");
    int StartIdx = i - (M % NumElts);
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
    SDValue MaskV = M < NumElts ? V1 : V2;
    else if (TargetV != MaskV)

  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  return Rotation * Scale;
  if (ByteRotation <= 0)
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;
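  // When no native rotate fits, the byte rotation can be emitted as two
  // whole-register byte shifts OR'ed together, roughly (sketch, byte units):
  //   Res = (Lo << LoByteShift) | (Hi >> HiByteShift)
  // so a rotation by 5 bytes shifts one input by 11 bytes and the other by 5.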
                                 const APInt &Zeroable) {
    for (int i = 0; i < NumElements; i++) {
      if (i % Scale != 0) {
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
        Offset = M - (i / Scale);
        if (Offset % (NumElements / Scale))
      } else if (InputV != V)
      if (M != (Offset + (i / Scale)))
    if (Offset >= (NumElements / 2)) {
      Offset -= (NumElements / 2);
      InputV = DAG.getNode(VilVLoHi, DL, InputVT, Ext, InputV);
  } while (Scale > 1);
  for (int NumExtElements = Bits / 64; NumExtElements < NumElements;
       NumExtElements *= 2) {
  int SplatIndex = -1;
  for (const auto &M : Mask) {
  if (SplatIndex == -1)
  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  APInt Imm(64, SplatIndex);

  unsigned SubVecSize = 4;
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < SubVecSize; ++i) {
    for (unsigned j = i; j < Mask.size(); j += SubVecSize) {
      M -= 4 * (j / SubVecSize);
      if (M < 0 || M >= 4)
      if (SubMask[i] == -1)
      else if (M != -1 && M != SubMask[i])
  for (int i = SubVecSize - 1; i >= 0; --i) {
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
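  // The SubMask collected above must repeat every SubVecSize elements; each
  // element then becomes a 2-bit field of the vshuf4i immediate, packed low
  // field first, e.g. SubMask = {1, 0, 3, 2} encodes as 0b10110001 (sketch).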
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

         "Vector type is unsupported for lsx!");
         "Two operands have different types!");
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");

  APInt KnownUndef, KnownZero;
  APInt Zeroable = KnownUndef | KnownZero;
  int SplatIndex = -1;
  for (const auto &M : Mask) {
  if (SplatIndex == -1)
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  APInt Imm(64, SplatIndex);

  if (Mask.size() <= 4)
  if (Mask.size() != 8 || (VT != MVT::v8i32 && VT != MVT::v8f32))
  unsigned HalfSize = NumElts / 2;
  bool FrontLo = true, FrontHi = true;
  bool BackLo = true, BackHi = true;

  auto inRange = [](int val, int low, int high) {
    return (val == -1) || (val >= low && val < high);

  for (unsigned i = 0; i < HalfSize; ++i) {
    int Fronti = Mask[i];
    int Backi = Mask[i + HalfSize];

    FrontLo &= inRange(Fronti, 0, HalfSize);
    FrontHi &= inRange(Fronti, HalfSize, NumElts);
    BackLo &= inRange(Backi, 0, HalfSize);
    BackHi &= inRange(Backi, HalfSize, NumElts);

  if ((FrontLo || FrontHi) && (BackLo || BackHi))
  for (unsigned i = 0; i < NumElts; ++i)
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                          Mask.size() + HalfSize - LeftSize, 1) &&
                          Mask.size() + HalfSize + LeftSize, 1))
                          Mask.size() + HalfSize - LeftSize, 1) &&
                          Mask.size() + HalfSize + LeftSize, 1))

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                          Mask.size() + HalfSize, 1))
                          Mask.size() + HalfSize, 1))

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it < MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    postMask = LowLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
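  // preMask/postMask record which 128-bit lane each half of the mask reads.
  // When a half reads the "wrong" lane, the lanes are swapped first (via an
  // xvpermi-style permute) and the mask is rebased to lane-local indices by
  // the loops above; e.g. the v8i32 mask <4,5,6,7,0,1,2,3> becomes an
  // identity shuffle of the lane-swapped source (sketch).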
  int Size = Mask.size();
  int LaneSize = Size / 2;

  bool LaneCrossing[2] = {false, false};
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;

  if (!LaneCrossing[0] && !LaneCrossing[1])

  InLaneMask.assign(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;

                          DAG.getUNDEF(MVT::v4i64), {2, 3, 0, 1});
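  // Cross-lane elements are rebased into their own lane but offset by Size so
  // they read from a second, lane-swapped copy of the source; the permute
  // with the pattern {2, 3, 0, 1} above produces that copy by swapping the
  // two 128-bit halves.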
2424 "Vector type is unsupported for lasx!");
2426 "Two operands have different types!");
2428 "Unexpected mask size for shuffle!");
2429 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
2430 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
2436 APInt KnownUndef, KnownZero;
2438 APInt Zeroable = KnownUndef | KnownZero;
2475 Subtarget, Zeroable)))
2491 ArrayRef<int> OrigMask = SVOp->
getMask();
2494 MVT VT =
Op.getSimpleValueType();
2498 bool V1IsUndef = V1.
isUndef();
2499 bool V2IsUndef = V2.
isUndef();
2500 if (V1IsUndef && V2IsUndef)
2513 any_of(OrigMask, [NumElements](
int M) {
return M >= NumElements; })) {
2514 SmallVector<int, 8> NewMask(OrigMask);
2515 for (
int &M : NewMask)
2516 if (M >= NumElements)
2522 int MaskUpperLimit = OrigMask.
size() * (V2IsUndef ? 1 : 2);
2523 (void)MaskUpperLimit;
2525 [&](
int M) {
return -1 <=
M &&
M < MaskUpperLimit; }) &&
2526 "Out of bounds shuffle index");
  std::tie(Res, Chain) =
      makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
  if (Subtarget.is64Bit())

  std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
                                     CallOptions, DL, Chain);

  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
      makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
  if (Subtarget.is64Bit())

  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
  MVT VT = Op.getSimpleValueType();
  return DAG.getNode(ISD::FP_EXTEND, DL, VT, Res);
2615 "Unsupported vector type for broadcast.");
2618 bool IsIdeneity =
true;
2620 for (
int i = 0; i !=
NumOps; i++) {
2622 if (
Op.getOpcode() != ISD::LOAD || (IdentitySrc &&
Op != IdentitySrc)) {
2634 auto ExtType = LN->getExtensionType();
2640 ? DAG.
getVTList(VT, LN->getBasePtr().getValueType(), MVT::Other)
2642 SDValue Ops[] = {LN->getChain(), LN->getBasePtr(), LN->getOffset()};
2653 EVT ResTy =
Op->getValueType(0);
2656 APInt SplatValue, SplatUndef;
2657 unsigned SplatBitSize;
2660 bool UseSameConstant =
true;
2665 if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
2666 (!Subtarget.hasExtLASX() || !Is256Vec))
2672 if (
Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
2674 SplatBitSize <= 64) {
2676 if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
2680 if (SplatBitSize == 64 && !Subtarget.is64Bit()) {
2685 if ((Is128Vec && ResTy == MVT::v4i32) ||
2686 (Is256Vec && ResTy == MVT::v8i32))
2692 switch (SplatBitSize) {
2696 ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
2699 ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
2702 ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
2705 ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
2713 if (ViaVecTy != ResTy)
2714 Result = DAG.
getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
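    // Constant splats are materialized in a canonical integer vector type
    // whose element width equals SplatBitSize (ViaVecTy) and then bitcast to
    // the requested type, so e.g. a v4f32 splat of 1.0 could be emitted as a
    // v4i32 splat of 0x3f800000 followed by a bitcast (sketch).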
  for (unsigned i = 0; i < NumElts; ++i) {
      ConstantValue = Opi;
    else if (ConstantValue != Opi)
      UseSameConstant = false;

  if (IsConstant && UseSameConstant && ResTy != MVT::v2f64) {
    for (unsigned i = 0; i < NumElts; ++i) {
    for (unsigned i = 1; i < NumElts; ++i) {

  MVT ResVT = Op.getSimpleValueType();

  unsigned NumFreezeUndef = 0;
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  SmallSet<SDValue, 4> Undefs;
  for (unsigned i = 0; i != NumOperands; ++i) {
      assert(i < sizeof(NonZeros) * CHAR_BIT);

  if (NumNonZero > 2) {
                      Ops.slice(0, NumOperands / 2));
                      Ops.slice(NumOperands / 2));

  MVT SubVT = Op.getOperand(0).getSimpleValueType();
  for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  MVT EltVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
        DAG.getBitcast((VecTy == MVT::v4f64) ? MVT::v4i64 : VecTy, IdxVec);

LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  for (unsigned i = 0; i < NumElts; ++i)

  return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));

  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
                       "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
                       "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);

                     "be a constant integer");
  Register FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF);
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;
    int Offset = -(GRLenInBytes * 2);
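    // Each step of the Depth loop chases one frame link: with the LoongArch
    // frame layout the caller's frame pointer appears to be spilled at
    // $fp - 2 * GRLenInBytes (just below the return-address slot), hence the
    // negative Offset above.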
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can only be determined for the current frame");
  MVT GRLenVT = Subtarget.getGRLenVT();

  auto Size = Subtarget.getGRLen() / 8;

  auto *FuncInfo = MF.getInfo<LoongArchMachineFunctionInfo>();
                      MachinePointerInfo(SV));

  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =

  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =

  EVT VT = Op.getValueType();
  if (Op.getValueType() == MVT::f32 && Op0VT == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
  if (VT == MVT::f64 && Op0VT == MVT::i64 && !Subtarget.is64Bit()) {

    Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
    return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Trunc);
                                    N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");
  const GlobalValue *GV = N->getGlobal();

                                                  unsigned Opc, bool UseGOT,
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Opc == LoongArch::PseudoLA_TLS_LE && !Large)

  Args.emplace_back(Load, CallTy);

  TargetLowering::CallLoweringInfo CLI(DAG);
  const GlobalValue *GV = N->getGlobal();

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC,
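// TLS dispatch (sketch): general-/local-dynamic models go through
// getDynamicTLSAddr (a call to __tls_get_addr), initial-/local-exec through
// getStaticTLSAddr, and TLSDESC through getTLSDescAddr; the *_LARGE pseudos
// are selected under the large code model, which requires LA64.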
template <unsigned N>
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
  case Intrinsic::loongarch_lasx_xvinsve0_d:
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
  case Intrinsic::loongarch_lasx_xvinsve0_w:
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
                       {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
                       {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \

  case Intrinsic::loongarch_cpucfg: {
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
               Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
               Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
               Op, "argument out of range or not a multiple of 8", DAG)
  return Op.getOperand(0);
  MVT GRLenVT = Subtarget.getGRLenVT();
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
  case Intrinsic::loongarch_dbar: {
  case Intrinsic::loongarch_ibar: {
  case Intrinsic::loongarch_break: {
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
  case Intrinsic::loongarch_syscall: {

#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \

  case Intrinsic::loongarch_iocsrwr_d: {
    return !Subtarget.is64Bit()

#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !Subtarget.is64Bit()
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
  case Intrinsic::loongarch_lasx_xvstelm_b:
  case Intrinsic::loongarch_lsx_vstelm_b:
  case Intrinsic::loongarch_lasx_xvstelm_h:
               Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
               Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
               Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
               Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
               Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
               Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);

                                StringRef ErrorMsg, bool WithChain = true) {
  Results.push_back(N->getOperand(0));

template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);

  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
  assert(N->getValueType(0) == MVT::i128 &&
         "AtomicCmpSwap on types less than 128 should be legal");
  switch (MemOp->getMergedOrdering()) {
    Opcode = LoongArch::PseudoCmpXchg128Acquire;
    Opcode = LoongArch::PseudoCmpXchg128;

  auto CmpVal = DAG.SplitScalar(N->getOperand(2), DL, MVT::i64, MVT::i64);
  auto NewVal = DAG.SplitScalar(N->getOperand(3), DL, MVT::i64, MVT::i64);
  SDValue Ops[] = {N->getOperand(1), CmpVal.first, CmpVal.second,
                   NewVal.first,    NewVal.second, N->getOperand(0)};
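  // 128-bit cmpxchg sketch: the i128 expected and new values are split into
  // i64 halves with SplitScalar, and the pseudo consumes
  // {ptr, cmp.lo, cmp.hi, new.lo, new.hi, chain}, matching the Ops array
  // built above.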
      Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i64, MVT::Other),
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
        Subtarget.hasDiv32() && VT == MVT::i32
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (Src.getValueType() == MVT::f16)
      Src = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
  case ISD::BITCAST: {
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
    } else if (VT == MVT::i64 && SrcVT == MVT::f64 && !Subtarget.is64Bit()) {
                                DAG.getVTList(MVT::i32, MVT::i32), Src);
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    MVT GRLenVT = Subtarget.getGRLenVT();
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {

#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_UNARYOP

#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \

    case Intrinsic::loongarch_csrrd_w: {
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \

    case Intrinsic::loongarch_cpucfg: {
    case Intrinsic::loongarch_lddir_d: {
      if (!Subtarget.is64Bit()) {
    if (Subtarget.is64Bit())
                         "On LA64, only 64-bit registers can be read.");
                         "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));
        OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
  case ISD::ATOMIC_CMP_SWAP: {

  MVT VT = N->getSimpleValueType(0);
  EVT InVT = In.getValueType();
  for (unsigned I = 0; I < MinElts; ++I)
    TruncMask[I] = Scale * I;
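  // TruncMask keeps every Scale-th element, implementing truncation as a
  // shuffle over the narrow-element view, e.g. truncating v4i32 to v4i16
  // uses Scale = 2 and the mask <0, 2, 4, 6> over the i16 lanes (sketch,
  // little-endian lane order).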
  unsigned WidenNumElts = 128 / In.getScalarValueSizeInBits();
  MVT SVT = In.getSimpleValueType().getScalarType();
         "Illegal vector type in truncation");

  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMIdx, SMLen;
  if (!Subtarget.has32S())
  if (SMIdx != 0 || lsb + SMLen > ValTy.getSizeInBits())
  if (SMIdx + SMLen > ValTy.getSizeInBits())
    NewOperand = FirstOperand;
  msb = lsb + SMLen - 1;
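  // At this point [lsb, msb] describes the contiguous bit field being
  // accessed: a pattern such as (and (srl x, lsb), 2^SMLen - 1) can be
  // folded to a single BSTRPICK x, msb, lsb (sketch of the intent).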
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)

  if (!Subtarget.has32S())

  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);
  unsigned MaskIdx, MaskLen;
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)

  switch (Src.getOpcode()) {
    return Src.getOperand(0).getValueSizeInBits() == Size;
    return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&

  switch (Src.getOpcode()) {
                       Src.getOpcode(), DL, SExtVT,
                       DL, SExtVT, Src.getOperand(0),

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  if (Src.getOpcode() != ISD::SETCC || !Src.hasOneUse())
  EVT CmpVT = Src.getOperand(0).getValueType();
  else if (Subtarget.has32S() && Subtarget.hasExtLASX() &&
      (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
      (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  bool UseLASX = false;
  bool PropagateSExt = false;
  if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse()) {
    EVT CmpVT = Src.getOperand(0).getValueType();
      SExtVT = MVT::v2i64;
      SExtVT = MVT::v4i32;
        SExtVT = MVT::v4i64;
        PropagateSExt = true;
      SExtVT = MVT::v8i16;
        SExtVT = MVT::v8i32;
        PropagateSExt = true;
      SExtVT = MVT::v16i8;
        SExtVT = MVT::v16i16;
        PropagateSExt = true;
      SExtVT = MVT::v32i8;
  if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
    if (Src.getSimpleValueType() == MVT::v32i8) {
  } else if (UseLASX) {

  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  unsigned ValBits = ValTy.getSizeInBits();
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  bool SwapAndRetried = false;

  if (!Subtarget.has32S())
  if (ValBits != 32 && ValBits != 64)
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {
      (MaskIdx0 + MaskLen0 <= 64) &&
                        ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                        : (MaskIdx0 + MaskLen0 - 1),
      (MaskIdx0 + MaskLen0 <= ValBits)) {
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
  if (!SwapAndRetried) {
    SwapAndRetried = true;
  SwapAndRetried = false;
  if (!SwapAndRetried) {
    SwapAndRetried = true;

  switch (V.getNode()->getOpcode()) {
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

  SDNode *AndNode = N->getOperand(0).getNode();
  SDValue CmpInputValue = N->getOperand(1);
    AndInputValue1 = AndInputValue1.getOperand(0);
  if (AndInputValue2 != CmpInputValue)
                               TruncInputValue1, TruncInputValue2);
      DAG.getSetCC(SDLoc(N), N->getValueType(0), NewAnd, TruncInputValue2, CC);

      LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
      ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
                     N->getOperand(0), LHS, RHS, CC, N->getOperand(4));

  EVT VT = N->getValueType(0);
  if (TrueV == FalseV)
                     {LHS, RHS, CC, TrueV, FalseV});
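  // Rebuilding the node keeps the comparison and the select fused in one
  // select_cc-style operation taking {LHS, RHS, CC, TrueV, FalseV}, so later
  // combines can rewrite the condition and the selected values together
  // (sketch of the intent).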
template <unsigned N>
                                bool IsSigned = false) {
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
      IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
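// The splat-immediate helpers above follow one pattern (sketch): validate the
// immediate against the N-bit signed/unsigned range, then rebuild it as a
// splat constant of the result element type so the generic vector-vector
// patterns (vmax/vmin/vadd/...) apply unchanged.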
  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
                       ": argument out of range.");

  switch (N->getConstantOperandVal(0)) {
5698 case Intrinsic::loongarch_lsx_vadd_b:
5699 case Intrinsic::loongarch_lsx_vadd_h:
5700 case Intrinsic::loongarch_lsx_vadd_w:
5701 case Intrinsic::loongarch_lsx_vadd_d:
5702 case Intrinsic::loongarch_lasx_xvadd_b:
5703 case Intrinsic::loongarch_lasx_xvadd_h:
5704 case Intrinsic::loongarch_lasx_xvadd_w:
5705 case Intrinsic::loongarch_lasx_xvadd_d:
5708 case Intrinsic::loongarch_lsx_vaddi_bu:
5709 case Intrinsic::loongarch_lsx_vaddi_hu:
5710 case Intrinsic::loongarch_lsx_vaddi_wu:
5711 case Intrinsic::loongarch_lsx_vaddi_du:
5712 case Intrinsic::loongarch_lasx_xvaddi_bu:
5713 case Intrinsic::loongarch_lasx_xvaddi_hu:
5714 case Intrinsic::loongarch_lasx_xvaddi_wu:
5715 case Intrinsic::loongarch_lasx_xvaddi_du:
5718 case Intrinsic::loongarch_lsx_vsub_b:
5719 case Intrinsic::loongarch_lsx_vsub_h:
5720 case Intrinsic::loongarch_lsx_vsub_w:
5721 case Intrinsic::loongarch_lsx_vsub_d:
5722 case Intrinsic::loongarch_lasx_xvsub_b:
5723 case Intrinsic::loongarch_lasx_xvsub_h:
5724 case Intrinsic::loongarch_lasx_xvsub_w:
5725 case Intrinsic::loongarch_lasx_xvsub_d:
5728 case Intrinsic::loongarch_lsx_vsubi_bu:
5729 case Intrinsic::loongarch_lsx_vsubi_hu:
5730 case Intrinsic::loongarch_lsx_vsubi_wu:
5731 case Intrinsic::loongarch_lsx_vsubi_du:
5732 case Intrinsic::loongarch_lasx_xvsubi_bu:
5733 case Intrinsic::loongarch_lasx_xvsubi_hu:
5734 case Intrinsic::loongarch_lasx_xvsubi_wu:
5735 case Intrinsic::loongarch_lasx_xvsubi_du:
5738 case Intrinsic::loongarch_lsx_vneg_b:
5739 case Intrinsic::loongarch_lsx_vneg_h:
5740 case Intrinsic::loongarch_lsx_vneg_w:
5741 case Intrinsic::loongarch_lsx_vneg_d:
5742 case Intrinsic::loongarch_lasx_xvneg_b:
5743 case Intrinsic::loongarch_lasx_xvneg_h:
5744 case Intrinsic::loongarch_lasx_xvneg_w:
5745 case Intrinsic::loongarch_lasx_xvneg_d:
5749 APInt(
N->getValueType(0).getScalarType().getSizeInBits(), 0,
5751 SDLoc(
N),
N->getValueType(0)),
5753 case Intrinsic::loongarch_lsx_vmax_b:
5754 case Intrinsic::loongarch_lsx_vmax_h:
5755 case Intrinsic::loongarch_lsx_vmax_w:
5756 case Intrinsic::loongarch_lsx_vmax_d:
5757 case Intrinsic::loongarch_lasx_xvmax_b:
5758 case Intrinsic::loongarch_lasx_xvmax_h:
5759 case Intrinsic::loongarch_lasx_xvmax_w:
5760 case Intrinsic::loongarch_lasx_xvmax_d:
5763 case Intrinsic::loongarch_lsx_vmax_bu:
5764 case Intrinsic::loongarch_lsx_vmax_hu:
5765 case Intrinsic::loongarch_lsx_vmax_wu:
5766 case Intrinsic::loongarch_lsx_vmax_du:
5767 case Intrinsic::loongarch_lasx_xvmax_bu:
5768 case Intrinsic::loongarch_lasx_xvmax_hu:
5769 case Intrinsic::loongarch_lasx_xvmax_wu:
5770 case Intrinsic::loongarch_lasx_xvmax_du:
5773 case Intrinsic::loongarch_lsx_vmaxi_b:
5774 case Intrinsic::loongarch_lsx_vmaxi_h:
5775 case Intrinsic::loongarch_lsx_vmaxi_w:
5776 case Intrinsic::loongarch_lsx_vmaxi_d:
5777 case Intrinsic::loongarch_lasx_xvmaxi_b:
5778 case Intrinsic::loongarch_lasx_xvmaxi_h:
5779 case Intrinsic::loongarch_lasx_xvmaxi_w:
5780 case Intrinsic::loongarch_lasx_xvmaxi_d:
5783 case Intrinsic::loongarch_lsx_vmaxi_bu:
5784 case Intrinsic::loongarch_lsx_vmaxi_hu:
5785 case Intrinsic::loongarch_lsx_vmaxi_wu:
5786 case Intrinsic::loongarch_lsx_vmaxi_du:
5787 case Intrinsic::loongarch_lasx_xvmaxi_bu:
5788 case Intrinsic::loongarch_lasx_xvmaxi_hu:
5789 case Intrinsic::loongarch_lasx_xvmaxi_wu:
5790 case Intrinsic::loongarch_lasx_xvmaxi_du:
5793 case Intrinsic::loongarch_lsx_vmin_b:
5794 case Intrinsic::loongarch_lsx_vmin_h:
5795 case Intrinsic::loongarch_lsx_vmin_w:
5796 case Intrinsic::loongarch_lsx_vmin_d:
5797 case Intrinsic::loongarch_lasx_xvmin_b:
5798 case Intrinsic::loongarch_lasx_xvmin_h:
5799 case Intrinsic::loongarch_lasx_xvmin_w:
5800 case Intrinsic::loongarch_lasx_xvmin_d:
5803 case Intrinsic::loongarch_lsx_vmin_bu:
5804 case Intrinsic::loongarch_lsx_vmin_hu:
5805 case Intrinsic::loongarch_lsx_vmin_wu:
5806 case Intrinsic::loongarch_lsx_vmin_du:
5807 case Intrinsic::loongarch_lasx_xvmin_bu:
5808 case Intrinsic::loongarch_lasx_xvmin_hu:
5809 case Intrinsic::loongarch_lasx_xvmin_wu:
5810 case Intrinsic::loongarch_lasx_xvmin_du:
  case Intrinsic::loongarch_lsx_vmini_b:
  case Intrinsic::loongarch_lsx_vmini_h:
  case Intrinsic::loongarch_lsx_vmini_w:
  case Intrinsic::loongarch_lsx_vmini_d:
  case Intrinsic::loongarch_lasx_xvmini_b:
  case Intrinsic::loongarch_lasx_xvmini_h:
  case Intrinsic::loongarch_lasx_xvmini_w:
  case Intrinsic::loongarch_lasx_xvmini_d:
    return DAG.getNode(ISD::SMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmini_bu:
  case Intrinsic::loongarch_lsx_vmini_hu:
  case Intrinsic::loongarch_lsx_vmini_wu:
  case Intrinsic::loongarch_lsx_vmini_du:
  case Intrinsic::loongarch_lasx_xvmini_bu:
  case Intrinsic::loongarch_lasx_xvmini_hu:
  case Intrinsic::loongarch_lasx_xvmini_wu:
  case Intrinsic::loongarch_lasx_xvmini_du:
    return DAG.getNode(ISD::UMIN, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
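  // vmul lowers to a plain ISD::MUL, while vmadd/vmsub accumulate into
  // their first operand: vmadd(a, b, c) == a + b * c and
  // vmsub(a, b, c) == a - b * c.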
  case Intrinsic::loongarch_lsx_vmul_b:
  case Intrinsic::loongarch_lsx_vmul_h:
  case Intrinsic::loongarch_lsx_vmul_w:
  case Intrinsic::loongarch_lsx_vmul_d:
  case Intrinsic::loongarch_lasx_xvmul_b:
  case Intrinsic::loongarch_lasx_xvmul_h:
  case Intrinsic::loongarch_lasx_xvmul_w:
  case Intrinsic::loongarch_lasx_xvmul_d:
    return DAG.getNode(ISD::MUL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmadd_b:
  case Intrinsic::loongarch_lsx_vmadd_h:
  case Intrinsic::loongarch_lsx_vmadd_w:
  case Intrinsic::loongarch_lsx_vmadd_d:
  case Intrinsic::loongarch_lasx_xvmadd_b:
  case Intrinsic::loongarch_lasx_xvmadd_h:
  case Intrinsic::loongarch_lasx_xvmadd_w:
  case Intrinsic::loongarch_lasx_xvmadd_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::ADD, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy,
                                   N->getOperand(2), N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vmsub_b:
  case Intrinsic::loongarch_lsx_vmsub_h:
  case Intrinsic::loongarch_lsx_vmsub_w:
  case Intrinsic::loongarch_lsx_vmsub_d:
  case Intrinsic::loongarch_lasx_xvmsub_b:
  case Intrinsic::loongarch_lasx_xvmsub_h:
  case Intrinsic::loongarch_lasx_xvmsub_w:
  case Intrinsic::loongarch_lasx_xvmsub_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::SUB, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy,
                                   N->getOperand(2), N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vdiv_b:
  case Intrinsic::loongarch_lsx_vdiv_h:
  case Intrinsic::loongarch_lsx_vdiv_w:
  case Intrinsic::loongarch_lsx_vdiv_d:
  case Intrinsic::loongarch_lasx_xvdiv_b:
  case Intrinsic::loongarch_lasx_xvdiv_h:
  case Intrinsic::loongarch_lasx_xvdiv_w:
  case Intrinsic::loongarch_lasx_xvdiv_d:
    return DAG.getNode(ISD::SDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vdiv_bu:
  case Intrinsic::loongarch_lsx_vdiv_hu:
  case Intrinsic::loongarch_lsx_vdiv_wu:
  case Intrinsic::loongarch_lsx_vdiv_du:
  case Intrinsic::loongarch_lasx_xvdiv_bu:
  case Intrinsic::loongarch_lasx_xvdiv_hu:
  case Intrinsic::loongarch_lasx_xvdiv_wu:
  case Intrinsic::loongarch_lasx_xvdiv_du:
    return DAG.getNode(ISD::UDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_b:
  case Intrinsic::loongarch_lsx_vmod_h:
  case Intrinsic::loongarch_lsx_vmod_w:
  case Intrinsic::loongarch_lsx_vmod_d:
  case Intrinsic::loongarch_lasx_xvmod_b:
  case Intrinsic::loongarch_lasx_xvmod_h:
  case Intrinsic::loongarch_lasx_xvmod_w:
  case Intrinsic::loongarch_lasx_xvmod_d:
    return DAG.getNode(ISD::SREM, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_bu:
  case Intrinsic::loongarch_lsx_vmod_hu:
  case Intrinsic::loongarch_lsx_vmod_wu:
  case Intrinsic::loongarch_lsx_vmod_du:
  case Intrinsic::loongarch_lasx_xvmod_bu:
  case Intrinsic::loongarch_lasx_xvmod_hu:
  case Intrinsic::loongarch_lasx_xvmod_wu:
  case Intrinsic::loongarch_lasx_xvmod_du:
    return DAG.getNode(ISD::UREM, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
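  // Whole-register bitwise intrinsics (vand.v/vor.v/vxor.v/vnor.v) map onto
  // the generic logic nodes; vnor is an OR followed by a NOT. The vandi.b,
  // vori.b and vxori.b forms splat a ui8 immediate first.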
  case Intrinsic::loongarch_lsx_vand_v:
  case Intrinsic::loongarch_lasx_xvand_v:
    return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vor_v:
  case Intrinsic::loongarch_lasx_xvor_v:
    return DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vxor_v:
  case Intrinsic::loongarch_lasx_xvxor_v:
    return DAG.getNode(ISD::XOR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vnor_v:
  case Intrinsic::loongarch_lasx_xvnor_v: {
    SDValue Res = DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0),
                              N->getOperand(1), N->getOperand(2));
    return DAG.getNOT(SDLoc(N), Res, Res->getValueType(0));
  }
  case Intrinsic::loongarch_lsx_vandi_b:
  case Intrinsic::loongarch_lasx_xvandi_b:
    return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vori_b:
  case Intrinsic::loongarch_lasx_xvori_b:
    return DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vxori_b:
  case Intrinsic::loongarch_lasx_xvxori_b:
    return DAG.getNode(ISD::XOR, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<8>(N, 2, DAG));
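  // For the register-operand shifts, each element of the shift-amount
  // vector is truncated to the element width (truncateVecElts) before the
  // generic shift node is built. The immediate forms splat a uiN immediate
  // whose width matches the element size, ui3 for bytes up to ui6 for
  // doublewords, which is why the *i cases are split per element type.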
  case Intrinsic::loongarch_lsx_vsll_b:
  case Intrinsic::loongarch_lsx_vsll_h:
  case Intrinsic::loongarch_lsx_vsll_w:
  case Intrinsic::loongarch_lsx_vsll_d:
  case Intrinsic::loongarch_lasx_xvsll_b:
  case Intrinsic::loongarch_lasx_xvsll_h:
  case Intrinsic::loongarch_lasx_xvsll_w:
  case Intrinsic::loongarch_lasx_xvsll_d:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vslli_b:
  case Intrinsic::loongarch_lasx_xvslli_b:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_h:
  case Intrinsic::loongarch_lasx_xvslli_h:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_w:
  case Intrinsic::loongarch_lasx_xvslli_w:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_d:
  case Intrinsic::loongarch_lasx_xvslli_d:
    return DAG.getNode(ISD::SHL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrl_b:
  case Intrinsic::loongarch_lsx_vsrl_h:
  case Intrinsic::loongarch_lsx_vsrl_w:
  case Intrinsic::loongarch_lsx_vsrl_d:
  case Intrinsic::loongarch_lasx_xvsrl_b:
  case Intrinsic::loongarch_lasx_xvsrl_h:
  case Intrinsic::loongarch_lasx_xvsrl_w:
  case Intrinsic::loongarch_lasx_xvsrl_d:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vsrli_b:
  case Intrinsic::loongarch_lasx_xvsrli_b:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_h:
  case Intrinsic::loongarch_lasx_xvsrli_h:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_w:
  case Intrinsic::loongarch_lasx_xvsrli_w:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_d:
  case Intrinsic::loongarch_lasx_xvsrli_d:
    return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsra_b:
  case Intrinsic::loongarch_lsx_vsra_h:
  case Intrinsic::loongarch_lsx_vsra_w:
  case Intrinsic::loongarch_lsx_vsra_d:
  case Intrinsic::loongarch_lasx_xvsra_b:
  case Intrinsic::loongarch_lasx_xvsra_h:
  case Intrinsic::loongarch_lasx_xvsra_w:
  case Intrinsic::loongarch_lasx_xvsra_d:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vsrai_b:
  case Intrinsic::loongarch_lasx_xvsrai_b:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_h:
  case Intrinsic::loongarch_lasx_xvsrai_h:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_w:
  case Intrinsic::loongarch_lasx_xvsrai_w:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_d:
  case Intrinsic::loongarch_lasx_xvsrai_d:
    return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vclz_b:
  case Intrinsic::loongarch_lsx_vclz_h:
  case Intrinsic::loongarch_lsx_vclz_w:
  case Intrinsic::loongarch_lsx_vclz_d:
  case Intrinsic::loongarch_lasx_xvclz_b:
  case Intrinsic::loongarch_lasx_xvclz_h:
  case Intrinsic::loongarch_lasx_xvclz_w:
  case Intrinsic::loongarch_lasx_xvclz_d:
    return DAG.getNode(ISD::CTLZ, SDLoc(N), N->getValueType(0),
                       N->getOperand(1));
  case Intrinsic::loongarch_lsx_vpcnt_b:
  case Intrinsic::loongarch_lsx_vpcnt_h:
  case Intrinsic::loongarch_lsx_vpcnt_w:
  case Intrinsic::loongarch_lsx_vpcnt_d:
  case Intrinsic::loongarch_lasx_xvpcnt_b:
  case Intrinsic::loongarch_lasx_xvpcnt_h:
  case Intrinsic::loongarch_lasx_xvpcnt_w:
  case Intrinsic::loongarch_lasx_xvpcnt_d:
    return DAG.getNode(ISD::CTPOP, SDLoc(N), N->getValueType(0),
                       N->getOperand(1));
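  // vbitclr/vbitset/vbitrev clear, set or flip bit (n mod EltBits) of every
  // element: they expand to AND with an inverted one-hot mask, OR, and XOR
  // respectively. The *i forms take the bit index as an immediate and go
  // through the matching lowerVectorBit{Clear,Set,Rev}Imm helper.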
  case Intrinsic::loongarch_lsx_vbitclr_b:
  case Intrinsic::loongarch_lsx_vbitclr_h:
  case Intrinsic::loongarch_lsx_vbitclr_w:
  case Intrinsic::loongarch_lsx_vbitclr_d:
  case Intrinsic::loongarch_lasx_xvbitclr_b:
  case Intrinsic::loongarch_lasx_xvbitclr_h:
  case Intrinsic::loongarch_lasx_xvbitclr_w:
  case Intrinsic::loongarch_lasx_xvbitclr_d:
    return lowerVectorBitClear(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_b:
  case Intrinsic::loongarch_lasx_xvbitclri_b:
    return lowerVectorBitClearImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_h:
  case Intrinsic::loongarch_lasx_xvbitclri_h:
    return lowerVectorBitClearImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_w:
  case Intrinsic::loongarch_lasx_xvbitclri_w:
    return lowerVectorBitClearImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_d:
  case Intrinsic::loongarch_lasx_xvbitclri_d:
    return lowerVectorBitClearImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitset_b:
  case Intrinsic::loongarch_lsx_vbitset_h:
  case Intrinsic::loongarch_lsx_vbitset_w:
  case Intrinsic::loongarch_lsx_vbitset_d:
  case Intrinsic::loongarch_lasx_xvbitset_b:
  case Intrinsic::loongarch_lasx_xvbitset_h:
  case Intrinsic::loongarch_lasx_xvbitset_w:
  case Intrinsic::loongarch_lasx_xvbitset_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, SDLoc(N), VecTy);
    return DAG.getNode(
        ISD::OR, SDLoc(N), VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, SDLoc(N), VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitseti_b:
  case Intrinsic::loongarch_lasx_xvbitseti_b:
    return lowerVectorBitSetImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_h:
  case Intrinsic::loongarch_lasx_xvbitseti_h:
    return lowerVectorBitSetImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_w:
  case Intrinsic::loongarch_lasx_xvbitseti_w:
    return lowerVectorBitSetImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_d:
  case Intrinsic::loongarch_lasx_xvbitseti_d:
    return lowerVectorBitSetImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrev_b:
  case Intrinsic::loongarch_lsx_vbitrev_h:
  case Intrinsic::loongarch_lsx_vbitrev_w:
  case Intrinsic::loongarch_lsx_vbitrev_d:
  case Intrinsic::loongarch_lasx_xvbitrev_b:
  case Intrinsic::loongarch_lasx_xvbitrev_h:
  case Intrinsic::loongarch_lasx_xvbitrev_w:
  case Intrinsic::loongarch_lasx_xvbitrev_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, SDLoc(N), VecTy);
    return DAG.getNode(
        ISD::XOR, SDLoc(N), VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, SDLoc(N), VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitrevi_b:
  case Intrinsic::loongarch_lasx_xvbitrevi_b:
    return lowerVectorBitRevImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_h:
  case Intrinsic::loongarch_lasx_xvbitrevi_h:
    return lowerVectorBitRevImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_w:
  case Intrinsic::loongarch_lasx_xvbitrevi_w:
    return lowerVectorBitRevImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_d:
  case Intrinsic::loongarch_lasx_xvbitrevi_d:
    return lowerVectorBitRevImm<6>(N, DAG);
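  // Floating-point arithmetic intrinsics correspond one-to-one to generic
  // nodes: vfadd/vfsub/vfmul/vfdiv become FADD/FSUB/FMUL/FDIV, and vfmadd
  // becomes a fused ISD::FMA.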
  case Intrinsic::loongarch_lsx_vfadd_s:
  case Intrinsic::loongarch_lsx_vfadd_d:
  case Intrinsic::loongarch_lasx_xvfadd_s:
  case Intrinsic::loongarch_lasx_xvfadd_d:
    return DAG.getNode(ISD::FADD, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfsub_s:
  case Intrinsic::loongarch_lsx_vfsub_d:
  case Intrinsic::loongarch_lasx_xvfsub_s:
  case Intrinsic::loongarch_lasx_xvfsub_d:
    return DAG.getNode(ISD::FSUB, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmul_s:
  case Intrinsic::loongarch_lsx_vfmul_d:
  case Intrinsic::loongarch_lasx_xvfmul_s:
  case Intrinsic::loongarch_lasx_xvfmul_d:
    return DAG.getNode(ISD::FMUL, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfdiv_s:
  case Intrinsic::loongarch_lsx_vfdiv_d:
  case Intrinsic::loongarch_lasx_xvfdiv_s:
  case Intrinsic::loongarch_lasx_xvfdiv_d:
    return DAG.getNode(ISD::FDIV, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmadd_s:
  case Intrinsic::loongarch_lsx_vfmadd_d:
  case Intrinsic::loongarch_lasx_xvfmadd_s:
  case Intrinsic::loongarch_lasx_xvfmadd_d:
    return DAG.getNode(ISD::FMA, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
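  // vinsgr2vr inserts a GPR into one lane; the cases are grouped by the
  // width of the lane-index immediate (4 bits for 16 byte lanes down to
  // 1 bit for 2 doubleword lanes). vreplgr2vr splats a GPR into every lane
  // and vreplve replicates the lane selected by a GPR.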
  case Intrinsic::loongarch_lsx_vinsgr2vr_b:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_h:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_w:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vreplgr2vr_b:
  case Intrinsic::loongarch_lsx_vreplgr2vr_h:
  case Intrinsic::loongarch_lsx_vreplgr2vr_w:
  case Intrinsic::loongarch_lsx_vreplgr2vr_d:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_d:
  case Intrinsic::loongarch_lsx_vreplve_b:
  case Intrinsic::loongarch_lsx_vreplve_h:
  case Intrinsic::loongarch_lsx_vreplve_w:
  case Intrinsic::loongarch_lsx_vreplve_d:
  case Intrinsic::loongarch_lasx_xvreplve_b:
  case Intrinsic::loongarch_lasx_xvreplve_h:
  case Intrinsic::loongarch_lasx_xvreplve_w:
  case Intrinsic::loongarch_lasx_xvreplve_d:
6204 "Unexpected value type!");
6213 MVT VT =
N->getSimpleValueType(0);
6247 APInt V =
C->getValueAPF().bitcastToAPInt();
6263 MVT EltVT =
N->getSimpleValueType(0);
6295 switch (
N->getOpcode()) {
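// insertDivByZeroTrap splits the block after the division: the original
// block branches to both a new BreakMBB containing the trap and to SinkMBB,
// which receives the remaining instructions and the original successors.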
  MF->insert(It, BreakMBB);
  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()),
                  MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(BreakMBB);
  MBB->addSuccessor(SinkMBB);
  BreakMBB->addSuccessor(SinkMBB);
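// emitVecCondBranchPseudo expands the PseudoVBZ/PseudoVBNZ family. Each
// pseudo selects the [x]vset* instruction that tests whether the whole
// vector (the _V forms) or any/all of its elements are zero or nonzero, and
// the result is then branched on through a CFR register.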
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case LoongArch::PseudoVBZ:
    CondOpc = LoongArch::VSETEQZ_V;
    break;
  case LoongArch::PseudoVBZ_B:
    CondOpc = LoongArch::VSETANYEQZ_B;
    break;
  case LoongArch::PseudoVBZ_H:
    CondOpc = LoongArch::VSETANYEQZ_H;
    break;
  case LoongArch::PseudoVBZ_W:
    CondOpc = LoongArch::VSETANYEQZ_W;
    break;
  case LoongArch::PseudoVBZ_D:
    CondOpc = LoongArch::VSETANYEQZ_D;
    break;
  case LoongArch::PseudoVBNZ:
    CondOpc = LoongArch::VSETNEZ_V;
    break;
  case LoongArch::PseudoVBNZ_B:
    CondOpc = LoongArch::VSETALLNEZ_B;
    break;
  case LoongArch::PseudoVBNZ_H:
    CondOpc = LoongArch::VSETALLNEZ_H;
    break;
  case LoongArch::PseudoVBNZ_W:
    CondOpc = LoongArch::VSETALLNEZ_W;
    break;
  case LoongArch::PseudoVBNZ_D:
    CondOpc = LoongArch::VSETALLNEZ_D;
    break;
  case LoongArch::PseudoXVBZ:
    CondOpc = LoongArch::XVSETEQZ_V;
    break;
  case LoongArch::PseudoXVBZ_B:
    CondOpc = LoongArch::XVSETANYEQZ_B;
    break;
  case LoongArch::PseudoXVBZ_H:
    CondOpc = LoongArch::XVSETANYEQZ_H;
    break;
  case LoongArch::PseudoXVBZ_W:
    CondOpc = LoongArch::XVSETANYEQZ_W;
    break;
  case LoongArch::PseudoXVBZ_D:
    CondOpc = LoongArch::XVSETANYEQZ_D;
    break;
  case LoongArch::PseudoXVBNZ:
    CondOpc = LoongArch::XVSETNEZ_V;
    break;
  case LoongArch::PseudoXVBNZ_B:
    CondOpc = LoongArch::XVSETALLNEZ_B;
    break;
  case LoongArch::PseudoXVBNZ_H:
    CondOpc = LoongArch::XVSETALLNEZ_H;
    break;
  case LoongArch::PseudoXVBNZ_W:
    CondOpc = LoongArch::XVSETALLNEZ_W;
    break;
  case LoongArch::PseudoXVBNZ_D:
    CondOpc = LoongArch::XVSETALLNEZ_D;
    break;
  }
  F->insert(It, FalseBB);
  F->insert(It, TrueBB);
  F->insert(It, SinkBB);
  SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
  Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
  Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
  Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
  // Merge the two computed results with a PHI in the sink block.
  BuildMI(*SinkBB, SinkBB->begin(), DL, TII->get(LoongArch::PHI),
          MI.getOperand(0).getReg())
      .addReg(RD1)
      .addMBB(TrueBB)
      .addReg(RD2)
      .addMBB(FalseBB);
  MI.eraseFromParent();
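// PseudoXVINSGR2VR_{B,H} insert a GPR into one byte/halfword lane of a
// 256-bit LASX register, which has no direct xvinsgr2vr.{b,h} instruction.
// An undefined low-half lane can be written with a plain 128-bit vinsgr2vr
// on the sub_128 subregister; otherwise the value is broadcast with
// xvreplgr2vr and merged back with xvpermi.q plus xvextrins.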
  unsigned InsOp;
  unsigned HalfSize;
  unsigned BroadcastOp;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case LoongArch::PseudoXVINSGR2VR_B:
    HalfSize = 16;
    BroadcastOp = LoongArch::XVREPLGR2VR_B;
    InsOp = LoongArch::XVEXTRINS_B;
    break;
  case LoongArch::PseudoXVINSGR2VR_H:
    HalfSize = 8;
    BroadcastOp = LoongArch::XVREPLGR2VR_H;
    InsOp = LoongArch::XVEXTRINS_H;
    break;
  }
  unsigned Idx = MI.getOperand(3).getImm();
  // An undefined source with a low-half index only needs a 128-bit insert
  // on the sub_128 subregister.
  if (XSrc.isVirtual() && MRI.getVRegDef(XSrc)->isImplicitDef() &&
      Idx < HalfSize) {
    Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
    Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
        .addReg(XSrc, 0, LoongArch::sub_128);
            TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
                                   : LoongArch::VINSGR2VR_B),
        .addImm(LoongArch::sub_128);
  Register ScratchReg1 = MRI.createVirtualRegister(RC);
  Register ScratchReg2 = MRI.createVirtualRegister(RC);
  BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
      .addReg(ScratchReg1)
      .addReg(XSrc)
      .addImm(Idx >= HalfSize ? 48 : 18);
      .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
  MI.eraseFromParent();
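// PseudoCTPOP: there is no scalar popcount instruction, so the value is
// inserted into an LSX lane (vinsgr2vr), counted with vpcnt, and the result
// is read back with vpickve2gr.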
  assert(Subtarget.hasExtLSX());
  Register ScratchReg1 = MRI.createVirtualRegister(RC);
  Register ScratchReg2 = MRI.createVirtualRegister(RC);
  Register ScratchReg3 = MRI.createVirtualRegister(RC);
  TII->get(Subtarget.is64Bit() ? LoongArch::VINSGR2VR_D
                               : LoongArch::VINSGR2VR_W),
  TII->get(Subtarget.is64Bit() ? LoongArch::VPCNT_D : LoongArch::VPCNT_W),
  TII->get(Subtarget.is64Bit() ? LoongArch::VPICKVE2GR_D
                               : LoongArch::VPICKVE2GR_W),
  MI.eraseFromParent();
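// The [X]VMSK* pseudos compute a per-lane sign/zero mask in a vector
// register. EQZ has no dedicated instruction, so it is emitted as VMSKNZ
// followed by a vector NOR that inverts the mask.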
  unsigned EleBits = 8;
  unsigned NotOpc = 0;
  unsigned MskOpc;
  const TargetRegisterClass *RC = &LoongArch::LSX128RegClass;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case LoongArch::PseudoVMSKLTZ_B:
    MskOpc = LoongArch::VMSKLTZ_B;
    break;
  case LoongArch::PseudoVMSKLTZ_H:
    MskOpc = LoongArch::VMSKLTZ_H;
    EleBits = 16;
    break;
  case LoongArch::PseudoVMSKLTZ_W:
    MskOpc = LoongArch::VMSKLTZ_W;
    EleBits = 32;
    break;
  case LoongArch::PseudoVMSKLTZ_D:
    MskOpc = LoongArch::VMSKLTZ_D;
    EleBits = 64;
    break;
  case LoongArch::PseudoVMSKGEZ_B:
    MskOpc = LoongArch::VMSKGEZ_B;
    break;
  case LoongArch::PseudoVMSKEQZ_B:
    MskOpc = LoongArch::VMSKNZ_B;
    NotOpc = LoongArch::VNOR_V;
    break;
  case LoongArch::PseudoVMSKNEZ_B:
    MskOpc = LoongArch::VMSKNZ_B;
    break;
  case LoongArch::PseudoXVMSKLTZ_B:
    MskOpc = LoongArch::XVMSKLTZ_B;
    RC = &LoongArch::LASX256RegClass;
    break;
  case LoongArch::PseudoXVMSKLTZ_H:
    MskOpc = LoongArch::XVMSKLTZ_H;
    RC = &LoongArch::LASX256RegClass;
    EleBits = 16;
    break;
  case LoongArch::PseudoXVMSKLTZ_W:
    MskOpc = LoongArch::XVMSKLTZ_W;
    RC = &LoongArch::LASX256RegClass;
    EleBits = 32;
    break;
  case LoongArch::PseudoXVMSKLTZ_D:
    MskOpc = LoongArch::XVMSKLTZ_D;
    RC = &LoongArch::LASX256RegClass;
    EleBits = 64;
    break;
  case LoongArch::PseudoXVMSKGEZ_B:
    MskOpc = LoongArch::XVMSKGEZ_B;
    RC = &LoongArch::LASX256RegClass;
    break;
  case LoongArch::PseudoXVMSKEQZ_B:
    MskOpc = LoongArch::XVMSKNZ_B;
    NotOpc = LoongArch::XVNOR_V;
    RC = &LoongArch::LASX256RegClass;
    break;
  case LoongArch::PseudoXVMSKNEZ_B:
    MskOpc = LoongArch::XVMSKNZ_B;
    RC = &LoongArch::LASX256RegClass;
    break;
  }
  // A 256-bit mask is read back in two GRLen halves and merged with bstrins.
  if (TRI->getRegSizeInBits(*RC) > 128) {
    Register Lo = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
    Register Hi = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
    TII->get(Subtarget.is64Bit() ? LoongArch::BSTRINS_D
                                 : LoongArch::BSTRINS_W),
        .addImm(256 / EleBits - 1)
  MI.eraseFromParent();
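// BuildPairF64Pseudo and SplitPairF64Pseudo only appear on LA32 with 64-bit
// FPRs: they assemble an f64 from two i32 GPR halves and split one back.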
  assert(MI.getOpcode() == LoongArch::SplitPairF64Pseudo &&
         "Unexpected instruction");
  MI.eraseFromParent();
  assert(MI.getOpcode() == LoongArch::BuildPairF64Pseudo &&
         "Unexpected instruction");
  Register TmpReg = MRI.createVirtualRegister(&LoongArch::FPR64RegClass);
  MI.eraseFromParent();
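// emitSelectPseudo lowers a Select_* pseudo into a branch diamond. Adjacent
// select pseudos that share the same LHS/RHS/condition are collected first
// so that a whole sequence can be folded behind a single branch.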
  switch (MI.getOpcode()) {
  case LoongArch::Select_GPR_Using_CC_GPR:
  if (MI.getOperand(2).isReg())
    RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<unsigned>(MI.getOperand(3).getImm());
  SelectDests.insert(MI.getOperand(0).getReg());
  for (auto E = BB->end(), SequenceMBBI = std::next(MI.getIterator());
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    if (SequenceMBBI->getOperand(1).getReg() != LHS ||
        !SequenceMBBI->getOperand(2).isReg() ||
        SequenceMBBI->getOperand(2).getReg() != RHS ||
        SequenceMBBI->getOperand(3).getImm() != CC ||
        SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
        SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
      break;
    LastSelectPseudo = &*SequenceMBBI;
    SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    if (SequenceMBBI->hasUnmodeledSideEffects() ||
        SequenceMBBI->mayLoadOrStore() ||
        SequenceMBBI->usesCustomInsertionHook())
      break;
    return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  unsigned CallFrameSize = TII.getCallFrameSizeAt(*LastSelectPseudo);
  TailMBB->push_back(DebugInstr->removeFromParent());
  TailMBB->splice(TailMBB->end(), HeadMBB,
  if (MI.getOperand(2).isImm())
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(LoongArch::PHI),
            SelectMBBI->getOperand(0).getReg())
        .addReg(SelectMBBI->getOperand(4).getReg())
        .addMBB(HeadMBB)
        .addReg(SelectMBBI->getOperand(5).getReg())
        .addMBB(IfFalseMBB);
  F->getProperties().resetNoPHIs();
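// EmitInstrWithCustomInserter dispatches every custom-inserted pseudo to
// the emit* helpers above. The div/mod opcodes are only routed here when
// the loongarch-check-zero-division option requests a trap on division by
// zero.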
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case LoongArch::DIV_W:
  case LoongArch::DIV_WU:
  case LoongArch::MOD_W:
  case LoongArch::MOD_WU:
  case LoongArch::DIV_D:
  case LoongArch::DIV_DU:
  case LoongArch::MOD_D:
  case LoongArch::MOD_DU:
    return insertDivByZeroTrap(MI, BB);
  case LoongArch::WRFCSR: {
    BuildMI(*BB, MI, DL, TII->get(LoongArch::MOVGR2FCSR),
            LoongArch::FCSR0 + MI.getOperand(0).getImm())
        .addReg(MI.getOperand(1).getReg());
    MI.eraseFromParent();
    return BB;
  }
  case LoongArch::RDFCSR: {
    MachineInstr *ReadFCSR =
        BuildMI(*BB, MI, DL, TII->get(LoongArch::MOVFCSR2GR),
                MI.getOperand(0).getReg())
            .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
    ReadFCSR->getOperand(1).setIsUndef();
    MI.eraseFromParent();
    return BB;
  }
  case LoongArch::Select_GPR_Using_CC_GPR:
    return emitSelectPseudo(MI, BB, Subtarget);
  case LoongArch::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB, Subtarget);
  case LoongArch::SplitPairF64Pseudo:
    return emitSplitPairF64Pseudo(MI, BB, Subtarget);
  case LoongArch::PseudoVBZ:
  case LoongArch::PseudoVBZ_B:
  case LoongArch::PseudoVBZ_H:
  case LoongArch::PseudoVBZ_W:
  case LoongArch::PseudoVBZ_D:
  case LoongArch::PseudoVBNZ:
  case LoongArch::PseudoVBNZ_B:
  case LoongArch::PseudoVBNZ_H:
  case LoongArch::PseudoVBNZ_W:
  case LoongArch::PseudoVBNZ_D:
  case LoongArch::PseudoXVBZ:
  case LoongArch::PseudoXVBZ_B:
  case LoongArch::PseudoXVBZ_H:
  case LoongArch::PseudoXVBZ_W:
  case LoongArch::PseudoXVBZ_D:
  case LoongArch::PseudoXVBNZ:
  case LoongArch::PseudoXVBNZ_B:
  case LoongArch::PseudoXVBNZ_H:
  case LoongArch::PseudoXVBNZ_W:
  case LoongArch::PseudoXVBNZ_D:
    return emitVecCondBranchPseudo(MI, BB, Subtarget);
  case LoongArch::PseudoXVINSGR2VR_B:
  case LoongArch::PseudoXVINSGR2VR_H:
    return emitPseudoXVINSGR2VR(MI, BB, Subtarget);
  case LoongArch::PseudoCTPOP:
    return emitPseudoCTPOP(MI, BB, Subtarget);
  case LoongArch::PseudoVMSKLTZ_B:
  case LoongArch::PseudoVMSKLTZ_H:
  case LoongArch::PseudoVMSKLTZ_W:
  case LoongArch::PseudoVMSKLTZ_D:
  case LoongArch::PseudoVMSKGEZ_B:
  case LoongArch::PseudoVMSKEQZ_B:
  case LoongArch::PseudoVMSKNEZ_B:
  case LoongArch::PseudoXVMSKLTZ_B:
  case LoongArch::PseudoXVMSKLTZ_H:
  case LoongArch::PseudoXVMSKLTZ_W:
  case LoongArch::PseudoXVMSKLTZ_D:
  case LoongArch::PseudoXVMSKGEZ_B:
  case LoongArch::PseudoXVMSKEQZ_B:
  case LoongArch::PseudoXVMSKNEZ_B:
    return emitPseudoVMSKCOND(MI, BB, Subtarget);
  case TargetOpcode::STATEPOINT:
    // STATEPOINT is a pseudo instruction with no implicit defs/uses or other
    // requirements of its own; add ra ($r1) as an implicit def for the
    // return address.
    MI.addOperand(*MI.getMF(),
                  MachineOperand::CreateReg(LoongArch::R1, /*isDef=*/true,
                                            /*isImp=*/true));
  if (!Subtarget.is64Bit())
    unsigned *Fast) const {
  if (!Subtarget.hasUAL())
#define NODE_NAME_CASE(node)                                                   \
  case LoongArchISD::node:                                                     \
    return "LoongArchISD::" #node;
#undef NODE_NAME_CASE
const MCPhysReg ArgGPRs[] = {LoongArch::R4, LoongArch::R5, LoongArch::R6,
                             LoongArch::R7, LoongArch::R8, LoongArch::R9,
                             LoongArch::R10, LoongArch::R11};
const MCPhysReg ArgFPR32s[] = {LoongArch::F0, LoongArch::F1, LoongArch::F2,
                               LoongArch::F3, LoongArch::F4, LoongArch::F5,
                               LoongArch::F6, LoongArch::F7};
const MCPhysReg ArgFPR64s[] = {
    LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
    LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
const MCPhysReg ArgVRs[] = {LoongArch::VR0, LoongArch::VR1, LoongArch::VR2,
                            LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
                            LoongArch::VR6, LoongArch::VR7};
const MCPhysReg ArgXRs[] = {LoongArch::XR0, LoongArch::XR1, LoongArch::XR2,
                            LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
                            LoongArch::XR6, LoongArch::XR7};
                                      unsigned ValNo2, MVT ValVT2, MVT LocVT2,
  unsigned GRLenInBytes = GRLen / 8;
        State.AllocateStack(GRLenInBytes, StackAlign),
        ValNo2, ValVT2,
        State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
        ValNo2, ValVT2,
        State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
                          unsigned ValNo, MVT ValVT,
  unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
  assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
  MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
  if (IsRet && ValNo > 1)
  bool UseGPRForFloat = true;
    UseGPRForFloat = ArgFlags.isVarArg();
  unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
      DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.getPendingArgFlags();
         "PendingLocs and PendingArgFlags out of sync");
    UseGPRForFloat = true;
  if (UseGPRForFloat && ValVT == MVT::f32) {
  } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
  } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    PendingLocs.clear();
    PendingArgFlags.clear();
  unsigned StoreSizeBytes = GRLen / 8;
  if (ValVT == MVT::f32 && !UseGPRForFloat)
  else if (ValVT == MVT::f64 && !UseGPRForFloat)
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
  if (!PendingLocs.empty()) {
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
    for (auto &It : PendingLocs) {
      It.convertToReg(Reg);
    PendingLocs.clear();
    PendingArgFlags.clear();
  assert((!UseGPRForFloat || LocVT == GRLenVT) &&
         "Expected a GRLenVT at this stage");
void LoongArchTargetLowering::analyzeInputArgs(
    LoongArchCCAssignFn Fn) const {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT ArgVT = Ins[i].VT;
    Type *ArgTy = nullptr;
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
           CCInfo, IsRet, ArgTy)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                        << ArgVT
void LoongArchTargetLowering::analyzeOutputArgs(
    CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT ArgVT = Outs[i].VT;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
           CCInfo, IsRet, OrigTy)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                        << ArgVT
  if (In.isOrigArg()) {
    if ((BitWidth <= 32 && In.Flags.isSExt()) ||
        (BitWidth < 32 && In.Flags.isZExt())) {
  Register LoVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
  Register HiVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
  Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // GHC calling convention: arguments live in callee-saved registers.
    static const MCPhysReg GPRList[] = {
        LoongArch::R23, LoongArch::R24, LoongArch::R25,
        LoongArch::R26, LoongArch::R27, LoongArch::R28,
        LoongArch::R29, LoongArch::R30, LoongArch::R31};
  if (LocVT == MVT::f32) {
    static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
                                          LoongArch::F26, LoongArch::F27};
  if (LocVT == MVT::f64) {
    static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
                                          LoongArch::F30_64, LoongArch::F31_64};
      "GHC calling convention requires the F and D extensions");
  MVT GRLenVT = Subtarget.getGRLenVT();
  unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
  std::vector<SDValue> OutChains;
  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false, CC_LoongArch);
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
    unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
    assert(ArgPartOffset == 0);
    while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
      unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
  int VaArgOffset, VarArgsSaveSize;
  if (ArgRegs.size() == Idx) {
    VarArgsSaveSize = 0;
  } else {
    VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
    VaArgOffset = -VarArgsSaveSize;
  LoongArchFI->setVarArgsFrameIndex(FI);
    VarArgsSaveSize += GRLenInBytes;
  // Copy the unnamed argument registers into the varargs save area.
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += GRLenInBytes) {
    const Register Reg = RegInfo.createVirtualRegister(RC);
    RegInfo.addLiveIn(ArgRegs[I], Reg);
        ->setValue((Value *)nullptr);
    OutChains.push_back(Store);
  LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;
  SDNode *Copy = *N->user_begin();
  if (Copy->getGluedNode())
    return false;
  bool HasRet = false;
  Chain = Copy->getOperand(0);
bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto CallerCC = Caller.getCallingConv();
  for (auto &VA : ArgLocs)
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
    return false;
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
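// LowerCall: byval arguments are copied into the caller's frame first so the
// callee sees a private copy; the remaining arguments are assigned by
// CC_LoongArch and either moved into registers or stored relative to sp.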
  MVT GRLenVT = Subtarget.getGRLenVT();
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI, CC_LoongArch);
  IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
           "site marked musttail");
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    if (!Flags.isByVal())
      continue;
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();
    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false, nullptr, std::nullopt,
  for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
    SDValue ArgValue = OutVals[OutIdx];
                  DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
      if (!StackPtr.getNode())
      RegsToPass.push_back(std::make_pair(RegHigh, Hi));
    unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
    unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
    assert(ArgPartOffset == 0);
    while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
      SDValue PartValue = OutVals[OutIdx + 1];
      unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
    for (const auto &Part : Parts) {
      SDValue PartValue = Part.first;
      SDValue PartOffset = Part.second;
    ArgValue = SpillSlot;
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");
      if (!StackPtr.getNode())
  if (!MemOpChains.empty())
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(Glue);
  assert(Subtarget.is64Bit() && "Medium code model requires LA64");
  assert(Subtarget.is64Bit() && "Large code model requires LA64");
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_LoongArch);
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    auto &VA = RVLocs[i];
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.needsCustom());
                             RetValue, RetValue2);
                                            const Type *RetTy) const {
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
           Outs[i].Flags, CCInfo, /*IsRet=*/true, nullptr))
  for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
    SDValue Val = OutVals[OutIdx];
                  DAG.getVTList(MVT::i32, MVT::i32), Val);
      Register RegHi = RVLocs[++i].getLocReg();
  if (!Subtarget.hasExtLSX())
    return false;
  if (VT == MVT::f32) {
    uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
    return (masked == 0x3e000000 || masked == 0x40000000);
  }
  if (VT == MVT::f64) {
    uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
    return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
  }
  return false;
bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                           bool ForCodeSize) const {
  if (VT == MVT::f32 && !Subtarget.hasBasicF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasBasicD())
    return false;
  return (Imm.isZero() || Imm.isExactlyValue(1.0) ||
          isFPImmVLDILegal(Imm, VT));
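// Atomic expansion: with LAM_BH/LAMCAS some byte/halfword operations map
// directly onto AM* instructions; otherwise sub-word atomicrmw is expanded
// below by aligning the address to a 32-bit word and operating on a shifted,
// masked value inside that word.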
bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
  Type *Ty = I->getOperand(0)->getType();
  unsigned Size = Ty->getIntegerBitWidth();
  case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
  case Intrinsic::loongarch_masked_atomicrmw_add_i32:
  case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
  case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(4);
         "Unable to expand");
  unsigned MinWordSize = 4;
  Value *AlignedAddr = Builder.CreateIntrinsic(
      Intrinsic::ptrmask, {PtrTy, IntTy},
      {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
      "AlignedAddr");
  Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  Value *ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
  Value *Mask = Builder.CreateShl(
      ConstantInt::get(WordType,
  Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), WordType),
                        ShiftAmt, "ValOperand_Shifted");
  if (Op == AtomicRMWInst::And)
    NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;
  AtomicRMWInst *NewAI =
      Builder.CreateAtomicRMW(Op, AlignedAddr, NewOperand, Align(MinWordSize),
  Value *Shift = Builder.CreateLShr(NewAI, ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, ValueType, "extracted");
  if (Subtarget.hasLAM_BH() && Subtarget.is64Bit() &&
  if (Subtarget.hasLAMCAS()) {
  case AtomicRMWInst::Xchg:
    return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
  case AtomicRMWInst::Add:
    return Intrinsic::loongarch_masked_atomicrmw_add_i64;
  case AtomicRMWInst::Sub:
    return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
  case AtomicRMWInst::Nand:
    return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
  case AtomicRMWInst::UMax:
    return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
  case AtomicRMWInst::UMin:
    return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
  case AtomicRMWInst::Max:
    return Intrinsic::loongarch_masked_atomicrmw_max_i64;
  case AtomicRMWInst::Min:
    return Intrinsic::loongarch_masked_atomicrmw_min_i64;
  case AtomicRMWInst::Xchg:
    return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
  case AtomicRMWInst::Add:
    return Intrinsic::loongarch_masked_atomicrmw_add_i32;
  case AtomicRMWInst::Sub:
    return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
  case AtomicRMWInst::Nand:
    return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
  case AtomicRMWInst::UMax:
    return Intrinsic::loongarch_masked_atomicrmw_umax_i32;
  case AtomicRMWInst::UMin:
    return Intrinsic::loongarch_masked_atomicrmw_umin_i32;
  case AtomicRMWInst::Max:
    return Intrinsic::loongarch_masked_atomicrmw_max_i32;
  case AtomicRMWInst::Min:
    return Intrinsic::loongarch_masked_atomicrmw_min_i32;
  if (Subtarget.hasLAMCAS())
  unsigned GRLen = Subtarget.getGRLen();
  Value *FailureOrdering =
      Builder.getIntN(Subtarget.getGRLen(), static_cast<uint64_t>(FailOrd));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i32;
  if (GRLen == 64) {
    CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
  }
  Value *Result = Builder.CreateIntrinsic(
      CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
  if (GRLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
    const Constant *PersonalityFn) const {
  // The exception pointer lives in $a0 ($r4), the selector in $a1 ($r5).
  return LoongArch::R4;
    const Constant *PersonalityFn) const {
  return LoongArch::R5;
  int RefinementSteps = VT.getScalarType() == MVT::f64 ? 2 : 1;
  return RefinementSteps;
                                  int &RefinementSteps, bool &UseOneConstNR,
                                  bool Reciprocal) const {
  if (Subtarget.hasFrecipe()) {
    if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
        (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
        (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
        (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
        (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
                                   int &RefinementSteps) const {
  if (Subtarget.hasFrecipe()) {
    if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
        (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
        (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
        (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
        (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC" || Constraint == "ZB")
  return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
std::pair<unsigned, const TargetRegisterClass *>
LoongArchTargetLowering::getRegForInlineAsmConstraint(
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &LoongArch::GPRRegClass);
      return std::make_pair(0U, &LoongArch::GPRNoR0R1RegClass);
      if (Subtarget.hasBasicF() && VT == MVT::f32)
        return std::make_pair(0U, &LoongArch::FPR32RegClass);
      if (Subtarget.hasBasicD() && VT == MVT::f64)
        return std::make_pair(0U, &LoongArch::FPR64RegClass);
      if (Subtarget.hasExtLSX() &&
          TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
        return std::make_pair(0U, &LoongArch::LSX128RegClass);
      if (Subtarget.hasExtLASX() &&
          TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
        return std::make_pair(0U, &LoongArch::LASX256RegClass);
  bool IsFP = Constraint[2] == 'f';
  std::pair<StringRef, StringRef> Temp = Constraint.split('$');
  std::pair<unsigned, const TargetRegisterClass *> R;
  unsigned RegNo = R.first;
  if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
    if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
      unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
      return std::make_pair(DReg, &LoongArch::FPR64RegClass);
void LoongArchTargetLowering::LowerAsmOperandForConstraint(
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'l':
      // Validate and create an si16 immediate operand.
      uint64_t CVal = C->getSExtValue();
      if (isInt<16>(CVal))
        Ops.push_back(DAG.getTargetConstant(CVal, SDLoc(Op),
                                            Subtarget.getGRLenVT()));
    case 'I':
      // Validate and create an si12 immediate operand.
      uint64_t CVal = C->getSExtValue();
      if (isInt<12>(CVal))
        Ops.push_back(DAG.getTargetConstant(CVal, SDLoc(Op),
                                            Subtarget.getGRLenVT()));
    case 'J':
      // The only valid 'J' operand is the integer constant zero.
      if (C->getZExtValue() == 0)
    case 'K':
      // Validate and create a ui12 immediate operand.
      uint64_t CVal = C->getZExtValue();
#define GET_REGISTER_MATCHER
#include "LoongArchGenAsmMatcher.inc"
  std::string NewRegName = Name.second.str();
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg))
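// decomposeMulByConstant: a multiplication is worth breaking into shift and
// add/sub when the constant is one step away from a power of two. For
// example, x * 17 satisfies (Imm - 1).isPowerOf2() and becomes a single
// alsl $rd, $rj, $rj, 4, i.e. (x << 4) + x; x * 15 becomes (x << 4) - x.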
  const APInt &Imm = ConstNode->getAPIntValue();
  if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
      (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
    return true;
  if (ConstNode->hasOneUse() &&
      ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
       (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
    return true;
  if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
    unsigned Shifts = Imm.countr_zero();
    APInt ImmPop = Imm.ashr(Shifts);
    if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
      return true;
    APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, /*isSigned=*/true);
    if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
        (ImmSmall - Imm).isPowerOf2())
      return true;
                                                 Type *Ty, unsigned AS,
  EVT MemVT = LD->getMemoryVT();
  if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
  if (Y.getValueType().isVector())
                                             Type *Ty, bool IsSigned) const {
  if (Subtarget.is64Bit() && Ty->isIntegerTy(32))
  if (Subtarget.isSoftFPABI() && (Type.isFloatingPoint() && !Type.isVector() &&
                                  Type.getSizeInBits() < Subtarget.getGRLen()))
                                          Align &PrefAlign) const {
  if (Subtarget.is64Bit()) {
    PrefAlign = Align(8);
    PrefAlign = Align(4);
bool LoongArchTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
  if (VT == MVT::f16 && Subtarget.hasBasicF())
unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
  if (VT == MVT::f16 && Subtarget.hasBasicF())
                                                   unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned Opc = Op.getOpcode();
  MVT SrcVT = Src.getSimpleValueType();
  if (OriginalDemandedBits.countr_zero() >= NumElts)
  APInt KnownUndef, KnownZero;
  if (KnownSrc.One[SrcBits - 1])
  else if (KnownSrc.Zero[SrcBits - 1])
  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, TLO.DAG,
                           Depth + 1))
  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
#define NODE_NAME_CASE(node)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
Function Alias Analysis Results
static uint64_t getConstant(const Value *IndexValue)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static MachineBasicBlock * emitSelectPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode)
static SDValue unpackFromRegLoc(const CSKYSubtarget &Subtarget, SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
const MCPhysReg ArgFPR32s[]
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 128-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKEV (if possible).
static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static SDValue unpackF64OnLA32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static bool fitsRegularPattern(typename SmallVectorImpl< ValType >::const_iterator Begin, unsigned CheckStride, typename SmallVectorImpl< ValType >::const_iterator End, ValType ExpectedIndex, unsigned ExpectedIndexStride)
Determine whether a range fits a regular pattern of values.
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static cl::opt< bool > ZeroDivCheck("loongarch-check-zero-division", cl::Hidden, cl::desc("Trap on integer division by zero."), cl::init(false))
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE as byte rotate (if possible).
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet, Type *OrigTy)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static SDValue performSPLIT_PAIR_F64Combine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static MachineBasicBlock * emitSplitPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue performSETCC_BITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp, const SDLoc &DL, SelectionDAG &DAG)
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, unsigned Depth)
static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as shift (if possible).
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPERM (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static MachineBasicBlock * emitPseudoVMSKCOND(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as ZERO_EXTEND Or ANY_EXTEND (if possible).
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
const MCPhysReg ArgFPR64s[]
static MachineBasicBlock * emitPseudoCTPOP(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT, SDValue Src, const SDLoc &DL)
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static bool isSelectPseudo(MachineInstr &MI)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void computeZeroableShuffleElements(ArrayRef< int > Mask, SDValue V1, SDValue V2, APInt &KnownUndef, APInt &KnownZero)
Compute whether each element of a shuffle is zeroable.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue widenShuffleMask(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
static void replaceCMP_XCHG_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Shuffle vectors by lane to generate more optimized instructions.
#define IOCSRRD_CASE(NAME, NODE)
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2, ArrayRef< int > Mask)
Attempts to match vector shuffle as byte rotation.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode, unsigned ScalarSizeInBits, ArrayRef< int > Mask, int MaskOffset, const APInt &Zeroable)
Attempts to match a shuffle mask against the VBSLL, VBSRL, VSLLI and VSRLI instruction.
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE as lane permute and then shuffle (if possible).
static SDValue performVMSKLTZCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT, ArrayRef< int > Mask, SmallVectorImpl< int > &RepeatedMask)
Test whether a shuffle mask is equivalent within each sub-lane.
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF.
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue, bool AllowSymbol=false)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool isSequentialOrUndefInRange(ArrayRef< int > Mask, unsigned Pos, unsigned Size, int Low, int Step=1)
Return true if every element in Mask, beginning from position Pos and ending in Pos + Size,...
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bit signed integer value.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
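Taken together, these APInt members cover the bit-level bookkeeping (zeroable lanes, demanded bits) that the shuffle and demanded-bits code relies on. A small self-contained illustration restricted to the members listed here:

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  using llvm::APInt;

  APInt Zeroable = APInt::getZero(16); // one bit per vector lane
  Zeroable.setHighBits(8);             // upper 8 lanes known zero
  assert(!Zeroable.isAllOnes() && !Zeroable.isZero());

  APInt UpperHalf = APInt::getBitsSetFrom(16, 8);
  assert(Zeroable.isSubsetOf(UpperHalf)); // only high lanes are set
  assert(UpperHalf.countr_zero() == 8);   // trailing zeros below bit 8

  APInt Wide = Zeroable.zext(32);         // zero extend to 32 bits
  assert(Wide.getBitWidth() == 32);
  return 0;
}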
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory being accessed by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v); usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
Value * getPointerOperand()
bool isFloatingPointOperation() const
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
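These accessors are what an atomic-expansion hook typically inspects. A hedged sketch of such a classification; the policy below is illustrative only, not LoongArch's actual rules:

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative policy only: send FP and sub-word RMW operations down an
// expansion path and keep word-sized integer ones native.
static TargetLowering::AtomicExpansionKind
classifyRMWSketch(const AtomicRMWInst *AI) {
  if (AI->isFloatingPointOperation())
    return TargetLowering::AtomicExpansionKind::CmpXChg;
  unsigned Size = AI->getType()->getPrimitiveSizeInBits().getFixedValue();
  if (Size < 32) // i8/i16 go through the masked-intrinsic path
    return TargetLowering::AtomicExpansionKind::MaskedIntrinsic;
  return TargetLowering::AtomicExpansionKind::None;
}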
LLVM Basic Block Representation.
bool test(unsigned Idx) const
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
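A typical consumer of CCState/CCValAssign analyzes the arguments once and then branches on register versus memory locations. A hedged sketch of that shape; CC_Hypothetical stands in for a target's generated CCAssignFn:

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Sketch: classify the formal arguments, then dispatch on where each landed.
// CC_Hypothetical is a placeholder for a real target's CCAssignFn.
static void walkFormalArgsSketch(MachineFunction &MF,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 CCAssignFn *CC_Hypothetical) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction().getCallingConv(), /*IsVarArg=*/false, MF,
                 ArgLocs, MF.getFunction().getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Hypothetical);

  for (CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      Register Reg = VA.getLocReg(); // the argument arrived in a register
      (void)Reg;
    } else {
      int64_t Off = VA.getLocMemOffset(); // it was passed on the stack
      (void)Off;
    }
  }
}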
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getGRLen() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if the result of the specified node is used only by a return node.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
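A hedged sketch of the usual shape of this hook, lifted into a free function so it is self-contained; the HasUAL flag stands in for a subtarget unaligned-access feature and is an assumption, not the real override body:

#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

// Sketch only: report misaligned accesses as legal-and-fast whenever the
// (assumed) unaligned-access feature is present and the type is simple.
static bool allowsMisalignedAccessSketch(EVT VT, bool HasUAL,
                                         unsigned *Fast = nullptr) {
  if (!HasUAL || !VT.isSimple())
    return false;
  if (Fast)
    *Fast = 1; // assume no penalty when the feature is present
  return true;
}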
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
void emitExpandAtomicRMW(AtomicRMWInst *AI) const override
Perform an atomicrmw expansion in a target-specific way.
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
Wrapper class representing physical registers. Should be passed by value.
bool hasFeature(unsigned Feature) const
static MVT getFloatingPointVT(unsigned BitWidth)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
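The MVT queries above compose naturally; a small self-contained illustration using only the members listed here:

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

int main() {
  using llvm::MVT;

  MVT V4I32 = MVT::getVectorVT(MVT::i32, 4);
  assert(V4I32.is128BitVector() && V4I32.isVector());
  assert(V4I32.getVectorNumElements() == 4);
  assert(V4I32.getScalarSizeInBits() == 32);

  // Halve the element count: v4i32 -> v2i32.
  MVT V2I32 = V4I32.getHalfNumVectorElementsVT();
  assert(V2I32.getVectorNumElements() == 2);

  // Map a float vector onto its integer twin: v4f32 -> v4i32.
  MVT V4F32 = MVT::getVectorVT(MVT::f32, 4);
  assert(V4F32.changeVectorElementTypeToInteger() == V4I32);
  return 0;
}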
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void push_back(MachineInstr *MI)
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
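addImm/addReg/addMBB are almost always reached through BuildMI. A hedged sketch of appending a conditional branch inside a custom-inserter-style helper; OpcBEQZ is a placeholder for a target-specific opcode, not a real enum value:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"

using namespace llvm;

// Sketch: append "OpcBEQZ $Src, %TailMBB" to MBB and record the CFG edge.
static void emitBranchSketch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             unsigned OpcBEQZ, Register Src,
                             MachineBasicBlock *TailMBB) {
  const TargetInstrInfo &TII =
      *MBB.getParent()->getSubtarget().getInstrInfo();
  BuildMI(&MBB, DL, TII.get(OpcBEQZ))
      .addReg(Src)      // register operand
      .addMBB(TailMBB); // branch target
  MBB.addSuccessor(TailMBB);
}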
Representation of each machine instruction.
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
EVT getMemoryVT() const
Return the type of the in-memory value.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
size_t use_size() const
Return the number of uses of this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
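SDValue is the (node, result number) handle these accessors operate on. A small sketch of the inspection idiom used throughout DAG combines; the (add X, C) pattern is illustrative:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Sketch: does V compute (add X, C) for some constant C, with a single use?
static bool isSingleUseAddWithConstant(SDValue V, SDValue &X, uint64_t &C) {
  if (V.getOpcode() != ISD::ADD || !V.hasOneUse())
    return false;
  SDValue RHS = V.getOperand(1);
  if (!isa<ConstantSDNode>(RHS))
    return false;
  X = V.getOperand(0);
  C = RHS->getAsZExtVal(); // zero-extended constant value
  return true;
}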
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-...
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
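These factory methods combine in a very regular way during custom lowering. A hedged sketch that splats a constant and shuffles it; the v4i32 type and mask are chosen arbitrarily, and the DAG/DL values are assumed to come from the surrounding lowering context:

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Sketch: build (vector_shuffle (splat 1), undef, <0,0,2,2>) in v4i32.
static SDValue buildShuffleSketch(SelectionDAG &DAG, const SDLoc &DL) {
  EVT VT = MVT::v4i32;
  SDValue One = DAG.getConstant(1, DL, MVT::i32);
  SDValue Splat = DAG.getSplatBuildVector(VT, DL, One);
  int Mask[] = {0, 0, 2, 2};
  return DAG.getVectorShuffle(VT, DL, Splat, DAG.getUNDEF(VT), Mask);
}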
ArrayRef< int > getMask() const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
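The configuration methods above are normally invoked once, from a target's TargetLowering constructor. A hedged sketch of that idiom; MyTargetLowering, the register class, and the chosen actions are placeholders, not any real target's setup:

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Sketch of the constructor-time idiom only; every choice below is
// illustrative, not an actual target configuration.
struct MyTargetLowering : TargetLowering {
  MyTargetLowering(const TargetMachine &TM, const TargetRegisterClass *GPRRC,
                   const TargetRegisterInfo *TRI)
      : TargetLowering(TM) {
    addRegisterClass(MVT::i64, GPRRC);               // i64 lives in GPRs
    setOperationAction(ISD::ROTL, MVT::i64, Expand); // no native rotate-left
    setTruncStoreAction(MVT::f64, MVT::f32, Expand); // no truncating FP store
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setBooleanContents(ZeroOrOneBooleanContent);
    setMinFunctionAlignment(Align(4));
    computeRegisterProperties(TRI); // derive the remaining properties
  }
};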
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ FCANONICALIZE
Returns the platform-specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
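This mapping enables a single expansion loop for all the VECREDUCE_* opcodes. A hedged sketch that halves the vector until one lane remains, assuming a power-of-two element count; real lowerings handle more cases:

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Sketch: expand a VECREDUCE_* node by repeatedly splitting the vector in
// half and applying the underlying base opcode, then extracting lane 0.
static SDValue expandReductionSketch(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
  SDValue Vec = Op.getOperand(0);
  EVT VT = Vec.getValueType();
  while (VT.getVectorNumElements() > 1) {
    EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
    SDValue Lo = DAG.getExtractSubvector(DL, HalfVT, Vec, 0);
    SDValue Hi = DAG.getExtractSubvector(DL, HalfVT, Vec,
                                         HalfVT.getVectorNumElements());
    Vec = DAG.getNode(BaseOpc, DL, HalfVT, Lo, Hi);
    VT = HalfVT;
  }
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(), Vec,
                     DAG.getVectorIdxConstant(0, DL));
}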
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
ABI getTargetABI(StringRef ABIName)
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Kill
The last use of a register.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
initializer< Ty > init(const Ty &Val)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Try to transform a shuffle mask by replacing elements with the scaled index for an equivalent mask of...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
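These MathExtras predicates are the standard vocabulary behind immediate-legality hooks such as isLegalAddImmediate and isLegalICmpImmediate above. A small self-contained illustration:

#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  using namespace llvm;

  assert(isInt<12>(-2048) && !isInt<12>(2048));  // signed 12-bit range
  assert(isUInt<12>(4095) && !isUInt<12>(4096)); // unsigned 12-bit range
  assert((isShiftedInt<12, 2>(-8192)));          // 12-bit value shifted by 2
  assert(isMask_64(0x00ffULL));                  // contiguous ones from bit 0
  assert(isShiftedMask_64(0x0ff0ULL));           // contiguous ones, shifted
  assert(Log2_64(64) == 6 && isPowerOf2_64(64));
  return 0;
}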
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
std::string join_items(Sep Separator, Args &&... Items)
Joins the strings in the parameter pack Items, adding Separator between the elements....
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Align getNonZeroOrigAlign() const
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
bool isBeforeLegalize() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)