65 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
71 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
72 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
73 Attribute::DereferenceableOrNull, Attribute::NoAlias,
74 Attribute::NonNull, Attribute::NoUndef,
75 Attribute::Range, Attribute::NoFPClass})
82 if (CallerAttrs.
contains(Attribute::ZExt) ||
83 CallerAttrs.
contains(Attribute::SExt))
94 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
110 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
111 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
121 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
122 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
123 IsNoExt = Call->paramHasAttr(ArgIdx, Attribute::NoExt);
124 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
125 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
126 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
127 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
128 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
129 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
130 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
131 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
132 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
133 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
134 Alignment = Call->getParamStackAlign(ArgIdx);
137 "multiple ABI attributes?");
153std::pair<SDValue, SDValue>
163 Args.reserve(Ops.
size());
166 for (
unsigned i = 0; i < Ops.
size(); ++i) {
168 Type *Ty = i < OpsTypeOverrides.
size() && OpsTypeOverrides[i]
169 ? OpsTypeOverrides[i]
178 Entry.IsZExt = !Entry.IsSExt;
182 Entry.IsSExt = Entry.IsZExt =
false;
184 Args.push_back(Entry);
188 if (LC == RTLIB::UNKNOWN_LIBCALL || !LibcallName)
198 bool zeroExtend = !signExtend;
203 signExtend = zeroExtend =
false;
215 return LowerCallTo(CLI);
219 LLVMContext &Context, std::vector<EVT> &MemOps,
unsigned Limit,
220 const MemOp &
Op,
unsigned DstAS,
unsigned SrcAS,
222 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
223 Op.getSrcAlign() <
Op.getDstAlign())
228 if (VT == MVT::Other) {
232 VT = MVT::LAST_INTEGER_VALUETYPE;
233 if (
Op.isFixedDstAlign())
240 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
251 unsigned NumMemOps = 0;
255 while (VTSize >
Size) {
266 else if (NewVT == MVT::i64 &&
278 if (NewVT == MVT::i8)
287 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
289 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
299 if (++NumMemOps > Limit)
302 MemOps.push_back(VT);
317 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
327 bool IsSignaling)
const {
332 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
333 &&
"Unsupported setcc type!");
336 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
337 bool ShouldInvertCC =
false;
341 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
342 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
343 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
347 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
348 (VT == MVT::f64) ? RTLIB::UNE_F64 :
349 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
353 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
354 (VT == MVT::f64) ? RTLIB::OGE_F64 :
355 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
359 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
360 (VT == MVT::f64) ? RTLIB::OLT_F64 :
361 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
365 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
366 (VT == MVT::f64) ? RTLIB::OLE_F64 :
367 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
371 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
372 (VT == MVT::f64) ? RTLIB::OGT_F64 :
373 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
376 ShouldInvertCC =
true;
379 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
380 (VT == MVT::f64) ? RTLIB::UO_F64 :
381 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
385 ShouldInvertCC =
true;
388 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
389 (VT == MVT::f64) ? RTLIB::UO_F64 :
390 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
391 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
392 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
393 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
397 ShouldInvertCC =
true;
400 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
401 (VT == MVT::f64) ? RTLIB::OGE_F64 :
402 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
405 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
406 (VT == MVT::f64) ? RTLIB::OGT_F64 :
407 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
410 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
411 (VT == MVT::f64) ? RTLIB::OLE_F64 :
412 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
415 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
416 (VT == MVT::f64) ? RTLIB::OLT_F64 :
417 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
425 SDValue Ops[2] = {NewLHS, NewRHS};
430 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
435 if (LC1Impl == RTLIB::Unsupported) {
437 "no libcall available to soften floating-point compare");
441 if (ShouldInvertCC) {
443 CCCode = getSetCCInverse(CCCode, RetVT);
446 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
451 if (LC2Impl == RTLIB::Unsupported) {
453 "no libcall available to soften floating-point compare");
457 "unordered call should be simple boolean");
467 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
470 CCCode = getSetCCInverse(CCCode, RetVT);
471 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
485 if (!isPositionIndependent())
524 if (!TM.shouldAssumeDSOLocal(GV))
528 if (isPositionIndependent())
544 const APInt &DemandedElts,
547 unsigned Opcode =
Op.getOpcode();
555 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
565 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
566 if (!Op1C || Op1C->isOpaque())
570 const APInt &
C = Op1C->getAPIntValue();
575 EVT VT =
Op.getValueType();
592 EVT VT =
Op.getValueType();
607 "ShrinkDemandedOp only supports binary operators!");
608 assert(
Op.getNode()->getNumValues() == 1 &&
609 "ShrinkDemandedOp only supports nodes with one result!");
611 EVT VT =
Op.getValueType();
620 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
621 "ShrinkDemandedOp only supports operands that have the same size!");
625 if (!
Op.getNode()->hasOneUse())
642 Op.getOpcode(), dl, SmallVT,
645 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
660 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
669 const APInt &DemandedElts,
689 bool AssumeSingleUse)
const {
690 EVT VT =
Op.getValueType();
706 EVT VT =
Op.getValueType();
724 switch (
Op.getOpcode()) {
730 EVT SrcVT = Src.getValueType();
731 EVT DstVT =
Op.getValueType();
737 if (NumSrcEltBits == NumDstEltBits)
738 if (
SDValue V = SimplifyMultipleUseDemandedBits(
742 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
743 unsigned Scale = NumDstEltBits / NumSrcEltBits;
747 for (
unsigned i = 0; i != Scale; ++i) {
748 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
749 unsigned BitOffset = EltOffset * NumSrcEltBits;
752 DemandedSrcBits |=
Sub;
753 for (
unsigned j = 0; j != NumElts; ++j)
755 DemandedSrcElts.
setBit((j * Scale) + i);
759 if (
SDValue V = SimplifyMultipleUseDemandedBits(
760 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
765 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
766 unsigned Scale = NumSrcEltBits / NumDstEltBits;
770 for (
unsigned i = 0; i != NumElts; ++i)
771 if (DemandedElts[i]) {
772 unsigned Offset = (i % Scale) * NumDstEltBits;
774 DemandedSrcElts.
setBit(i / Scale);
777 if (
SDValue V = SimplifyMultipleUseDemandedBits(
778 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
792 return Op.getOperand(0);
794 return Op.getOperand(1);
805 return Op.getOperand(0);
807 return Op.getOperand(1);
817 return Op.getOperand(0);
819 return Op.getOperand(1);
825 return Op.getOperand(0);
829 return Op.getOperand(1);
835 if (std::optional<uint64_t> MaxSA =
838 unsigned ShAmt = *MaxSA;
839 unsigned NumSignBits =
842 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
850 if (std::optional<uint64_t> MaxSA =
853 unsigned ShAmt = *MaxSA;
857 unsigned NumSignBits =
889 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
896 if (NumSignBits >= (
BitWidth - ExBits + 1))
909 EVT SrcVT = Src.getValueType();
910 EVT DstVT =
Op.getValueType();
911 if (IsLE && DemandedElts == 1 &&
924 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
927 !DemandedElts[CIdx->getZExtValue()])
938 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
941 if (DemandedSubElts == 0)
951 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
952 for (
unsigned i = 0; i != NumElts; ++i) {
953 int M = ShuffleMask[i];
954 if (M < 0 || !DemandedElts[i])
957 IdentityLHS &= (M == (int)i);
958 IdentityRHS &= ((M - NumElts) == i);
964 return Op.getOperand(0);
966 return Op.getOperand(1);
976 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
986 unsigned Depth)
const {
987 EVT VT =
Op.getValueType();
994 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
1000 unsigned Depth)
const {
1002 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
1014 "SRL or SRA node is required here!");
1017 if (!N1C || !N1C->
isOne())
1064 unsigned ShiftOpc =
Op.getOpcode();
1065 bool IsSigned =
false;
1069 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1074 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1080 if (NumZero >= 2 && NumSigned < NumZero) {
1085 if (NumSigned >= 1) {
1093 if (NumZero >= 1 && NumSigned < NumZero) {
1113 EVT VT =
Op.getValueType();
1127 Add.getOperand(1)) &&
1138 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
1158 unsigned Depth,
bool AssumeSingleUse)
const {
1161 "Mask size mismatches value type size!");
1166 EVT VT =
Op.getValueType();
1168 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1170 "Unexpected vector size");
1173 APInt DemandedElts = OriginalDemandedElts;
1193 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1198 bool HasMultiUse =
false;
1199 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1208 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1217 switch (
Op.getOpcode()) {
1221 if (!DemandedElts[0])
1226 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1228 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1233 if (DemandedElts == 1)
1246 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1255 auto *LD = cast<LoadSDNode>(
Op);
1256 if (getTargetConstantFromLoad(LD)) {
1262 EVT MemVT = LD->getMemoryVT();
1274 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1279 APInt DemandedVecElts(DemandedElts);
1281 unsigned Idx = CIdx->getZExtValue();
1285 if (!DemandedElts[
Idx])
1292 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1298 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1302 if (!!DemandedVecElts)
1315 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
1317 APInt DemandedSrcElts = DemandedElts;
1321 if (SimplifyDemandedBits(
Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1324 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1330 if (!!DemandedSubElts)
1332 if (!!DemandedSrcElts)
1338 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1340 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1342 if (NewSub || NewSrc) {
1343 NewSub = NewSub ? NewSub :
Sub;
1344 NewSrc = NewSrc ? NewSrc : Src;
1357 if (Src.getValueType().isScalableVector())
1360 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1363 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1369 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1384 EVT SubVT =
Op.getOperand(0).getValueType();
1387 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1388 APInt DemandedSubElts =
1389 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1390 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1391 Known2, TLO,
Depth + 1))
1394 if (!!DemandedSubElts)
1404 APInt DemandedLHS, DemandedRHS;
1409 if (!!DemandedLHS || !!DemandedRHS) {
1415 if (!!DemandedLHS) {
1416 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1421 if (!!DemandedRHS) {
1422 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1429 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1431 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1433 if (DemandedOp0 || DemandedOp1) {
1434 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1435 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1470 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1482 unsigned NumSubElts =
1499 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1503 Known2, TLO,
Depth + 1))
1525 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1527 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1529 if (DemandedOp0 || DemandedOp1) {
1530 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1531 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1543 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1549 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1550 Known2, TLO,
Depth + 1)) {
1570 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1572 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1574 if (DemandedOp0 || DemandedOp1) {
1575 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1576 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1587 for (
int I = 0;
I != 2; ++
I) {
1590 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1591 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1593 for (
int J = 0; J != 2; ++J) {
1616 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1619 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1646 if (
C->getAPIntValue() == Known2.
One) {
1655 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1667 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1668 uint64_t ShiftAmt = ShiftC->getZExtValue();
1671 : Ones.
lshr(ShiftAmt);
1673 isDesirableToCommuteXorWithShift(
Op.getNode())) {
1688 if (!
C || !
C->isAllOnes())
1694 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1696 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1698 if (DemandedOp0 || DemandedOp1) {
1699 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1700 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1710 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1711 Known, TLO,
Depth + 1))
1713 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1714 Known2, TLO,
Depth + 1))
1725 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1726 Known, TLO,
Depth + 1))
1728 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1729 Known2, TLO,
Depth + 1))
1736 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1737 Known, TLO,
Depth + 1))
1739 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1740 Known2, TLO,
Depth + 1))
1783 if (std::optional<uint64_t> KnownSA =
1785 unsigned ShAmt = *KnownSA;
1795 if (std::optional<uint64_t> InnerSA =
1797 unsigned C1 = *InnerSA;
1799 int Diff = ShAmt - C1;
1818 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1819 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1836 InnerOp, DemandedElts,
Depth + 2)) {
1837 unsigned InnerShAmt = *SA2;
1838 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1840 (InnerBits - InnerShAmt + ShAmt) &&
1854 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1867 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1868 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1879 Op.getNode()->hasOneUse()) {
1887 isTypeDesirableForOp(
ISD::SHL, SmallVT) &&
1890 assert(DemandedSize <= SmallVTBits &&
1891 "Narrowed below demanded bits?");
1911 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1920 Flags.setNoSignedWrap(IsNSW);
1921 Flags.setNoUnsignedWrap(IsNUW);
1926 NewShiftAmt, Flags);
1939 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1952 if (std::optional<uint64_t> MaxSA =
1954 unsigned ShAmt = *MaxSA;
1955 unsigned NumSignBits =
1958 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1968 if (std::optional<uint64_t> KnownSA =
1970 unsigned ShAmt = *KnownSA;
1980 if (std::optional<uint64_t> InnerSA =
1982 unsigned C1 = *InnerSA;
1984 int Diff = ShAmt - C1;
2000 if (std::optional<uint64_t> InnerSA =
2002 unsigned C1 = *InnerSA;
2004 unsigned Combined = std::min(C1 + ShAmt,
BitWidth - 1);
2016 if (
Op->getFlags().hasExact())
2025 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
2041 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2050 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2051 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2065 if (std::optional<uint64_t> MaxSA =
2067 unsigned ShAmt = *MaxSA;
2071 unsigned NumSignBits =
2080 DemandedElts,
Depth + 1))
2104 if (std::optional<uint64_t> KnownSA =
2106 unsigned ShAmt = *KnownSA;
2113 if (std::optional<uint64_t> InnerSA =
2115 unsigned LowBits =
BitWidth - ShAmt;
2121 if (*InnerSA == ShAmt) {
2131 unsigned NumSignBits =
2133 if (NumSignBits > ShAmt)
2143 if (
Op->getFlags().hasExact())
2151 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2161 Flags.setExact(
Op->getFlags().hasExact());
2179 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2180 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2190 DemandedElts,
Depth + 1))
2203 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2208 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2209 Known, TLO,
Depth + 1))
2218 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2221 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2225 Known2 <<= (IsFSHL ? Amt : (
BitWidth - Amt));
2226 Known >>= (IsFSHL ? (
BitWidth - Amt) : Amt);
2232 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2233 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2234 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2235 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2236 if (DemandedOp0 || DemandedOp1) {
2237 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2238 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2249 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2250 Known2, TLO,
Depth + 1))
2266 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2272 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2282 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2287 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2296 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2306 unsigned Opc =
Op.getOpcode();
2313 unsigned NumSignBits =
2317 if (NumSignBits >= NumDemandedUpperBits)
2358 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2383 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2391 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2410 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2415 unsigned MinSignedBits =
2417 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2420 if (!AlreadySignExtended) {
2438 InputDemandedBits.
setBit(ExVTBits - 1);
2440 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2448 if (Known.
Zero[ExVTBits - 1])
2452 if (Known.
One[ExVTBits - 1]) {
2462 EVT HalfVT =
Op.getOperand(0).getValueType();
2470 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2473 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2476 Known = KnownHi.
concat(KnownLo);
2485 EVT SrcVT = Src.getValueType();
2494 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2505 APInt InDemandedElts = DemandedElts.
zext(InElts);
2506 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2515 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2516 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2526 EVT SrcVT = Src.getValueType();
2531 APInt InDemandedElts = DemandedElts.
zext(InElts);
2536 InDemandedBits.
setBit(InBits - 1);
2542 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2557 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2578 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2579 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2589 EVT SrcVT = Src.getValueType();
2596 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2601 APInt InDemandedElts = DemandedElts.
zext(InElts);
2602 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2609 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2610 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2619 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2621 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2631 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2632 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2637 switch (Src.getOpcode()) {
2648 if (Src.getNode()->hasOneUse()) {
2660 std::optional<uint64_t> ShAmtC =
2662 if (!ShAmtC || *ShAmtC >=
BitWidth)
2688 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2690 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2694 Known.
Zero |= ~InMask;
2695 Known.
One &= (~Known.Zero);
2701 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2702 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2710 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2711 if (CIdx->getAPIntValue().ult(NumSrcElts))
2718 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2720 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2726 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2727 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2743 EVT SrcVT = Src.getValueType();
2753 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2754 SrcVT != MVT::f128) {
2756 EVT Ty = OpVTLegal ? VT : MVT::i32;
2760 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2761 if (!OpVTLegal && OpVTSizeInBits > 32)
2763 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2773 unsigned Scale =
BitWidth / NumSrcEltBits;
2777 for (
unsigned i = 0; i != Scale; ++i) {
2778 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2779 unsigned BitOffset = EltOffset * NumSrcEltBits;
2781 if (!
Sub.isZero()) {
2782 DemandedSrcBits |=
Sub;
2783 for (
unsigned j = 0; j != NumElts; ++j)
2784 if (DemandedElts[j])
2785 DemandedSrcElts.
setBit((j * Scale) + i);
2789 APInt KnownSrcUndef, KnownSrcZero;
2790 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2791 KnownSrcZero, TLO,
Depth + 1))
2795 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2796 KnownSrcBits, TLO,
Depth + 1))
2798 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2800 unsigned Scale = NumSrcEltBits /
BitWidth;
2804 for (
unsigned i = 0; i != NumElts; ++i)
2805 if (DemandedElts[i]) {
2808 DemandedSrcElts.
setBit(i / Scale);
2812 APInt KnownSrcUndef, KnownSrcZero;
2813 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2814 KnownSrcZero, TLO,
Depth + 1))
2819 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2820 KnownSrcBits, TLO,
Depth + 1))
2825 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2826 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2848 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2867 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2872 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2878 if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2880 SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
2881 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2897 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2898 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2899 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2900 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2901 if (DemandedOp0 || DemandedOp1) {
2902 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2903 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2917 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2918 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2930 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2957 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2960 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2961 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2965 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2966 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2974 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2975 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2986 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known, TLO,
2996 Known.
Zero |= SignMask;
2997 Known.
One &= ~SignMask;
3013 if (SimplifyDemandedBits(Op0, ~SignMask0 &
DemandedBits, DemandedElts,
3014 Known, TLO,
Depth + 1) ||
3015 SimplifyDemandedBits(Op1, SignMask1, DemandedElts, Known2, TLO,
3028 Known.
Zero &= ~SignMask0;
3029 Known.
One &= ~SignMask0;
3039 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known, TLO,
3044 Known.
Zero ^= SignMask;
3045 Known.
One ^= SignMask;
3056 if (
Op.getValueType().isScalableVector())
3058 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
3071 if (!isTargetCanonicalConstantNode(
Op) &&
3075 auto *C = dyn_cast<ConstantSDNode>(V);
3076 return C && C->isOpaque();
3097 const APInt &DemandedElts,
3103 APInt KnownUndef, KnownZero;
3105 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
3117 const APInt &UndefOp0,
3118 const APInt &UndefOp1) {
3121 "Vector binop only");
3126 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3128 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3129 const APInt &UndefVals) {
3130 if (UndefVals[Index])
3133 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
3137 auto *
C = dyn_cast<ConstantSDNode>(Elt);
3138 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
3146 for (
unsigned i = 0; i != NumElts; ++i) {
3165 bool AssumeSingleUse)
const {
3166 EVT VT =
Op.getValueType();
3167 unsigned Opcode =
Op.getOpcode();
3168 APInt DemandedElts = OriginalDemandedElts;
3174 if (!shouldSimplifyDemandedVectorElts(
Op, TLO))
3182 "Mask size mismatches value type element count!");
3191 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3195 if (DemandedElts == 0) {
3210 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3211 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3213 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3215 if (NewOp0 || NewOp1) {
3218 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3226 if (!DemandedElts[0]) {
3234 EVT SrcVT = Src.getValueType();
3246 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3256 EVT SrcVT = Src.getValueType();
3263 for (
unsigned I = 0;
I != NumElts; ++
I) {
3264 if (DemandedElts[
I]) {
3265 unsigned Offset =
I * EltSize;
3270 if (SimplifyDemandedBits(Src, DemandedSrcBits, Known, TLO,
Depth + 1))
3278 if (NumSrcElts == NumElts)
3279 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3280 KnownZero, TLO,
Depth + 1);
3282 APInt SrcDemandedElts, SrcZero, SrcUndef;
3286 if ((NumElts % NumSrcElts) == 0) {
3287 unsigned Scale = NumElts / NumSrcElts;
3289 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3299 for (
unsigned i = 0; i != NumElts; ++i)
3300 if (DemandedElts[i]) {
3301 unsigned Ofs = (i % Scale) * EltSizeInBits;
3302 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3306 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3314 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3318 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3319 unsigned Elt = Scale * SrcElt + SubElt;
3320 if (DemandedElts[Elt])
3328 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3329 if (SrcDemandedElts[i]) {
3331 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3333 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3341 if ((NumSrcElts % NumElts) == 0) {
3342 unsigned Scale = NumSrcElts / NumElts;
3344 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3350 for (
unsigned i = 0; i != NumElts; ++i) {
3351 if (DemandedElts[i]) {
3381 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3383 bool Updated =
false;
3384 for (
unsigned i = 0; i != NumElts; ++i) {
3385 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3395 for (
unsigned i = 0; i != NumElts; ++i) {
3397 if (
SrcOp.isUndef()) {
3399 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3407 EVT SubVT =
Op.getOperand(0).getValueType();
3410 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3413 APInt SubUndef, SubZero;
3414 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3417 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3418 KnownZero.
insertBits(SubZero, i * NumSubElts);
3423 bool FoundNewSub =
false;
3425 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3428 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3429 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3430 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3431 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3447 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3449 APInt DemandedSrcElts = DemandedElts;
3452 APInt SubUndef, SubZero;
3453 if (SimplifyDemandedVectorElts(
Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3458 if (!DemandedSrcElts && !Src.isUndef())
3463 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3471 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3472 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3473 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3475 if (NewSrc || NewSub) {
3476 NewSrc = NewSrc ? NewSrc : Src;
3477 NewSub = NewSub ? NewSub :
Sub;
3479 NewSub,
Op.getOperand(2));
3488 if (Src.getValueType().isScalableVector())
3491 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3494 APInt SrcUndef, SrcZero;
3495 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3503 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3504 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3516 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3520 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3521 unsigned Idx = CIdx->getZExtValue();
3522 if (!DemandedElts[
Idx])
3525 APInt DemandedVecElts(DemandedElts);
3527 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3528 KnownZero, TLO,
Depth + 1))
3537 APInt VecUndef, VecZero;
3538 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3551 APInt UndefSel, ZeroSel;
3552 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3557 APInt DemandedLHS(DemandedElts);
3558 APInt DemandedRHS(DemandedElts);
3559 APInt UndefLHS, ZeroLHS;
3560 APInt UndefRHS, ZeroRHS;
3561 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3564 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3568 KnownUndef = UndefLHS & UndefRHS;
3569 KnownZero = ZeroLHS & ZeroRHS;
3573 APInt DemandedSel = DemandedElts & ~KnownZero;
3574 if (DemandedSel != DemandedElts)
3575 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3587 APInt DemandedLHS(NumElts, 0);
3588 APInt DemandedRHS(NumElts, 0);
3589 for (
unsigned i = 0; i != NumElts; ++i) {
3590 int M = ShuffleMask[i];
3591 if (M < 0 || !DemandedElts[i])
3593 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3594 if (M < (
int)NumElts)
3597 DemandedRHS.
setBit(M - NumElts);
3603 bool FoldLHS = !DemandedLHS && !
LHS.isUndef();
3604 bool FoldRHS = !DemandedRHS && !
RHS.isUndef();
3605 if (FoldLHS || FoldRHS) {
3614 APInt UndefLHS, ZeroLHS;
3615 APInt UndefRHS, ZeroRHS;
3616 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3619 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3624 bool Updated =
false;
3625 bool IdentityLHS =
true, IdentityRHS =
true;
3627 for (
unsigned i = 0; i != NumElts; ++i) {
3628 int &M = NewMask[i];
3631 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3632 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3636 IdentityLHS &= (M < 0) || (M == (
int)i);
3637 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3642 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3644 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3650 for (
unsigned i = 0; i != NumElts; ++i) {
3651 int M = ShuffleMask[i];
3654 }
else if (M < (
int)NumElts) {
3660 if (UndefRHS[M - NumElts])
3662 if (ZeroRHS[M - NumElts])
3671 APInt SrcUndef, SrcZero;
3673 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3674 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3675 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3682 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3683 DemandedSrcElts == 1) {
3696 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3697 Op->isOnlyUserOf(Src.getNode()) &&
3698 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3700 EVT SrcVT = Src.getValueType();
3707 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3721 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3722 APInt UndefLHS, ZeroLHS;
3723 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3744 APInt UndefRHS, ZeroRHS;
3745 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3748 APInt UndefLHS, ZeroLHS;
3749 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3753 KnownZero = ZeroLHS & ZeroRHS;
3759 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3771 APInt UndefRHS, ZeroRHS;
3772 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3775 APInt UndefLHS, ZeroLHS;
3776 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3780 KnownZero = ZeroLHS;
3781 KnownUndef = UndefLHS & UndefRHS;
3786 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3797 APInt SrcUndef, SrcZero;
3798 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3803 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3804 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3808 KnownUndef &= DemandedElts0;
3809 KnownZero &= DemandedElts0;
3814 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3821 KnownZero |= SrcZero;
3822 KnownUndef &= SrcUndef;
3823 KnownUndef &= ~KnownZero;
3827 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3834 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3835 KnownZero, TLO,
Depth + 1))
3839 if (
SDValue NewOp = SimplifyMultipleUseDemandedVectorElts(
3840 Op.getOperand(0), DemandedElts, TLO.
DAG,
Depth + 1))
3854 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3855 KnownZero, TLO,
Depth + 1))
3861 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3862 KnownZero, TLO,
Depth))
3867 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3868 TLO,
Depth, AssumeSingleUse))
3874 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3888 const APInt &DemandedElts,
3890 unsigned Depth)
const {
3895 "Should use MaskedValueIsZero if you don't know whether Op"
3896 " is a target node!");
3903 unsigned Depth)
const {
3910 unsigned Depth)
const {
3922 unsigned Depth)
const {
3931 unsigned Depth)
const {
3936 "Should use ComputeNumSignBits if you don't know whether Op"
3937 " is a target node!");
3954 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3955 " is a target node!");
3966 "Should use SimplifyDemandedBits if you don't know whether Op"
3967 " is a target node!");
3968 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3980 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3981 " is a target node!");
4014 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
4015 " is a target node!");
4019 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
4022 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
4034 "Should use canCreateUndefOrPoison if you don't know whether Op"
4035 " is a target node!");
4041 const APInt &DemandedElts,
4044 unsigned Depth)
const {
4049 "Should use isKnownNeverNaN if you don't know whether Op"
4050 " is a target node!");
4055 const APInt &DemandedElts,
4058 unsigned Depth)
const {
4063 "Should use isSplatValue if you don't know whether Op"
4064 " is a target node!");
4079 CVal = CN->getAPIntValue();
4080 EltWidth =
N.getValueType().getScalarSizeInBits();
4087 CVal = CVal.
trunc(EltWidth);
4093 return CVal.
isOne();
4135 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
4138 return N->isAllOnes() && SExt;
4147 DAGCombinerInfo &DCI)
const {
4175 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
4176 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4179 AndC->getAPIntValue().getActiveBits());
4206 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4214 if (DCI.isBeforeLegalizeOps() ||
4243 DAGCombinerInfo &DCI)
const {
4284SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4289 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4298 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4302 EVT XVT =
X.getValueType();
4326 auto checkConstants = [&
I1, &I01]() ->
bool {
4331 if (checkConstants()) {
4339 if (!checkConstants())
4345 const unsigned KeptBits =
I1.logBase2();
4346 const unsigned KeptBitsMinusOne = I01.
logBase2();
4349 if (KeptBits != (KeptBitsMinusOne + 1))
4363 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4367SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4369 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4371 "Should be a comparison with 0.");
4373 "Valid only for [in]equality comparisons.");
4375 unsigned NewShiftOpcode;
4381 auto Match = [&NewShiftOpcode, &
X, &
C, &
Y, &DAG,
this](
SDValue V) {
4385 unsigned OldShiftOpcode =
V.getOpcode();
4386 switch (OldShiftOpcode) {
4398 C =
V.getOperand(0);
4403 Y =
V.getOperand(1);
4408 X, XC, CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4425 EVT VT =
X.getValueType();
4440 DAGCombinerInfo &DCI)
const {
4443 "Unexpected binop");
4471 if (!DCI.isCalledByLegalizer())
4472 DCI.AddToWorklist(YShl1.
getNode());
4487 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4490 EVT CTVT = CTPOP.getValueType();
4491 SDValue CTOp = CTPOP.getOperand(0);
4511 for (
unsigned i = 0; i <
Passes; i++) {
4560 auto getRotateSource = [](
SDValue X) {
4562 return X.getOperand(0);
4569 if (
SDValue R = getRotateSource(N0))
4602 if (!C1 || !C1->
isZero())
4627 if (
Or.getOperand(0) ==
Other) {
4628 X =
Or.getOperand(0);
4629 Y =
Or.getOperand(1);
4632 if (
Or.getOperand(1) ==
Other) {
4633 X =
Or.getOperand(1);
4634 Y =
Or.getOperand(0);
4644 if (matchOr(F0, F1)) {
4651 if (matchOr(F1, F0)) {
4667 const SDLoc &dl)
const {
4677 bool N0ConstOrSplat =
4679 bool N1ConstOrSplat =
4687 if (N0ConstOrSplat && !N1ConstOrSplat &&
4690 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4696 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4701 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4710 const APInt &C1 = N1C->getAPIntValue();
4730 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4760 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4761 const APInt &C1 = N1C->getAPIntValue();
4776 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4777 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4778 MinBits =
C->getAPIntValue().countr_one();
4786 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4789 MinBits = LN0->getMemoryVT().getSizeInBits();
4793 MinBits = LN0->getMemoryVT().getSizeInBits();
4804 MinBits >= ReqdBits) {
4806 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4809 if (MinBits == 1 && C1 == 1)
4828 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4841 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4860 auto *Lod = cast<LoadSDNode>(N0.
getOperand(0));
4862 unsigned bestWidth = 0, bestOffset = 0;
4863 if (Lod->isSimple() && Lod->isUnindexed() &&
4864 (Lod->getMemoryVT().isByteSized() ||
4866 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4868 unsigned maskWidth = origWidth;
4872 origWidth = Lod->getMemoryVT().getSizeInBits();
4876 for (
unsigned width = 8; width < origWidth; width *= 2) {
4881 unsigned maxOffset = origWidth - width;
4882 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4883 if (Mask.isSubsetOf(newMask)) {
4884 unsigned ptrOffset =
4886 unsigned IsFast = 0;
4887 assert((ptrOffset % 8) == 0 &&
"Non-Bytealigned pointer offset");
4892 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4893 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4895 bestOffset = ptrOffset / 8;
4896 bestMask = Mask.lshr(offset);
4910 if (bestOffset != 0)
4914 Lod->getPointerInfo().getWithOffset(bestOffset),
4915 Lod->getBaseAlign());
4994 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
5001 return DAG.
getSetCC(dl, VT, ZextOp,
5003 }
else if ((N1C->isZero() || N1C->isOne()) &&
5050 return DAG.
getSetCC(dl, VT, Val, N1,
5053 }
else if (N1C->isOne()) {
5090 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
5136 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
5143 const APInt &C1 = N1C->getAPIntValue();
5145 APInt MinVal, MaxVal;
5167 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5187 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5235 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5236 VT, N0, N1,
Cond, DCI, dl))
5243 bool CmpZero = N1C->isZero();
5244 bool CmpNegOne = N1C->isAllOnes();
5245 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5248 unsigned EltBits = V.getScalarValueSizeInBits();
5249 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5256 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
5257 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5260 Hi =
RHS.getOperand(0);
5264 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
5265 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5268 Hi =
LHS.getOperand(0);
5276 unsigned HalfBits = EltBits / 2;
5287 if (IsConcat(N0,
Lo,
Hi))
5288 return MergeConcat(
Lo,
Hi);
5325 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5326 const APInt &C1 = N1C->getAPIntValue();
5338 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5341 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5342 if (AndRHS->getAPIntValue().isPowerOf2() &&
5349 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5368 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5369 const APInt &AndRHSC = AndRHS->getAPIntValue();
5421 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5427 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5428 auto *CFP = cast<ConstantFPSDNode>(N1);
5429 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5450 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5469 if (CFP->getValueAPF().isInfinity()) {
5470 bool IsNegInf = CFP->getValueAPF().isNegative();
5481 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5490 "Integer types should be handled by FoldSetCC");
5496 if (UOF ==
unsigned(EqTrue))
5501 if (NewCond !=
Cond &&
5504 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5511 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5548 bool LegalRHSImm =
false;
5550 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5551 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5556 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5564 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5570 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5574 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5579 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5588 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5594 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5597 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5600 if (
SDValue V = foldSetCCWithOr(VT, N0, N1,
Cond, dl, DCI))
5611 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5614 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5627 N0 = DAG.
getNOT(dl, Temp, OpVT);
5636 Temp = DAG.
getNOT(dl, N0, OpVT);
5643 Temp = DAG.
getNOT(dl, N1, OpVT);
5650 Temp = DAG.
getNOT(dl, N0, OpVT);
5657 Temp = DAG.
getNOT(dl, N1, OpVT);
5666 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5693 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5694 GA = GASD->getGlobal();
5695 Offset += GASD->getOffset();
5703 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5704 Offset += V->getSExtValue();
5708 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5709 Offset += V->getSExtValue();
5730 unsigned S = Constraint.
size();
5733 switch (Constraint[0]) {
5736 return C_RegisterClass;
5764 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5765 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5793 std::vector<SDValue> &Ops,
5796 if (Constraint.
size() > 1)
5799 char ConstraintLetter = Constraint[0];
5800 switch (ConstraintLetter) {
5816 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5820 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5830 if (ConstraintLetter !=
'n') {
5831 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5833 GA->getValueType(0),
5834 Offset + GA->getOffset()));
5837 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5839 BA->getBlockAddress(), BA->getValueType(0),
5840 Offset + BA->getOffset(), BA->getTargetFlags()));
5843 if (isa<BasicBlockSDNode>(
Op)) {
5848 const unsigned OpCode =
Op.getOpcode();
5850 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5851 Op =
Op.getOperand(1);
5854 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5855 Op =
Op.getOperand(0);
5872std::pair<unsigned, const TargetRegisterClass *>
5878 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5883 std::pair<unsigned, const TargetRegisterClass *> R =
5895 std::pair<unsigned, const TargetRegisterClass *> S =
5896 std::make_pair(PR, RC);
5918 assert(!ConstraintCode.empty() &&
"No known constraint!");
5919 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5925 assert(!ConstraintCode.empty() &&
"No known constraint!");
5926 return atoi(ConstraintCode.c_str());
5940 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5941 unsigned maCount = 0;
5947 unsigned LabelNo = 0;
5950 ConstraintOperands.emplace_back(std::move(CI));
5960 switch (OpInfo.
Type) {
5970 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5971 if (
auto *STy = dyn_cast<StructType>(Call.getType())) {
5976 assert(ResNo == 0 &&
"Asm only has one result!");
5986 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5997 OpTy = Call.getParamElementType(ArgNo);
5998 assert(OpTy &&
"Indirect operand must have elementtype attribute");
6002 if (
StructType *STy = dyn_cast<StructType>(OpTy))
6003 if (STy->getNumElements() == 1)
6004 OpTy = STy->getElementType(0);
6009 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
6030 if (!ConstraintOperands.empty()) {
6032 unsigned bestMAIndex = 0;
6033 int bestWeight = -1;
6039 for (maIndex = 0; maIndex < maCount; ++maIndex) {
6041 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6042 cIndex != eIndex; ++cIndex) {
6063 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
6068 weightSum += weight;
6071 if (weightSum > bestWeight) {
6072 bestWeight = weightSum;
6073 bestMAIndex = maIndex;
6080 cInfo.selectAlternative(bestMAIndex);
6085 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6086 cIndex != eIndex; ++cIndex) {
6097 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6100 std::pair<unsigned, const TargetRegisterClass *> InputRC =
6107 if ((OutOpIsIntOrFP != InOpIsIntOrFP) ||
6108 (MatchRC.second != InputRC.second)) {
6110 " with a matching output constraint of"
6111 " incompatible type!");
6117 return ConstraintOperands;
6152 if (maIndex >= (
int)
info.multipleAlternatives.size())
6153 rCodes = &
info.Codes;
6155 rCodes = &
info.multipleAlternatives[maIndex].Codes;
6159 for (
const std::string &rCode : *rCodes) {
6161 getSingleConstraintMatchWeight(
info, rCode.c_str());
6162 if (weight > BestWeight)
6163 BestWeight = weight;
6176 Value *CallOperandVal =
info.CallOperandVal;
6179 if (!CallOperandVal)
6182 switch (*constraint) {
6185 if (isa<ConstantInt>(CallOperandVal))
6186 weight = CW_Constant;
6189 if (isa<GlobalValue>(CallOperandVal))
6190 weight = CW_Constant;
6194 if (isa<ConstantFP>(CallOperandVal))
6195 weight = CW_Constant;
6208 weight = CW_Register;
6212 weight = CW_Default;
6246 Ret.reserve(OpInfo.
Codes.size());
6261 Ret.emplace_back(Code, CType);
6279 "need immediate or other");
6284 std::vector<SDValue> ResultOps;
6286 return !ResultOps.empty();
6294 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
6297 if (OpInfo.
Codes.size() == 1) {
6305 unsigned BestIdx = 0;
6306 for (
const unsigned E =
G.size();
6313 if (BestIdx + 1 == E) {
6329 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6333 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6340 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6355 EVT VT =
N->getValueType(0);
6360 bool UseSRA =
false;
6366 APInt Divisor =
C->getAPIntValue();
6388 "Expected matchUnaryPredicate to return one element for scalable "
6393 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6395 Factor = Factors[0];
6413 EVT VT =
N->getValueType(0);
6418 bool UseSRL =
false;
6424 APInt Divisor =
C->getAPIntValue();
6449 "Expected matchUnaryPredicate to return one element for scalable "
6454 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6456 Factor = Factors[0];
6499 EVT VT =
N->getValueType(0);
6535 bool IsAfterLegalization,
6536 bool IsAfterLegalTypes,
6539 EVT VT =
N->getValueType(0);
6565 if (
N->getFlags().hasExact())
6574 const APInt &Divisor =
C->getAPIntValue();
6576 int NumeratorFactor = 0;
6587 NumeratorFactor = 1;
6590 NumeratorFactor = -1;
6607 SDValue MagicFactor, Factor, Shift, ShiftMask;
6615 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6616 "Expected matchUnaryPredicate to return one element for scalable "
6623 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6624 MagicFactor = MagicFactors[0];
6625 Factor = Factors[0];
6627 ShiftMask = ShiftMasks[0];
6673 SDValue Q = GetMULHS(N0, MagicFactor);
6703 bool IsAfterLegalization,
6704 bool IsAfterLegalTypes,
6707 EVT VT =
N->getValueType(0);
6733 if (
N->getFlags().hasExact())
6743 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6749 const APInt& Divisor =
C->getAPIntValue();
6751 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6755 if (Divisor.
isOne()) {
6756 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6757 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6761 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
6766 "We shouldn't generate an undefined shift!");
6768 "We shouldn't generate an undefined shift!");
6770 "Unexpected pre-shift");
6777 UseNPQ |= magics.
IsAdd;
6778 UsePreShift |= magics.
PreShift != 0;
6793 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6801 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6802 "Expected matchUnaryPredicate to return one for scalable vectors");
6808 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6809 PreShift = PreShifts[0];
6810 MagicFactor = MagicFactors[0];
6811 PostShift = PostShifts[0];
6863 Q = GetMULHU(Q, MagicFactor);
6876 NPQ = GetMULHU(NPQ, NPQFactor);
6895 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6909 if (SplatValue != Values.
end()) {
6914 Replacement = *SplatValue;
6918 if (!AlternativeReplacement)
6921 Replacement = AlternativeReplacement;
6934 DAGCombinerInfo &DCI,
6937 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6940 DCI.AddToWorklist(
N);
6948TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6950 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6958 "Only applicable for (in)equality comparisons.");
6971 bool ComparingWithAllZeros =
true;
6972 bool AllComparisonsWithNonZerosAreTautological =
true;
6973 bool HadTautologicalLanes =
false;
6974 bool AllLanesAreTautological =
true;
6975 bool HadEvenDivisor =
false;
6976 bool AllDivisorsArePowerOfTwo =
true;
6977 bool HadTautologicalInvertedLanes =
false;
6986 const APInt &
Cmp = CCmp->getAPIntValue();
6988 ComparingWithAllZeros &=
Cmp.isZero();
6994 bool TautologicalInvertedLane =
D.ule(Cmp);
6995 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
7000 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
7001 HadTautologicalLanes |= TautologicalLane;
7002 AllLanesAreTautological &= TautologicalLane;
7008 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
7011 unsigned K =
D.countr_zero();
7012 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7016 HadEvenDivisor |= (
K != 0);
7019 AllDivisorsArePowerOfTwo &= D0.
isOne();
7023 unsigned W =
D.getBitWidth();
7025 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7038 "We are expecting that K is always less than all-ones for ShSVT");
7041 if (TautologicalLane) {
7067 if (AllLanesAreTautological)
7072 if (AllDivisorsArePowerOfTwo)
7077 if (HadTautologicalLanes) {
7092 "Expected matchBinaryPredicate to return one element for "
7103 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
7107 "Expecting that the types on LHS and RHS of comparisons match.");
7117 if (HadEvenDivisor) {
7130 if (!HadTautologicalInvertedLanes)
7136 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7143 SDValue TautologicalInvertedChannels =
7153 DL, SETCCVT, SETCCVT);
7155 Replacement, NewCC);
7163 TautologicalInvertedChannels);
7176 DAGCombinerInfo &DCI,
7179 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
7181 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
7183 DCI.AddToWorklist(
N);
7191TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
7193 DAGCombinerInfo &DCI,
const SDLoc &
DL,
7218 "Only applicable for (in)equality comparisons.");
7234 if (!CompTarget || !CompTarget->
isZero())
7237 bool HadIntMinDivisor =
false;
7238 bool HadOneDivisor =
false;
7239 bool AllDivisorsAreOnes =
true;
7240 bool HadEvenDivisor =
false;
7241 bool NeedToApplyOffset =
false;
7242 bool AllDivisorsArePowerOfTwo =
true;
7257 HadIntMinDivisor |=
D.isMinSignedValue();
7260 HadOneDivisor |=
D.isOne();
7261 AllDivisorsAreOnes &=
D.isOne();
7264 unsigned K =
D.countr_zero();
7265 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7268 if (!
D.isMinSignedValue()) {
7271 HadEvenDivisor |= (
K != 0);
7276 AllDivisorsArePowerOfTwo &= D0.
isOne();
7280 unsigned W =
D.getBitWidth();
7282 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7288 if (!
D.isMinSignedValue()) {
7291 NeedToApplyOffset |=
A != 0;
7298 "We are expecting that A is always less than all-ones for SVT");
7300 "We are expecting that K is always less than all-ones for ShSVT");
7340 if (AllDivisorsAreOnes)
7345 if (AllDivisorsArePowerOfTwo)
7348 SDValue PVal, AVal, KVal, QVal;
7350 if (HadOneDivisor) {
7370 QAmts.
size() == 1 &&
7371 "Expected matchUnaryPredicate to return one element for scalable "
7378 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7389 if (NeedToApplyOffset) {
7401 if (HadEvenDivisor) {
7416 if (!HadIntMinDivisor)
7422 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7457 MaskedIsZero, Fold);
7465 EVT VT =
Op.getValueType();
7488 bool LegalOps,
bool OptForSize,
7490 unsigned Depth)
const {
7492 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7494 return Op.getOperand(0);
7505 EVT VT =
Op.getValueType();
7506 unsigned Opcode =
Op.getOpcode();
7516 auto RemoveDeadNode = [&](
SDValue N) {
7517 if (
N &&
N.getNode()->use_empty())
7526 std::list<HandleSDNode> Handles;
7537 if (LegalOps && !IsOpLegal)
7540 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7554 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7562 return N.isUndef() ||
7567 if (LegalOps && !IsOpLegal)
7576 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7584 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7595 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7598 Handles.emplace_back(NegX);
7603 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7609 if (NegX && (CostX <= CostY)) {
7613 RemoveDeadNode(NegY);
7622 RemoveDeadNode(NegX);
7629 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7651 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7654 Handles.emplace_back(NegX);
7659 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7665 if (NegX && (CostX <= CostY)) {
7669 RemoveDeadNode(NegY);
7675 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7683 RemoveDeadNode(NegX);
7690 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7693 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7696 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7702 Handles.emplace_back(NegZ);
7707 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7710 Handles.emplace_back(NegX);
7715 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7721 if (NegX && (CostX <= CostY)) {
7722 Cost = std::min(CostX, CostZ);
7725 RemoveDeadNode(NegY);
7731 Cost = std::min(CostY, CostZ);
7734 RemoveDeadNode(NegX);
7742 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7744 return DAG.
getNode(Opcode,
DL, VT, NegV);
7747 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7758 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7760 RemoveDeadNode(NegLHS);
7765 Handles.emplace_back(NegLHS);
7770 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7778 RemoveDeadNode(NegLHS);
7779 RemoveDeadNode(NegRHS);
7783 Cost = std::min(CostLHS, CostRHS);
7784 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7813 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7826 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7854 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7855 Result.push_back(
Lo);
7856 Result.push_back(
Hi);
7859 Result.push_back(Zero);
7860 Result.push_back(Zero);
7871 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7872 Result.push_back(
Lo);
7873 Result.push_back(
Hi);
7878 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7893 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7896 Result.push_back(
Lo);
7903 Result.push_back(
Hi);
7916 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7923 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7975 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7976 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7977 DAG, Kind, LL, LH, RL, RH);
7979 assert(Result.size() == 2);
8011 unsigned Opcode =
N->getOpcode();
8012 EVT VT =
N->getValueType(0);
8019 "Unexpected opcode");
8021 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
8025 APInt Divisor = CN->getAPIntValue();
8033 if (Divisor.
uge(HalfMaxPlus1))
8051 unsigned TrailingZeros = 0;
8065 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
8066 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
8068 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
8072 if (TrailingZeros) {
8140 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
8141 Result.push_back(QuotL);
8142 Result.push_back(QuotH);
8148 if (TrailingZeros) {
8153 Result.push_back(RemL);
8169 EVT VT =
Node->getValueType(0);
8179 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
8182 EVT ShVT = Z.getValueType();
8188 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8189 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
8190 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
8192 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
8200 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
8204 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
8207 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8208 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
8213 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
8215 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
8218 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
8219 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
8222 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
8227 if (Node->isVPOpcode())
8230 EVT VT = Node->getValueType(0);
8240 SDValue Z = Node->getOperand(2);
8243 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
8246 EVT ShVT = Z.getValueType();
8316 EVT VT = Node->getValueType(0);
8318 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
8319 SDValue Op0 = Node->getOperand(0);
8320 SDValue Op1 = Node->getOperand(1);
8334 if (!AllowVectorOps && VT.
isVector() &&
8352 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8354 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8360 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8371 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8372 EVT VT = Node->getValueType(0);
8378 SDValue ShOpLo = Node->getOperand(0);
8379 SDValue ShOpHi = Node->getOperand(1);
8380 SDValue ShAmt = Node->getOperand(2);
8423 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8424 SDValue Src = Node->getOperand(OpNo);
8425 EVT SrcVT = Src.getValueType();
8426 EVT DstVT = Node->getValueType(0);
8430 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8433 if (Node->isStrictFPOpcode())
8496 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8497 SDValue Src = Node->getOperand(OpNo);
8499 EVT SrcVT = Src.getValueType();
8500 EVT DstVT = Node->getValueType(0);
8521 if (Node->isStrictFPOpcode()) {
8523 { Node->getOperand(0), Src });
8524 Chain = Result.getValue(1);
8538 if (Node->isStrictFPOpcode()) {
8540 Node->getOperand(0),
true);
8546 bool Strict = Node->isStrictFPOpcode() ||
8565 if (Node->isStrictFPOpcode()) {
8567 { Chain, Src, FltOfs });
8589 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8599 if (Node->isStrictFPOpcode())
8602 SDValue Src = Node->getOperand(0);
8603 EVT SrcVT = Src.getValueType();
8604 EVT DstVT = Node->getValueType(0);
8608 if (Node->getFlags().hasNonNeg() &&
8637 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8656 unsigned Opcode = Node->getOpcode();
8661 if (Node->getFlags().hasNoNaNs()) {
8663 EVT VT = Node->getValueType(0);
8668 SDValue Op1 = Node->getOperand(0);
8669 SDValue Op2 = Node->getOperand(1);
8679 if (
SDValue Expanded = expandVectorNaryOpBySplitting(Node, DAG))
8682 EVT VT = Node->getValueType(0);
8685 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8692 SDValue Quiet0 = Node->getOperand(0);
8693 SDValue Quiet1 = Node->getOperand(1);
8695 if (!Node->getFlags().hasNoNaNs()) {
8708 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8714 if ((Node->getFlags().hasNoNaNs() ||
8717 (Node->getFlags().hasNoSignedZeros() ||
8720 unsigned IEEE2018Op =
8723 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8724 Node->getOperand(1), Node->getFlags());
8727 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8735 if (
SDValue Expanded = expandVectorNaryOpBySplitting(
N, DAG))
8741 unsigned Opc =
N->getOpcode();
8742 EVT VT =
N->getValueType(0);
8755 bool MinMaxMustRespectOrderedZero =
false;
8759 MinMaxMustRespectOrderedZero =
true;
8773 if (!
N->getFlags().hasNoNaNs() &&
8782 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8805 unsigned Opc = Node->getOpcode();
8806 EVT VT = Node->getValueType(0);
8816 if (!Flags.hasNoNaNs()) {
8832 if (Flags.hasNoNaNs() ||
8834 unsigned IEEE2019Op =
8842 if ((Flags.hasNoNaNs() ||
8868 if (
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros() ||
8893 bool IsOrdered = NanTest ==
fcNone;
8894 bool IsUnordered = NanTest ==
fcNan;
8897 if (!IsOrdered && !IsUnordered)
8898 return std::nullopt;
8900 if (OrderedMask ==
fcZero &&
8906 return std::nullopt;
8913 EVT OperandVT =
Op.getValueType();
8925 if (OperandVT == MVT::ppcf128) {
8928 OperandVT = MVT::f64;
8935 bool IsF80 = (ScalarFloatVT == MVT::f80);
8939 if (Flags.hasNoFPExcept() &&
8942 bool IsInvertedFP =
false;
8946 FPTestMask = InvertedFPCheck;
8947 IsInvertedFP =
true;
8954 FPClassTest OrderedFPTestMask = FPTestMask & ~fcNan;
8959 OrderedFPTestMask = FPTestMask;
8961 const bool IsOrdered = FPTestMask == OrderedFPTestMask;
8963 if (std::optional<bool> IsCmp0 =
8966 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8973 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8976 if (FPTestMask ==
fcNan &&
8982 bool IsOrderedInf = FPTestMask ==
fcInf;
8985 : UnorderedCmpOpcode,
8996 IsOrderedInf ? OrderedCmpOpcode : UnorderedCmpOpcode);
9001 : UnorderedCmpOpcode,
9012 IsOrdered ? OrderedCmpOpcode : UnorderedCmpOpcode);
9031 return DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal,
9032 IsOrdered ? OrderedOp : UnorderedOp);
9055 DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal, IsNormalOp);
9057 return DAG.
getNode(LogicOp,
DL, ResultVT, IsFinite, IsNormal);
9064 bool IsInverted =
false;
9067 Test = InvertedCheck;
9083 const unsigned ExplicitIntBitInF80 = 63;
9084 APInt ExpMask = Inf;
9086 ExpMask.
clearBit(ExplicitIntBitInF80);
9100 const auto appendResult = [&](
SDValue PartialRes) {
9110 const auto getIntBitIsSet = [&]() ->
SDValue {
9111 if (!IntBitIsSetV) {
9112 APInt IntBitMask(BitSize, 0);
9113 IntBitMask.
setBit(ExplicitIntBitInF80);
9118 return IntBitIsSetV;
9139 Test &= ~fcPosFinite;
9144 Test &= ~fcNegFinite;
9146 appendResult(PartialRes);
9155 appendResult(ExpIsZero);
9165 else if (PartialCheck ==
fcZero)
9169 appendResult(PartialRes);
9182 appendResult(PartialRes);
9185 if (
unsigned PartialCheck =
Test &
fcInf) {
9188 else if (PartialCheck ==
fcInf)
9195 appendResult(PartialRes);
9198 if (
unsigned PartialCheck =
Test &
fcNan) {
9199 APInt InfWithQnanBit = Inf | QNaNBitMask;
9201 if (PartialCheck ==
fcNan) {
9214 }
else if (PartialCheck ==
fcQNan) {
9226 appendResult(PartialRes);
9231 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
9234 APInt ExpLimit = ExpMask - ExpLSB;
9247 appendResult(PartialRes);
9270 EVT VT = Node->getValueType(0);
9277 if (!(Len <= 128 && Len % 8 == 0))
9336 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9347 EVT VT = Node->getValueType(0);
9350 SDValue Mask = Node->getOperand(1);
9351 SDValue VL = Node->getOperand(2);
9356 if (!(Len <= 128 && Len % 8 == 0))
9368 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
9371 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
9375 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
9378 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
9379 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
9383 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
9388 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
9389 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
9400 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
9403 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9405 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
9406 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
9416 EVT VT = Node->getValueType(0);
9455 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9466 EVT VT = Node->getValueType(0);
9469 SDValue Mask = Node->getOperand(1);
9470 SDValue VL = Node->getOperand(2);
9480 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9483 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
9488 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
9497 :
APInt(64, 0x0218A392CD3D5DBFULL);
9511 for (
unsigned i = 0; i <
BitWidth; i++) {
9537 EVT VT = Node->getValueType(0);
9572 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
9594 SDValue Mask = Node->getOperand(1);
9595 SDValue VL = Node->getOperand(2);
9597 EVT VT = Node->getValueType(0);
9605 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9619 EVT SrcVT = Source.getValueType();
9620 EVT ResVT =
N->getValueType(0);
9629 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9637 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9638 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9645 EVT MaskVT = Mask.getValueType();
9655 true, &VScaleRange);
9679 bool IsNegative)
const {
9681 EVT VT =
N->getValueType(0);
9735 EVT VT =
N->getValueType(0);
9809 EVT VT =
N->getValueType(0);
9813 unsigned Opc =
N->getOpcode();
9822 "Unknown AVG node");
9834 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9886 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9891 EVT VT =
N->getValueType(0);
9898 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9949 EVT VT =
N->getValueType(0);
9958 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9967 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9977 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9981 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9982 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9983 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9987 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9991 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9992 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9995 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9996 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10001 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
10002 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10005 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
10006 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
10009 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10010 DAG.
getConstant(255ULL << 8, dl, VT), Mask, EVL);
10013 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
10014 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
10015 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
10016 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
10017 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
10018 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
10019 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
10025 EVT VT =
N->getValueType(0);
10068 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
10085 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
10088 EVT VT =
N->getValueType(0);
10107 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
10112 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10118 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10123 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10129 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10134 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10140 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10146std::pair<SDValue, SDValue>
10150 SDValue Chain = LD->getChain();
10151 SDValue BasePTR = LD->getBasePtr();
10152 EVT SrcVT = LD->getMemoryVT();
10153 EVT DstVT = LD->getValueType(0);
10185 LD->getPointerInfo(), SrcIntVT, LD->getBaseAlign(),
10186 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10189 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10190 unsigned ShiftIntoIdx =
10201 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
10208 return std::make_pair(
Value, Load.getValue(1));
10217 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10219 ExtType, SL, DstEltVT, Chain, BasePTR,
10220 LD->getPointerInfo().getWithOffset(
Idx * Stride), SrcEltVT,
10221 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
10232 return std::make_pair(
Value, NewChain);
10239 SDValue Chain = ST->getChain();
10240 SDValue BasePtr = ST->getBasePtr();
10242 EVT StVT = ST->getMemoryVT();
10268 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10272 unsigned ShiftIntoIdx =
10281 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
10282 ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
10288 assert(Stride &&
"Zero stride!");
10292 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10300 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
10301 MemSclVT, ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
10310std::pair<SDValue, SDValue>
10313 "unaligned indexed loads not implemented!");
10314 SDValue Chain = LD->getChain();
10316 EVT VT = LD->getValueType(0);
10317 EVT LoadedVT = LD->getMemoryVT();
10327 return scalarizeVectorLoad(LD, DAG);
10333 LD->getMemOperand());
10335 if (LoadedVT != VT)
10339 return std::make_pair(Result, newLoad.
getValue(1));
10347 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
10351 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
10353 SDValue StackPtr = StackBase;
10356 EVT PtrVT =
Ptr.getValueType();
10357 EVT StackPtrVT = StackPtr.getValueType();
10363 for (
unsigned i = 1; i < NumRegs; i++) {
10366 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
10367 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
10370 Load.getValue(1), dl, Load, StackPtr,
10381 8 * (LoadedBytes -
Offset));
10384 LD->getPointerInfo().getWithOffset(
Offset), MemVT, LD->getBaseAlign(),
10385 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10390 Load.getValue(1), dl, Load, StackPtr,
10397 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
10402 return std::make_pair(Load, TF);
10406 "Unaligned load of unsupported type.");
10415 Align Alignment = LD->getBaseAlign();
10416 unsigned IncrementSize = NumBits / 8;
10427 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10432 LD->getPointerInfo().getWithOffset(IncrementSize),
10433 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10436 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
10437 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10442 LD->getPointerInfo().getWithOffset(IncrementSize),
10443 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10455 return std::make_pair(Result, TF);
10461 "unaligned indexed stores not implemented!");
10462 SDValue Chain = ST->getChain();
10464 SDValue Val = ST->getValue();
10466 Align Alignment = ST->getBaseAlign();
10468 EVT StoreMemVT = ST->getMemoryVT();
10484 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
10485 Alignment, ST->getMemOperand()->getFlags());
10493 EVT PtrVT =
Ptr.getValueType();
10496 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
10500 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
10504 Chain, dl, Val, StackPtr,
10507 EVT StackPtrVT = StackPtr.getValueType();
10515 for (
unsigned i = 1; i < NumRegs; i++) {
10518 RegVT, dl, Store, StackPtr,
10522 ST->getPointerInfo().getWithOffset(
Offset),
10523 ST->getBaseAlign(),
10524 ST->getMemOperand()->getFlags()));
10543 Load.getValue(1), dl, Load,
Ptr,
10544 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
10545 ST->getBaseAlign(), ST->getMemOperand()->getFlags(), ST->getAAInfo()));
10552 "Unaligned store of unknown type.");
10556 unsigned IncrementSize = NumBits / 8;
10565 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
10576 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
10577 ST->getMemOperand()->getFlags());
10582 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10583 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10594 bool IsCompressedMemory)
const {
10596 EVT AddrVT =
Addr.getValueType();
10597 EVT MaskVT = Mask.getValueType();
10599 "Incompatible types of Data and Mask");
10600 if (IsCompressedMemory) {
10603 "Cannot currently handle compressed memory with scalable vectors");
10609 MaskIntVT = MVT::i32;
10633 "Cannot index a scalable vector within a fixed-width vector");
10637 EVT IdxVT =
Idx.getValueType();
10643 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
10644 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10658 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10666 return getVectorSubVecPointer(
10667 DAG, VecPtr, VecVT,
10685 "Converting bits to bytes lost precision");
10687 "Sub-vector must be a vector with matching element type");
10691 EVT IdxVT = Index.getValueType();
10722 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10723 Args.emplace_back(DAG.
getGlobalAddress(EmuTlsVar, dl, PtrVT), VoidPtrType);
10730 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
10739 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10740 return CallResult.first;
10751 EVT VT =
Op.getOperand(0).getValueType();
10753 if (VT.
bitsLT(MVT::i32)) {
10767 SDValue Op0 = Node->getOperand(0);
10768 SDValue Op1 = Node->getOperand(1);
10771 unsigned Opcode = Node->getOpcode();
10813 {Op0, Op1, DAG.getCondCode(CC)})) {
10820 {Op0, Op1, DAG.getCondCode(CC)})) {
10848 unsigned Opcode = Node->getOpcode();
10851 EVT VT =
LHS.getValueType();
10854 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10870 unsigned OverflowOp;
10885 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10886 "addition or subtraction node.");
10894 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10897 SDValue SumDiff = Result.getValue(0);
10898 SDValue Overflow = Result.getValue(1);
10920 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10940 if (LHSIsNonNegative || RHSIsNonNegative) {
10942 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10948 if (LHSIsNegative || RHSIsNegative) {
10950 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10960 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10964 unsigned Opcode = Node->getOpcode();
10967 EVT VT =
LHS.getValueType();
10968 EVT ResVT = Node->getValueType(0);
10999 unsigned Opcode = Node->getOpcode();
11003 EVT VT =
LHS.getValueType();
11008 "Expected a SHLSAT opcode");
11009 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
11041 EVT VT =
LHS.getValueType();
11042 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
11044 assert((HiLHS && HiRHS) || (!HiLHS && !HiRHS));
11046 "Signed flag should only be set when HiLHS and RiRHS are null");
11054 unsigned HalfBits = Bits / 2;
11099 EVT VT =
LHS.getValueType();
11100 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
11104 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
11105 if (WideVT == MVT::i16)
11106 LC = RTLIB::MUL_I16;
11107 else if (WideVT == MVT::i32)
11108 LC = RTLIB::MUL_I32;
11109 else if (WideVT == MVT::i64)
11110 LC = RTLIB::MUL_I64;
11111 else if (WideVT == MVT::i128)
11112 LC = RTLIB::MUL_I128;
11137 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
11143 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
11146 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
11149 "Ret value is a collection of constituent nodes holding result.");
11152 Lo = Ret.getOperand(0);
11153 Hi = Ret.getOperand(1);
11155 Lo = Ret.getOperand(1);
11156 Hi = Ret.getOperand(0);
11166 "Expected a fixed point multiplication opcode");
11171 EVT VT =
LHS.getValueType();
11172 unsigned Scale = Node->getConstantOperandVal(2);
11188 SDValue Product = Result.getValue(0);
11189 SDValue Overflow = Result.getValue(1);
11200 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
11201 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
11205 SDValue Product = Result.getValue(0);
11206 SDValue Overflow = Result.getValue(1);
11210 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
11215 "Expected scale to be less than the number of bits if signed or at "
11216 "most the number of bits if unsigned.");
11218 "Expected both operands to be the same type");
11230 Lo = Result.getValue(0);
11231 Hi = Result.getValue(1);
11252 if (Scale == VTSize)
11298 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
11323 "Expected a fixed point division opcode");
11325 EVT VT =
LHS.getValueType();
11347 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
11350 unsigned LHSShift = std::min(LHSLead, Scale);
11351 unsigned RHSShift = Scale - LHSShift;
11408 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
11414 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
11415 { LHS, RHS, CarryIn });
11424 EVT ResultType = Node->getValueType(1);
11435 DAG.
getSetCC(dl, SetCCType, Result,
11444 SetCC = DAG.
getSetCC(dl, SetCCType, Result,
LHS, CC);
11454 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
11459 EVT ResultType = Node->getValueType(1);
11485 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
11486 ResultType, ResultType);
11492 EVT VT = Node->getValueType(0);
11500 const APInt &
C = RHSC->getAPIntValue();
11502 if (
C.isPowerOf2()) {
11504 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
11507 Overflow = DAG.
getSetCC(dl, SetCCVT,
11509 dl, VT, Result, ShiftAmt),
11522 static const unsigned Ops[2][3] =
11545 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
11548 Result = BottomHalf;
11555 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
11560 EVT RType = Node->getValueType(1);
11565 "Unexpected result type for S/UMULO legalization");
11573 EVT VT =
Op.getValueType();
11584 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
11589 return DAG.
getNode(Node->getOpcode(), dl, Node->getValueType(0),
Op,
11596 "Expanding reductions for scalable vectors is undefined.");
11605 for (
unsigned i = 1; i < NumElts; i++)
11606 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
11609 if (EltVT != Node->getValueType(0))
11616 SDValue AccOp = Node->getOperand(0);
11617 SDValue VecOp = Node->getOperand(1);
11625 "Expanding reductions for scalable vectors is undefined.");
11635 for (
unsigned i = 0; i < NumElts; i++)
11636 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
11643 EVT VT = Node->getValueType(0);
11648 SDValue Dividend = Node->getOperand(0);
11649 SDValue Divisor = Node->getOperand(1);
11652 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11657 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11669 SDValue Src = Node->getOperand(0);
11672 EVT SrcVT = Src.getValueType();
11673 EVT DstVT = Node->getValueType(0);
11675 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
11678 assert(SatWidth <= DstWidth &&
11679 "Expected saturation width smaller than result width");
11683 APInt MinInt, MaxInt;
11694 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11696 SrcVT = Src.getValueType();
11718 if (AreExactFloatBounds && MinMaxLegal) {
11727 dl, DstVT, Clamped);
11739 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11778 EVT OperandVT =
Op.getValueType();
11804 Op.getValueType());
11808 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11819 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11821 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11828 EVT VT = Node->getValueType(0);
11831 if (Node->getConstantOperandVal(1) == 1) {
11834 EVT OperandVT =
Op.getValueType();
11846 EVT I32 =
F32.changeTypeToInteger();
11847 Op = expandRoundInexactToOdd(
F32,
Op, dl, DAG);
11872 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11882 assert(Node->getValueType(0).isScalableVector() &&
11883 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11885 EVT VT = Node->getValueType(0);
11886 SDValue V1 = Node->getOperand(0);
11887 SDValue V2 = Node->getOperand(1);
11888 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11907 EVT PtrVT = StackPtr.getValueType();
11909 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11924 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11926 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11949 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11956 SDValue Vec = Node->getOperand(0);
11957 SDValue Mask = Node->getOperand(1);
11958 SDValue Passthru = Node->getOperand(2);
11962 EVT MaskVT = Mask.getValueType();
11971 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11979 bool HasPassthru = !Passthru.
isUndef();
11985 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
11988 APInt PassthruSplatVal;
11989 bool IsSplatPassthru =
11992 if (IsSplatPassthru) {
11996 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
11997 }
else if (HasPassthru) {
12009 getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
12011 ScalarVT,
DL, Chain, LastElmtPtr,
12017 for (
unsigned I = 0;
I < NumElms;
I++) {
12019 SDValue OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
12021 Chain,
DL, ValI, OutPtr,
12033 if (HasPassthru &&
I == NumElms - 1) {
12039 OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
12043 LastWriteVal = DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI,
12046 Chain,
DL, LastWriteVal, OutPtr,
12051 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
12058 SDValue MulLHS =
N->getOperand(1);
12059 SDValue MulRHS =
N->getOperand(2);
12074 if (ExtMulOpVT != MulOpVT) {
12075 MulLHS = DAG.
getNode(ExtOpcLHS,
DL, ExtMulOpVT, MulLHS);
12076 MulRHS = DAG.
getNode(ExtOpcRHS,
DL, ExtMulOpVT, MulRHS);
12081 !ConstantOne.
isOne())
12088 std::deque<SDValue> Subvectors = {Acc};
12089 for (
unsigned I = 0;
I < ScaleFactor;
I++)
12093 while (Subvectors.size() > 1) {
12094 Subvectors.push_back(
12096 Subvectors.pop_front();
12097 Subvectors.pop_front();
12100 assert(Subvectors.size() == 1 &&
12101 "There should only be one subvector after tree flattening");
12103 return Subvectors[0];
12109 SDValue EVL,
bool &NeedInvert,
12111 bool IsSignaling)
const {
12112 MVT OpVT =
LHS.getSimpleValueType();
12114 NeedInvert =
false;
12115 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
12116 bool IsNonVP = !EVL;
12131 bool NeedSwap =
false;
12132 InvCC = getSetCCInverse(CCCode, OpVT);
12148 if (OpVT == MVT::i1) {
12201 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
12206 "If SETO is expanded, SETOEQ must be legal!");
12223 NeedInvert = ((
unsigned)CCCode & 0x8U);
12264 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
12265 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
12273 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
12274 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
12301 EVT VT = Node->getValueType(0);
12313 unsigned Opcode = Node->getOpcode();
12320 for (
const SDValue &V : Node->op_values()) {
12351 std::optional<unsigned> ByteOffset;
12354 if (
auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
12355 int Elt = ConstEltNo->getZExtValue();
12369 unsigned IsFast = 0;
12377 getVectorElementPointer(DAG, OriginalLoad->
getBasePtr(), InVecVT, EltNo);
12382 if (ResultVT.
bitsGT(VecEltVT)) {
12389 NewPtr, MPI, VecEltVT, Alignment,
12399 if (ResultVT.
bitsLT(VecEltVT))
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
Function const char * Passes
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
LLVM_ABI APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
LLVM_ABI bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
LLVM_ABI bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to calling a function.
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching the ArrayRef passed in.
ConstantFP - Floating Point Values [float, double].
This class represents a range of values.
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags getFlags() const
Return the raw flags of the source value,.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
unsigned getAddressSpace() const
Return the address space for the associated pointer.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Class to represent pointers.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool willNotOverflowSub(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the sub of 2 nodes can never overflow.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc).
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
LLVM_ABI void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from GlobalISel.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be truncating or not.
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool shouldExpandCmpUsingSelects(EVT VT) const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const
Get the comparison predicate that's to be used to test the result of the comparison libcall against z...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions. Ref: "Hacker's Delight" by Henry Warren, section 10-1.
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, const SDValue LHS, const SDValue RHS, SDValue &Lo, SDValue &Hi) const
Calculate full product of LHS and RHS either via a libcall or through brute force expansion of the mu...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
virtual unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
void forceExpandMultiply(SelectionDAG &DAG, const SDLoc &dl, bool Signed, SDValue &Lo, SDValue &Hi, SDValue LHS, SDValue RHS, SDValue HiLHS=SDValue(), SDValue HiRHS=SDValue()) const
Calculate the product twice the width of LHS and RHS.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
~TargetLowering() override
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis, Register R, KnownFPClass &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const
Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, consisting of zext/sext,...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
SDValue scalarizeExtractedVectorLoad(EVT ResultVT, const SDLoc &DL, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad, SelectionDAG &DAG) const
Replace an extraction of a load with a narrowed load.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI const fltSemantics & getFltSemantics() const
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp)
Evaluates if the specified FP class test is better performed as the inverse (i.e. fewer instructions should be required to lower it).
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
LLVM_ABI void reportFatalInternalError(Error Err)
Report a fatal error that indicates a bug in LLVM.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with no undefs).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
bool isSignUnknown() const
Returns true if we don't know the sign bit.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the top bits.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static LLVM_ABI SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand to the INLINEASM node.
LLVM_ABI unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
LLVM_ABI bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
ArrayRef< Type * > OpsTypeOverrides
MakeLibCallOptions & setIsSigned(bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static LLVM_ABI UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount