void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

    cl::desc("Gang up loads and stores generated by inlining of memcpy"));

    cl::desc("Number limit for gluing ld/st of memcpy."),

    cl::desc("DAG combiner limit number of steps when searching DAG "
             "for predecessor nodes"));
  return getValueAPF().bitwiseIsEqual(V);

  if (auto OptAPInt = N->getOperand(0)->bitcastToAPInt()) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    SplatVal = OptAPInt->trunc(EltSize);

  auto *BV = dyn_cast<BuildVectorSDNode>(N);

  unsigned SplatBitSize;
  unsigned EltSize =
      N->getValueType(0).getVectorElementType().getSizeInBits();

  const bool IsBigEndian = false;
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize, IsBigEndian) &&
         EltSize == SplatBitSize;
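// Usage sketch (hedged): callers of ISD::isConstantSplatVector typically do
//   APInt SplatVal;
//   if (ISD::isConstantSplatVector(N, SplatVal))
//     ...  // every demanded element of N is the constant SplatVal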
  N = N->getOperand(0).getNode();

  unsigned i = 0, e = N->getNumOperands();

  while (i != e && N->getOperand(i).isUndef())

  if (i == e)
    return false;

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (OptAPInt->countr_one() < EltSize)

  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())

  N = N->getOperand(0).getNode();

  bool IsAllUndef = true;

  if (auto OptAPInt = Op->bitcastToAPInt()) {
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (OptAPInt->countr_zero() < EltSize)

  if (!isa<ConstantSDNode>(Op))

  if (!isa<ConstantFPSDNode>(Op))
  assert(N->getValueType(0).isVector() && "Expected a vector!");

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (EltSize <= NewEltSize)

    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           !Signed;

    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           Signed;

    if (!isa<ConstantSDNode>(Op))

    APInt C = Op->getAsAPIntVal().trunc(EltSize);
    if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)

    if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)

  if (N->getNumOperands() == 0)

  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
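// Note: the getNumOperands() == 0 guard belongs to ISD::allOperandsUndef (a
// node with no operands reports false); the FREEZE-of-UNDEF check is
// ISD::isFreezeUndef.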
template <typename ConstNodeType>
bool ISD::matchUnaryPredicateImpl(SDValue Op,
                                  std::function<bool(ConstNodeType *)> Match,
                                  bool AllowUndefs, bool AllowTruncation) {
  if (auto *C = dyn_cast<ConstNodeType>(Op))

  EVT SVT = Op.getValueType().getScalarType();

    if (AllowUndefs && Op.getOperand(i).isUndef()) {

    auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
    if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||

template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
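// Usage sketch (hedged, via the ISD::matchUnaryPredicate wrapper): test that
// every element of a constant build_vector/splat is a power of two:
//   ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });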
                               bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())

  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  if (LHS.getOpcode() != RHS.getOpcode() ||

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
    if (!Match(LHSCst, RHSCst))
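// Usage sketch (hedged): combines often phrase per-lane guards over two
// constant vectors as a binary predicate, e.g.
//   ISD::matchBinaryPredicate(N0, N1,
//       [](ConstantSDNode *L, ConstantSDNode *R) {
//         return R->getAPIntValue().ult(L->getAPIntValue().getBitWidth());
//       });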
  switch (VecReduceOpcode) {
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
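// Note: this switch (getVecReduceBaseOpcode) maps each VECREDUCE_*/VP_REDUCE_*
// opcode to its scalar base opcode; the `return ISD::FADD;` etc. lines that
// sat between the case labels were lost in extraction.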
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"

  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_SEQ_FMUL:

#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
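// Note (X-macro pattern): each block above redefines the BEGIN/END/PROPERTY
// macros and re-includes VPIntrinsics.def, which expands once per registered
// VP node. E.g. the functional-opcode block expands roughly to
//   case ISD::VP_ADD: return ISD::ADD; break;
// for every VP opcode declaring VP_PROPERTY_FUNCTIONAL_SDOPC(ADD), and the
// last block implements the inverse (base opcode -> VP opcode) mapping.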
                                    bool isIntegerLike) {

  bool IsInteger = Type.isInteger();

  unsigned Op = Op1 | Op2;

  bool IsInteger = Type.isInteger();

  ID.AddPointer(VTList.VTs);

  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());

  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  switch (N->getOpcode()) {

    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());

    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());

    ID.AddInteger(cast<RegisterSDNode>(N)->getReg().id());

    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());

    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());

    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());

    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());

    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());

    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());

    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());

    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    ID.AddInteger(ST->getMemOperand()->getFlags());

  case ISD::VP_LOAD_FF: {
    const auto *LD = cast<VPLoadFFSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());

  case ISD::VP_STORE: {
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
  case ISD::VP_GATHER: {
  case ISD::VP_SCATTER: {

    ID.AddPointer(cast<MDNodeSDNode>(N)->getMD());

  if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
    ID.AddInteger(MN->getRawSubclassData());
    ID.AddInteger(MN->getPointerInfo().getAddrSpace());
    ID.AddInteger(MN->getMemOperand()->getFlags());
    ID.AddInteger(MN->getMemoryVT().getRawBits());
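// Note: AddNodeIDCustom mixes node-specific state (constants, indices, memory
// operand flags, raw subclass bits) into the FoldingSetNodeID so that CSE can
// distinguish nodes that agree on opcode and operands but differ in this state.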
  if (N->getValueType(0) == MVT::Glue)

  switch (N->getOpcode()) {

  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
    if (Node.use_empty())

  while (!DeadNodes.empty()) {

      DUL->NodeDeleted(N, nullptr);

    RemoveNodeFromCSEMaps(N);

  RemoveNodeFromCSEMaps(N);

  DeleteNodeNotInCSEMaps(N);

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  assert(!(V->isVariadic() && isParameter));

    ByvalParmDbgValues.push_back(V);

  DbgValues.push_back(V);
  for (const SDNode *Node : V->getSDNodes())
      DbgValMap[Node].push_back(V);

  if (I == DbgValMap.end())

  for (auto &Val : I->second)
    Val->setIsInvalidated();
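// Note: DbgValMap lets the DAG invalidate every SDDbgValue attached to a node
// when that node is deleted or replaced, so stale debug values are not emitted.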
void SelectionDAG::DeallocateNode(SDNode *N) {

void SelectionDAG::verifyNode(SDNode *N) const {
  switch (N->getOpcode()) {

    if (N->isTargetOpcode())

    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
           "Wrong operand type!");
           "Wrong return type size");

    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
  N->PersistentId = NextPersistentId++;
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);

bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {

    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;

    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());

    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(

    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());

    EVT VT = cast<VTSDNode>(N)->getVT();
      Erased = ExtendedValueTypeNodes.erase(VT);

    Erased = CSEMap.RemoveNode(N);

  if (!Erased && N->getValueType(N->getNumValues() - 1) != MVT::Glue &&
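// Note (hedged): in the full source this trailing check sits in a debug-only
// block that flags nodes which should have been in the CSE maps but were not
// erased, catching CSE bookkeeping bugs early; glue-producing and
// deliberately-uncached nodes are exempt.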
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      if (auto *MemNode = dyn_cast<MemSDNode>(Existing))
        MemNode->refineRanges(cast<MemSDNode>(N)->getMemOperand());
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);

  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);

    Node->intersectFlagsWith(N->getFlags());

    Node->intersectFlagsWith(N->getFlags());

    Node->intersectFlagsWith(N->getFlags());
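// Note: when a modified node collides with an existing equivalent node, uses
// are redirected to the existing node, listeners receive NodeDeleted(N,
// Existing), and the duplicate is deleted; the repeated intersectFlagsWith
// calls conservatively merge SDNodeFlags on CSE hits.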
    : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),

  InsertNode(&EntryNode);

  SDAGISelPass = PassPtr;

  LibInfo = LibraryInfo;

  FnVarLocs = VarLocs;

  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");

  OperandRecycler.clear(OperandAllocator);

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
  NextPersistentId = 0;
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
    switch (N->getOpcode()) {
                         "debug location. Use another overload.");

                                           const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
    switch (N->getOpcode()) {

    if (N->getDebugLoc() != DL.getDebugLoc())

    if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
      N->setDebugLoc(DL.getDebugLoc());

  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  return VT.bitsGT(Op.getValueType())

std::pair<SDValue, SDValue>
         "Strict no-op FP extend/round not allowed.");
  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));

  return VT.bitsGT(Op.getValueType()) ?

  return VT.bitsGT(Op.getValueType()) ?

  return VT.bitsGT(Op.getValueType()) ?

  auto Type = Op.getValueType();
  auto Size = Op.getValueSizeInBits();

  auto Type = Op.getValueType();
  auto Size = Op.getValueSizeInBits();

  auto Type = Op.getValueType();
  auto Size = Op.getValueSizeInBits();

  EVT OpVT = Op.getValueType();
         "Cannot getZeroExtendInReg FP types");
         "getZeroExtendInReg type should be vector iff the operand "
         "Vector element counts must match in getZeroExtendInReg");

  EVT OpVT = Op.getValueType();
         "Cannot getVPZeroExtendInReg FP types");
         "getVPZeroExtendInReg type and operand type should be vector!");
         "Vector element counts must match in getZeroExtendInReg");

  return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);

    return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL);

  return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL);
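// Usage note (hedged): the getZExtOrTrunc/getSExtOrTrunc-style helpers above
// all share one shape, e.g.
//   return VT.bitsGT(Op.getValueType()) ? getNode(ISD::ZERO_EXTEND, DL, VT, Op)
//                                       : getNode(ISD::TRUNCATE, DL, VT, Op);
// i.e. widen when the destination type is bigger, otherwise truncate.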
                                  bool isT, bool isO) {

                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);

                                  EVT VT, bool isT, bool isO) {

  if (isa<VectorType>(Elt->getType()))

    Elt = ConstantInt::get(*getContext(), NewVal);

           "Can only handle an even split!");

    for (unsigned i = 0; i != Parts; ++i)
          NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));

    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
          NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));

      std::reverse(EltParts.begin(), EltParts.end());

         "APInt size does not match type size!");

  if ((N = FindNodeOrInsertPos(ID, DL, IP)))

  N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
  CSEMap.InsertNode(N, IP);

                                  bool isT, bool isO) {
                     IsTarget, IsOpaque);

                                    EVT VT, bool isTarget) {

  if (isa<VectorType>(Elt->getType()))

  if ((N = FindNodeOrInsertPos(ID, DL, IP)))

  N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
  CSEMap.InsertNode(N, IP);

  if (EltVT == MVT::f32)
  if (EltVT == MVT::f64)
  if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
      EltVT == MVT::f16 || EltVT == MVT::bf16) {
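// Note: the splitting logic above handles integer constants whose type is not
// legal for the target by breaking the APInt into legal-width pieces
// (EltParts), building a vector of them (reversed on big-endian targets), and
// bitcasting back to the requested type.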
                                           EVT VT, int64_t Offset,
                                           bool isTargetGA,
                                           unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  ID.AddInteger(TargetFlags);
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VTs, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
  CSEMap.InsertNode(N, IP);

                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  ID.AddInteger(TargetFlags);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);

                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  ID.AddInteger(Alignment->value());
  ID.AddInteger(TargetFlags);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
  CSEMap.InsertNode(N, IP);

                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  ID.AddInteger(Alignment->value());
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);

      ValueTypeNodes.size())

  N = newSDNode<VTSDNode>(VT);

  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, getVTList(VT));

                                              unsigned TargetFlags) {
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, getVTList(VT));

  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond + 1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
                                bool ConstantFold) {
         "APInt size does not match type size!");

                                bool ConstantFold) {
  if (EC.isScalable())

                                  const APInt &StepVal) {

         "Must have the same number of vector elements as mask elements!");
         "Invalid VECTOR_SHUFFLE");

  int NElts = Mask.size();
                     [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  for (int i = 0; i != NElts; ++i)
    if (MaskVec[i] >= NElts)
      MaskVec[i] -= NElts;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
      if (UndefElements[MaskVec[i] - Offset]) {
      if (!UndefElements[i])

  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  bool AllLHS = true, AllRHS = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
    } else if (MaskVec[i] >= 0) {
  if (AllLHS && AllRHS)
  if (AllLHS && !N2Undef)

  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i)
      Identity = false;
    if (MaskVec[i] != MaskVec[0])
      AllSame = false;
  }
  if (Identity && NElts)

    V = V->getOperand(0);

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {

    if (AllSame && SameNumElts) {
      EVT BuildVT = BV->getValueType(0);

  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))

  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);

  auto *N = newSDNode<ShuffleVectorSDNode>(VTs, dl.getIROrder(),
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
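// Note: getVectorShuffle canonicalizes before CSE: mask indices pointing into
// an undef operand become undef, a mask that reads only one side is commuted
// so the LHS is used, an identity mask simply returns N1, and splat masks over
// a constant build_vector are folded to splats of that element.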
  ID.AddInteger(Reg.id());
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<RegisterSDNode>(Reg, VTs);
  CSEMap.InsertNode(N, IP);

  ID.AddPointer(RegMask);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);

  ID.AddPointer(Label);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);

                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  ID.AddInteger(TargetFlags);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);

  if (VT == V.getValueType())

                                       unsigned SrcAS, unsigned DestAS) {
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))

                                           VTs, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  EVT OpTy = Op.getValueType();
  if (OpTy == ShTy || OpTy.isVector())
    return Op;

  if (Op.getNode() != FPNode)

  while (!Worklist.empty()) {

    std::optional<unsigned> CallRetResNo) {
  EVT VT = Node->getValueType(0);
  unsigned NumResults = Node->getNumValues();

  if (LC == RTLIB::UNKNOWN_LIBCALL)

  auto getVecDesc = [&]() -> VecDesc const * {
    for (bool Masked : {false, true}) {

  if (VT.isVector() && !(VD = getVecDesc()))

    auto *ST = cast<StoreSDNode>(User);
    SDValue StoreValue = ST->getValue();
    unsigned ResNo = StoreValue.getResNo();
    if (CallRetResNo == ResNo)
    if (!ST->isSimple() || ST->getAddressSpace() != 0)
    if (StoresInChain && ST->getChain() != StoresInChain)
    if (ST->getAlign() <
    ResultStores[ResNo] = ST;
    StoresInChain = ST->getChain();

  for (const SDValue &Op : Node->op_values()) {
    EVT ArgVT = Op.getValueType();
    Args.emplace_back(Op, ArgTy);

    if (ResNo == CallRetResNo)
    EVT ResVT = Node->getValueType(ResNo);
    ResultPtrs[ResNo] = ResultPtr;
    Args.emplace_back(ResultPtr, PointerTy);

  Type *RetType = CallRetResNo.has_value()
                      ? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)

    if (ResNo == CallRetResNo) {
        getLoad(Node->getValueType(ResNo), DL, CallChain, ResultPtr, PtrInfo);
      PtrInfo = ST->getPointerInfo();
    Results.push_back(LoadResult);
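// Note (hedged summary): this expansion (expandMultipleResultFPLibCall in the
// full source) lowers multi-result FP nodes to a single libcall that writes
// extra results through out-pointers; when an existing simple store already
// consumes a result, that store's address is reused instead of a fresh stack
// slot, and stored-to results are re-loaded for any remaining users.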
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();

  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (RedAlign > StackAlign) {
    unsigned NumIntermediates;
                       NumIntermediates, RegisterVT);
    Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;

    RedAlign = std::min(RedAlign, StackAlign);

                                 false, nullptr, StackID);

         "Don't know how to choose the maximum size when creating a stack "

  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  auto GetUndefBooleanConstant = [&]() {

        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();

      const APInt &C2 = N2C->getAPIntValue();
        const APInt &C1 = N1C->getAPIntValue();

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();
        return GetUndefBooleanConstant();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
        return GetUndefBooleanConstant();
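// Note: FoldSetCC yields an "undefined boolean" (via GetUndefBooleanConstant)
// when an FP comparison is statically unordered (a NaN is involved) but the
// predicate is an ordered one; depending on the target's boolean contents
// this is either UNDEF or a safe zero constant.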
  unsigned BitWidth = Op.getScalarValueSizeInBits();

                                     unsigned Depth) const {

                                     const APInt &DemandedElts,
                                     unsigned Depth) const {

                                     unsigned Depth) const {

                                      unsigned Depth) const {

                                              const APInt &DemandedElts,
                                              unsigned Depth) const {
  EVT VT = Op.getValueType();

  for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
    if (!DemandedElts[EltIdx])

      KnownZeroElements.setBit(EltIdx);

  return KnownZeroElements;
  unsigned Opcode = V.getOpcode();
  EVT VT = V.getValueType();
         "scalable demanded bits are ignored");

    UndefElts = V.getOperand(0).isUndef()

    APInt UndefLHS, UndefRHS;
        (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
      UndefElts = UndefLHS | UndefRHS;

    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
      if (Scl && Scl != Op)

    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      if (!DemandedElts[i])
      if (M < (int)NumElts)
        DemandedRHS.setBit(M - NumElts);

    auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) {
      return (SrcElts.popcount() == 1) ||
              (SrcElts & SrcUndefs).isZero());
    if (!DemandedLHS.isZero())
      return CheckSplatSrc(V.getOperand(0), DemandedLHS);
    return CheckSplatSrc(V.getOperand(1), DemandedRHS);

    SDValue Src = V.getOperand(0);
    if (Src.getValueType().isScalableVector())
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    SDValue Src = V.getOperand(0);
    if (Src.getValueType().isScalableVector())
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
      UndefElts = UndefSrcElts.trunc(NumElts);

    SDValue Src = V.getOperand(0);
    EVT SrcVT = Src.getValueType();

    if ((BitWidth % SrcBitWidth) == 0) {
      unsigned Scale = BitWidth / SrcBitWidth;
      APInt ScaledDemandedElts =
      for (unsigned I = 0; I != Scale; ++I) {
        SubDemandedElts &= ScaledDemandedElts;
        if (!SubUndefElts.isZero())

  EVT VT = V.getValueType();
         (AllowUndefs || !UndefElts);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

    SplatIdx = (UndefElts & DemandedElts).countr_one();

    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())
    int Idx = SVN->getSplatIndex();
    int NumElts = V.getValueType().getVectorNumElements();
    SplatIdx = Idx % NumElts;
    return V.getOperand(Idx / NumElts);

  if (LegalSVT.bitsLT(SVT))
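// Note: isSplatValue reports, per demanded lane, whether V is a splat and
// which lanes are known undef (UndefElts); the shuffle case routes the query
// through DemandedLHS/DemandedRHS to the two shuffle sources.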
std::optional<ConstantRange>
                                       unsigned Depth) const {
         "Unknown shift node");
  unsigned BitWidth = V.getScalarValueSizeInBits();

  if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
    const APInt &ShAmt = Cst->getAPIntValue();
      return std::nullopt;

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
    const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
      auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
        MinAmt = MaxAmt = nullptr;
      const APInt &ShAmt = SA->getAPIntValue();
        return std::nullopt;
      if (!MinAmt || MinAmt->ugt(ShAmt))
      if (!MaxAmt || MaxAmt->ult(ShAmt))

    assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
           "Failed to find matching min/max shift amounts");
    if (MinAmt && MaxAmt)

  return std::nullopt;

std::optional<unsigned>
                                        unsigned Depth) const {
         "Unknown shift node");
  if (std::optional<ConstantRange> AmtRange =
    if (const APInt *ShAmt = AmtRange->getSingleElement())
      return ShAmt->getZExtValue();
  return std::nullopt;

std::optional<unsigned>
  EVT VT = V.getValueType();

std::optional<unsigned>
                                               unsigned Depth) const {
         "Unknown shift node");
  if (std::optional<ConstantRange> AmtRange =
    return AmtRange->getUnsignedMin().getZExtValue();
  return std::nullopt;

std::optional<unsigned>
  EVT VT = V.getValueType();

std::optional<unsigned>
                                               unsigned Depth) const {
         "Unknown shift node");
  if (std::optional<ConstantRange> AmtRange =
    return AmtRange->getUnsignedMax().getZExtValue();
  return std::nullopt;

std::optional<unsigned>
  EVT VT = V.getValueType();
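// Usage sketch (hedged): combines query these helpers before rewriting
// shifts, e.g.
//   if (std::optional<unsigned> MaxAmt = getValidMaximumShiftAmount(V))
//     if (*MaxAmt < VT.getScalarSizeInBits())
//       ...  // the shift can never exceed the element width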
  EVT VT = Op.getValueType();

                                          unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  if (auto OptAPInt = Op->bitcastToAPInt()) {

  assert((!Op.getValueType().isFixedLengthVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  unsigned Opcode = Op.getOpcode();

           "Expected SPLAT_VECTOR implicit truncation");

    unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits();
           "Expected SPLAT_VECTOR_PARTS scalars to cover element width");

    const APInt &Step = Op.getConstantOperandAPInt(0);

    const APInt MinNumElts =
            .umul_ov(MinNumElts, Overflow);

    const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow);

    assert(!Op.getValueType().isScalableVector());
      if (!DemandedElts[i])
             "Expected BUILD_VECTOR implicit truncation");

    assert(!Op.getValueType().isScalableVector());
    APInt DemandedLHS, DemandedRHS;
                                DemandedLHS, DemandedRHS))
    if (!!DemandedLHS) {
    if (!!DemandedRHS) {

    const APInt &Multiplier = Op.getConstantOperandAPInt(0);

    if (Op.getValueType().isScalableVector())
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
      if (!!DemandedSub) {

    if (Op.getValueType().isScalableVector())
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts;
    if (!!DemandedSubElts) {
    if (!!DemandedSrcElts) {

    if (Op.getValueType().isScalableVector() ||
        Src.getValueType().isScalableVector())
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    if (Op.getValueType().isScalableVector())
    if (DemandedElts != 1)

    if (Op.getValueType().isScalableVector())

    if ((BitWidth % SubBitWidth) == 0) {
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.insertBits(Known2, SubBitWidth * Shifts);

    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts =
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
        Op.getOperand(0), DemandedElts, false, Depth + 1);

    if (Op->getFlags().hasNoSignedWrap() &&
        Op.getOperand(0) == Op.getOperand(1) &&

    unsigned SignBits1 =
    unsigned SignBits0 =

    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
    if (Op.getResNo() == 0)

    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
    if (Op.getResNo() == 0)

    if (Op.getResNo() != 1)

    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;

    bool NUW = Op->getFlags().hasNoUnsignedWrap();
    bool NSW = Op->getFlags().hasNoSignedWrap();

    if (std::optional<unsigned> ShMinAmt =
                                    Op->getFlags().hasExact());

    if (std::optional<unsigned> ShMinAmt =
                                     Op->getFlags().hasExact());

      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      unsigned Amt = C->getAPIntValue().urem(BitWidth);
                                 DemandedElts, Depth + 1);

    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
    unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits();
    Known = Known2.concat(Known);

    if (Op.getResNo() == 0)

    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

                       ? cast<MaskedGatherSDNode>(Op)->getExtensionType()
                       : cast<MaskedLoadSDNode>(Op)->getExtensionType();
    EVT MemVT = cast<MemSDNode>(Op)->getMemoryVT();

        !Op.getValueType().isScalableVector()) {
      for (unsigned i = 0; i != NumElts; ++i) {
        if (!DemandedElts[i])
        if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
        if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
          APInt Value = CFP->getValueAPF().bitcastToAPInt();

      if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
    } else if (Op.getResNo() == 0) {
      unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
      KnownBits KnownScalarMemory(ScalarMemorySize);
      if (const MDNode *MD = LD->getRanges())
      Known = KnownScalarMemory;

    if (Op.getValueType().isScalableVector())
    EVT InVT = Op.getOperand(0).getValueType();

    if (Op.getValueType().isScalableVector())
    EVT InVT = Op.getOperand(0).getValueType();

    if (Op.getValueType().isScalableVector())
    EVT InVT = Op.getOperand(0).getValueType();

    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);

    unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());

        Op.getOpcode() == ISD::ADD, Flags.hasNoSignedWrap(),
        Flags.hasNoUnsignedWrap(), Known, Known2);
    if (Op.getResNo() == 1) {
           "We only compute knownbits for the difference here.");

      Borrow = Borrow.trunc(1);

    if (Op.getResNo() == 1) {

    assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");

      Carry = Carry.trunc(1);

    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned EltBitWidth = Op.getValueSizeInBits();

      Known = Known.trunc(EltBitWidth);

      Known = Known.trunc(EltBitWidth);

    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))

    if (Op.getValueType().isScalableVector())

    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];
    if (!!DemandedVecElts) {

    Known = Known2.abs();

    if (CstLow && CstHigh) {
      const APInt &ValueHigh = CstHigh->getAPIntValue();
      if (ValueLow.sle(ValueHigh)) {
        unsigned MinSignBits = std::min(LowSignBits, HighSignBits);

    if (IsMax && CstLow) {

    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    if (Op.getResNo() == 0) {
      auto *AT = cast<AtomicSDNode>(Op);
      unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
      KnownBits KnownScalarMemory(ScalarMemorySize);
      if (const MDNode *MD = AT->getRanges())
      switch (AT->getExtensionType()) {
      Known = KnownScalarMemory;

    if (Op.getResNo() == 1) {

    if (Op.getResNo() == 0) {
      auto *AT = cast<AtomicSDNode>(Op);
      unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();

    if (Op.getValueType().isScalableVector())

    return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  if (C && C->getAPIntValue() == 1)

  if (C && C->getAPIntValue().isSignMask())

    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
      return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

    if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())

    return C1->getValueAPF().getExactLog2Abs() >= 0;
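// Note (hedged): the power-of-two checks above accept plain constants, each
// element of a constant build_vector, single-bit patterns such as a set sign
// mask, and FP constants whose absolute value has an exact log2.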
  EVT VT = Op.getValueType();

                                          unsigned Depth) const {
  EVT VT = Op.getValueType();

  unsigned FirstAnswer = 1;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    const APInt &Val = C->getAPIntValue();

  unsigned Opcode = Op.getOpcode();

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp + 1;

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();

    unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);

      if (!DemandedElts[i])

        APInt T = C->getAPIntValue().trunc(VTBits);
        Tmp2 = T.getNumSignBits();

        if (SrcOp.getValueSizeInBits() != VTBits) {
                 "Expected BUILD_VECTOR implicit truncation");
          unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
          Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);

      Tmp = std::min(Tmp, Tmp2);

    APInt DemandedLHS, DemandedRHS;
                                DemandedLHS, DemandedRHS))

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedRHS) {
      Tmp = std::min(Tmp, Tmp2);

    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    if (VTBits == SrcBits)

    if ((SrcBits % VTBits) == 0) {
      unsigned Scale = SrcBits / VTBits;
      APInt SrcDemandedElts =

      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    return VTBits - Tmp + 1;

    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    return std::max(Tmp, Tmp2);

    EVT SrcVT = Src.getValueType();

    if (std::optional<unsigned> ShAmt =
      Tmp = std::min(Tmp + *ShAmt, VTBits);

    if (std::optional<ConstantRange> ShAmtRange =
      unsigned MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
      unsigned MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();

        EVT ExtVT = Ext.getValueType();
        SDValue Extendee = Ext.getOperand(0);
        unsigned SizeDifference =
        if (SizeDifference <= MinShAmt) {
          Tmp = SizeDifference +
            return Tmp - MaxShAmt;

        return Tmp - MaxShAmt;

    FirstAnswer = std::min(Tmp, Tmp2);

    if (Tmp == 1)
      return 1;
    return std::min(Tmp, Tmp2);

    if (Tmp == 1)
      return 1;
    return std::min(Tmp, Tmp2);

    if (CstLow && CstHigh) {
      Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
      return std::min(Tmp, Tmp2);

      return std::min(Tmp, Tmp2);

      return std::min(Tmp, Tmp2);
    if (Op.getResNo() == 0 && Op.getOperand(0) == Op.getOperand(1))

    if (Op.getResNo() != 1)

    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;

      unsigned RotAmt = C->getAPIntValue().urem(VTBits);

        RotAmt = (VTBits - RotAmt) % VTBits;

      if (Tmp > (RotAmt + 1))
        return (Tmp - RotAmt);

    if (Tmp == 1)
      return 1;

      if (CRHS->isAllOnes()) {
        if ((Known.Zero | 1).isAllOnes())

    if (Tmp2 == 1)
      return 1;
    return std::min(Tmp, Tmp2) - 1;

    if (Tmp2 == 1)
      return 1;

      if (CLHS->isZero()) {
        if ((Known.Zero | 1).isAllOnes())

    if (Tmp == 1)
      return 1;
    return std::min(Tmp, Tmp2) - 1;

    if (SignBitsOp0 == 1)
    if (SignBitsOp1 == 1)
    unsigned OutValidBits =
        (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
    return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;

    return std::min(Tmp, Tmp2);

    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);

    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];
    Tmp = std::numeric_limits<unsigned>::max();
      Tmp = std::min(Tmp, Tmp2);
    if (!!DemandedVecElts) {
      Tmp = std::min(Tmp, Tmp2);
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();

    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))

    if (Src.getValueType().isScalableVector())
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    Tmp = std::numeric_limits<unsigned>::max();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
      Tmp = std::min(Tmp, Tmp2);
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts;

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedSubElts) {
    if (!!DemandedSrcElts) {
      Tmp = std::min(Tmp, Tmp2);
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    if (const MDNode *Ranges = LD->getRanges()) {
      if (DemandedElts != 1)
      switch (LD->getExtensionType()) {

    auto *AT = cast<AtomicSDNode>(Op);
    if (Op.getResNo() == 0) {
      Tmp = AT->getMemoryVT().getScalarSizeInBits();
      switch (AT->getExtensionType()) {
        return VTBits - Tmp + 1;
        return VTBits - Tmp;
        return VTBits - Tmp + 1;
        return VTBits - Tmp;

  if (Op.getResNo() == 0) {
    unsigned ExtType = LD->getExtensionType();
      Tmp = LD->getMemoryVT().getScalarSizeInBits();
      return VTBits - Tmp + 1;
      Tmp = LD->getMemoryVT().getScalarSizeInBits();
      return VTBits - Tmp;

    Type *CstTy = Cst->getType();
      for (unsigned i = 0; i != NumElts; ++i) {
        if (!DemandedElts[i])
        if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
          Tmp = std::min(Tmp, Value.getNumSignBits());
        if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
          APInt Value = CFP->getValueAPF().bitcastToAPInt();
          Tmp = std::min(Tmp, Value.getNumSignBits());

  FirstAnswer = std::max(FirstAnswer, NumBits);

                                                  unsigned Depth) const {
  return Op.getScalarValueSizeInBits() - SignBits + 1;

                                                  const APInt &DemandedElts,
                                                  unsigned Depth) const {
  return Op.getScalarValueSizeInBits() - SignBits + 1;
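// Note: ComputeMaxSignificantBits is derived directly from sign bits: a value
// with S known sign bits in a W-bit type has at most W - S + 1 significant
// bits, which is exactly what the two returns above compute.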
                                                    unsigned Depth) const {
  EVT VT = Op.getValueType();

                                                    const APInt &DemandedElts,
                                                    unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();

      if (!DemandedElts[i])

    if (Src.getValueType().isScalableVector())
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    if (Op.getValueType().isScalableVector())
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts;

    auto *IndexC = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    EVT SrcVT = Src.getValueType();
                                             IndexC->getZExtValue());

    auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
    if (DemandedElts[IndexC->getZExtValue()] &&
      APInt InVecDemandedElts = DemandedElts;
      InVecDemandedElts.clearBit(IndexC->getZExtValue());
      if (!!InVecDemandedElts &&

    APInt DemandedLHS, DemandedRHS;
    auto *SVN = cast<ShuffleVectorSDNode>(Op);
                                DemandedElts, DemandedLHS, DemandedRHS,
    if (!DemandedLHS.isZero() &&
    if (!DemandedRHS.isZero() &&

    return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts, PoisonOnly,
                                            Depth + 1);

    return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
                                          unsigned Depth) const {
  EVT VT = Op.getValueType();

                                          unsigned Depth) const {
  if (ConsiderFlags && Op->hasPoisonGeneratingFlags())

  unsigned Opcode = Op.getOpcode();

    if (Op.getOperand(0).getValueType().isInteger())

    unsigned CCOp = Opcode == ISD::SETCC ? 2 : 4;
    ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get();
    if (((unsigned)CCCode & 0x10U))

    EVT VecVT = Op.getOperand(0).getValueType();

    auto *SVN = cast<ShuffleVectorSDNode>(Op);
      if (Elt < 0 && DemandedElts[Idx])

  unsigned Opcode = Op.getOpcode();
    return Op->getFlags().hasDisjoint() ||
                                       unsigned Depth) const {
  EVT VT = Op.getValueType();

                                   bool SNaN, unsigned Depth) const {
  assert(!DemandedElts.isZero() && "No demanded elements");

    return !C->getValueAPF().isNaN() ||
           (SNaN && !C->getValueAPF().isSignaling());

  unsigned Opcode = Op.getOpcode();

    auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    EVT SrcVT = Src.getValueType();
                                      Idx->getZExtValue());

    if (Src.getValueType().isFixedLengthVector()) {
      unsigned Idx = Op.getConstantOperandVal(1);
      unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    unsigned Idx = Op.getConstantOperandVal(2);
    APInt DemandedMask =
    APInt DemandedSrcElts = DemandedElts & ~DemandedMask;

    bool NeverNaN = true;
    if (!DemandedSrcElts.isZero())
    if (NeverNaN && !DemandedSubElts.isZero())

    for (unsigned I = 0; I != NumElts; ++I)
      if (DemandedElts[I] &&

  assert(Op.getValueType().isFloatingPoint() &&
         "Floating point type expected");
  assert(!Op.getValueType().isFloatingPoint() &&
         "Floating point types unsupported - use isKnownNeverZeroFloat");

  switch (Op.getOpcode()) {

    if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())

    if (ValKnown.One[0])

    if (Op->getFlags().hasExact())

    if (Op->getFlags().hasExact())

    if (Op->getFlags().hasNoUnsignedWrap())

    std::optional<bool> ne =

    if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())

    const APInt &Multiplier = Op.getConstantOperandAPInt(0);

      return !C1->isNegative();

  if (A == B)
    return true;

      if (CA->isZero() && CB->isZero())
        return true;

    return V.getOperand(0);

    SDValue ExtArg = V.getOperand(0);

      NotOperand = NotOperand->getOperand(0);

    if (Other == NotOperand)

    return NotOperand == Other->getOperand(0) ||
           NotOperand == Other->getOperand(1);

    A = A->getOperand(0);

    B = B->getOperand(0);

    return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) ||
           MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B);

  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");

  if (cast<ConstantSDNode>(Step)->isZero())
  int NumOps = Ops.size();
  assert(NumOps != 0 && "Can't build an empty vector!");
         "BUILD_VECTOR cannot be used with scalable types");
         "Incorrect element count in BUILD_VECTOR!");

  bool IsIdentity = true;
  for (int i = 0; i != NumOps; ++i) {
        (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
        Ops[i].getConstantOperandAPInt(1) != i) {
    IdentitySrc = Ops[i].getOperand(0);

  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
                      return Ops[0].getValueType() == Op.getValueType();
         "Concatenation of vectors with inconsistent value types!");
         "Incorrect element count in vector concatenation!");

  if (Ops.size() == 1)

  bool IsIdentity = true;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
        Op.getOperand(0).getValueType() != VT ||
        (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
        Op.getConstantOperandVal(1) != IdentityIndex) {
    assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
           "Unexpected identity source vector for concat of extracts");
    IdentitySrc = Op.getOperand(0);

  assert(IdentitySrc && "Failed to set source vector of extracts");
  EVT OpVT = Op.getValueType();

  SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);

  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  CSEMap.InsertNode(N, IP);

    return getNode(Opcode, DL, VT, N1, Flags);

           "STEP_VECTOR can only be used with scalable types");
           "Unexpected step operand");

           "Invalid FP cast!");
           "Vector element count mismatch!");

           "Invalid SIGN_EXTEND!");
           "SIGN_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

      unsigned NumSignExtBits =

           "Invalid ZERO_EXTEND!");
           "ZERO_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

           "Invalid ANY_EXTEND!");
           "ANY_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

           "Invalid TRUNCATE!");
           "TRUNCATE result type should be vector iff the operand "
           "Vector element count mismatch!");

    assert(VT.isVector() && "This DAG node is restricted to vector types.");
           "The input must be the same size or smaller than the result.");
           "The destination vector type must have fewer lanes than the input.");

           "BSWAP types must be a multiple of 16 bits!");

           "Cannot BITCAST between types of different sizes!");

           "Illegal SCALAR_TO_VECTOR node!");

           "Wrong operand type!");

  if (VT != MVT::Glue) {
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

  return std::nullopt;

                                               bool IsUndef1, const APInt &C2,
  if (!(IsUndef1 || IsUndef2))

  return std::nullopt;
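// Note: FoldValue returns std::nullopt when folding is impossible (the
// division/remainder cases above guard with C2.getBoolValue() against a zero
// divisor); FoldValueWithUndef delegates to FoldValue unless at least one
// side is undef.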
  auto *C2 = dyn_cast<ConstantSDNode>(N2);

  int64_t Offset = C2->getSExtValue();

  assert(Ops.size() == 2 && "Div/rem should have 2 operands");

                  [](SDValue V) { return V.isUndef() || isNullConstant(V); });

  unsigned NumOps = Ops.size();

  if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
    const APInt &Val = C->getAPIntValue();

                         C->isTargetOpcode(), C->isOpaque());

                         C->isTargetOpcode(), C->isOpaque());

                         C->isTargetOpcode(), C->isOpaque());

                         C->isTargetOpcode(), C->isOpaque());
      if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
      if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
      if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)

  if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) {

      return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);

      if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
      if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
      if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
  if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
    if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
      if (C1->isOpaque() || C2->isOpaque())

      std::optional<APInt> FoldAttempt =
          FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());

             "Can't fold vectors ops with scalar operands");

    EVT EVT = cast<VTSDNode>(Ops[1])->getVT();

    if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
      const APInt &Val = C1->getAPIntValue();
      return SignExtendInReg(Val, VT);

      llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
        const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
        ScalarOps.push_back(SignExtendInReg(Val, OpVT));

        isa<ConstantSDNode>(Ops[0].getOperand(0)))
          SignExtendInReg(Ops[0].getConstantOperandAPInt(0),

    auto *C1 = dyn_cast<ConstantSDNode>(Ops[0]);
    auto *C2 = dyn_cast<ConstantSDNode>(Ops[1]);
    auto *C3 = dyn_cast<ConstantSDNode>(Ops[2]);

    if (C1 && C2 && C3) {
      if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
      const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
                  &V3 = C3->getAPIntValue();

           Ops[2].getValueType() == VT && "FMA types must match!");

    if (C1 && C2 && C3) {

        Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&

    auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
    auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);

      if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
          BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {

              Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);

        BVEltVT = BV1->getOperand(0).getValueType();
        BVEltVT = BV2->getOperand(0).getValueType();

                                  DstBits, RawBits, DstUndefs,
        for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {

            ? Ops[0].getConstantOperandAPInt(0) * RHSVal
            : Ops[0].getConstantOperandAPInt(0) << RHSVal;

  auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorElementCount() == NumElts;

  auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {

  if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||

  for (unsigned I = 0; I != NumVectorElts; I++) {
      EVT InSVT = Op.getValueType().getScalarType();

        !isa<ConstantSDNode>(ScalarOp) &&

      if (LegalSVT != SVT)
        ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult);

  if (Ops.size() != 2)
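// Note (hedged): the vector path of foldConstantArithmetic folds element-wise:
// it extracts raw constant bits from both build_vectors, applies the scalar
// fold per lane (honoring undef lanes), rebuilds a constant vector, and
// extends each scalar result to the legal scalar type when needed.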
  if (N1CFP && N2CFP) {

    if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())

  if (SrcEltVT == DstEltVT)

  if (SrcBitSize == DstBitSize) {
      if (Op.getValueType() != SrcEltVT)

    for (unsigned I = 0, E = RawBits.size(); I != E; ++I) {
      if (UndefElements[I])

  ID.AddInteger(A.value());

  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

      newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, A);
  createOperands(N, {Val});

  CSEMap.InsertNode(N, IP);
  return getNode(Opcode, DL, VT, N1, N2, Flags);

  if ((N1C && !N2C) || (N1CFP && !N2CFP))

         "Operand is DELETED_NODE!");

  auto *N1C = dyn_cast<ConstantSDNode>(N1);
  auto *N2C = dyn_cast<ConstantSDNode>(N2);

           N2.getValueType() == MVT::Other && "Invalid token factor!");

    if (N1 == N2)
      return N1;

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    if (N2CV && N2CV->isZero())

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    if (N2CV && N2CV->isZero())

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

      const APInt &N2CImm = N2C->getAPIntValue();

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

           "Types of operands of UCMP/SCMP must match");
           "Operands and return type must both be scalars or vectors");
           "Result and operands must have the same number of elements");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

           N1.getValueType() == VT && "Binary operator types must match!");

           "Invalid FCOPYSIGN!");

      const APInt &ShiftImm = N2C->getAPIntValue();

           "Shift operators return type must be the same as their first arg");
           "Shifts only work on integers");
           "Vector shift amounts must have the same type as their first arg");
           "Invalid use of small shift amount with oversized value!");

    if (N2CV && N2CV->isZero())

           (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&

           "AssertNoFPClass is used for a non-floating type");
    assert(isa<ConstantSDNode>(N2) && "NoFPClass is not Constant");
               BitmaskEnumDetail::Mask<FPClassTest>() &&
           "FPClassTest value too large");

    EVT EVT = cast<VTSDNode>(N2)->getVT();
           "Cannot *_EXTEND_INREG FP types");
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");

    EVT EVT = cast<VTSDNode>(N2)->getVT();
           "Cannot *_EXTEND_INREG FP types");
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "Vector element counts must match in SIGN_EXTEND_INREG");

    if (EVT == VT)
      return N1;

           "FP_TO_*INT_SAT type should be vector iff the operand type is "
           "Vector element counts must match in FP_TO_*INT_SAT");
    assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
           "Type to saturate to must be a scalar.");

           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
            element type of the vector.");

                     N2C->getZExtValue() % Factor);

           "BUILD_VECTOR used for scalable vectors");

    if (N1Op2C && N2C) {

    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
           "Wrong types for EXTRACT_ELEMENT!");

      unsigned Shift = ElementSize * N2C->getZExtValue();
      const APInt &Val = N1C->getAPIntValue();

           "Extract subvector VTs must be vectors!");
           "Extract subvector VTs must have the same element type!");
           "Cannot extract a scalable vector from a fixed length vector!");
           "Extract subvector must be from larger vector to smaller vector!");
    assert(N2C && "Extract subvector index must be a constant");
           "Extract subvector overflow!");
    assert(N2C->getAPIntValue().getBitWidth() ==
           "Constant index for EXTRACT_SUBVECTOR has an invalid size");
           "Extract index is not a multiple of the output vector length");

      return N1.getOperand(N2C->getZExtValue() / Factor);
if (VT != MVT::Glue) {
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
       "Operand is DELETED_NODE!");
       "SETCC operands must have the same type!");
       "SETCC type should be vector iff the operand type is vector!");
       "SETCC vector element counts must match!");
if (cast<ConstantSDNode>(N3)->isZero())
       "INSERT_VECTOR_ELT vector type mismatch");
       "INSERT_VECTOR_ELT scalar fp/int mismatch");
       "INSERT_VECTOR_ELT fp scalar type mismatch");
       "INSERT_VECTOR_ELT int scalar size mismatch");
auto *N3C = dyn_cast<ConstantSDNode>(N3);
       "Dest and insert subvector source types must match!");
       "Insert subvector VTs must be vectors!");
       "Insert subvector VTs must have the same element type!");
       "Cannot insert a scalable vector into a fixed length vector!");
       "Insert subvector must be from smaller vector to larger vector!");
assert(isa<ConstantSDNode>(N3) &&
       "Insert subvector index must be constant");
       "Insert subvector overflow!");
       "Constant index for INSERT_SUBVECTOR has an invalid size");
case ISD::VP_TRUNCATE:
case ISD::VP_SIGN_EXTEND:
case ISD::VP_ZERO_EXTEND:
assert(VT == VecVT && "Vector and result type don't match.");
       "All inputs must be vectors.");
assert(VecVT == PassthruVT && "Vector and passthru types don't match.");
       "Vector and mask must have same number of elements.");
       "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
       "node to have the same type!");
       "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
       "the same type as its result!");
       "Expected the element count of the second and third operands of the "
       "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
       "element count of the first operand and the result!");
       "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
       "node to have an element type which is the same as or smaller than "
       "the element type of the first operand and result!");
if (VT != MVT::Glue) {
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VT, Ops, Flags);
return getNode(Opcode, DL, VT, N1, N2, N3, N4, Flags);
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VT, Ops, Flags);
return getNode(Opcode, DL, VT, N1, N2, N3, N4, N5, Flags);
if (FI->getIndex() < 0)
assert(C->getAPIntValue().getBitWidth() == 8);
return DAG.getConstant(Val, dl, VT, false, IsOpaque);
assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
if (VT != Value.getValueType())
if (Slice.Array == nullptr) {
unsigned NumVTBytes = NumVTBits / 8;
unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
APInt Val(NumVTBits, 0);
for (unsigned i = 0; i != NumBytes; ++i)
for (unsigned i = 0; i != NumBytes; ++i)
  Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
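// The two loops above pack the constant-string bytes into an integer, one
// loop per byte order; the visible shift of (NumVTBytes-i-1)*8 places the
// first source byte in the most-significant position, i.e. the big-endian
// layout.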
       APInt(Base.getValueSizeInBits().getFixedValue(),
             Offset.getKnownMinValue()));
EVT BasePtrVT = Ptr.getValueType();
G = cast<GlobalAddressSDNode>(Src);
else if (Src.getOpcode() == ISD::ADD &&
  G = cast<GlobalAddressSDNode>(Src.getOperand(0));
  SrcDelta = Src.getConstantOperandVal(1);
SrcDelta + G->getOffset());
assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
for (unsigned i = From; i < To; ++i) {
  GluedLoadChains.push_back(OutLoadChains[i]);
for (unsigned i = From; i < To; ++i) {
  StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
                   ST->getBasePtr(), ST->getMemoryVT(),
                   ST->getMemOperand());
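// Rebuilding each store on top of the glued load chain ties the paired
// loads and stores of the inlined memcpy together, which keeps the scheduler
// from interleaving them with unrelated memory operations.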
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
  DstAlignCanChange = true;
if (!SrcAlign || Alignment > *SrcAlign)
  SrcAlign = Alignment;
assert(SrcAlign && "SrcAlign must be set");
bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
const MemOp Op = isZeroConstant
                 *SrcAlign, isVol, CopyFromConstant);
if (DstAlignCanChange) {
  Type *Ty = MemOps[0].getTypeForEVT(C);
  Align NewAlign = DL.getABITypeAlign(Ty);
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    Alignment = NewAlign;
const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V);
BatchAA && SrcVal &&
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i != NumMemOps; ++i) {
  if (VTSize > Size) {
    assert(i == NumMemOps-1 && i != 0);
    SrcOff -= VTSize - Size;
    DstOff -= VTSize - Size;
  if (CopyFromConstant &&
    if (SrcOff < Slice.Length) {
      SubSlice.move(SrcOff);
      SubSlice.Array = nullptr;
      SubSlice.Length = VTSize;
    if (Value.getNode()) {
                 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
    if (!Store.getNode()) {
    bool isDereferenceable =
    if (isDereferenceable)
                 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
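// Per-chunk strategy in the loop above, roughly: when copying from a constant
// the chunk value can be materialized and stored directly; otherwise a
// load/store pair is emitted (flagged dereferenceable when provable), and the
// final chunk is shifted back so an oversized type never writes past the end.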
unsigned NumLdStInMemcpy = OutStoreChains.size();
if (NumLdStInMemcpy) {
  for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
  if (NumLdStInMemcpy <= GluedLdStLimit) {
                 NumLdStInMemcpy, OutLoadChains,
    unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
    unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
    unsigned GlueIter = 0;
    for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
      unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
      unsigned IndexTo = NumLdStInMemcpy - GlueIter;
                   OutLoadChains, OutStoreChains);
      GlueIter += GluedLdStLimit;
    if (RemainingLdStInMemcpy) {
                   RemainingLdStInMemcpy, OutLoadChains,
bool isVol, bool AlwaysInline,
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
  DstAlignCanChange = true;
if (!SrcAlign || Alignment > *SrcAlign)
  SrcAlign = Alignment;
assert(SrcAlign && "SrcAlign must be set");
if (DstAlignCanChange) {
  Type *Ty = MemOps[0].getTypeForEVT(C);
  Align NewAlign = DL.getABITypeAlign(Ty);
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    Alignment = NewAlign;
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
  bool isDereferenceable =
  if (isDereferenceable)
               SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
for (unsigned i = 0; i < NumMemOps; i++) {
               Chain, dl, LoadValues[i],
               DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
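// Unlike memcpy, memmove must issue every load before the first store: with
// potentially overlapping buffers a store could otherwise clobber bytes that
// still need to be read, so the two loops above are kept separate.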
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
  DstAlignCanChange = true;
MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
if (DstAlignCanChange) {
  Align NewAlign = DL.getABITypeAlign(Ty);
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    Alignment = NewAlign;
unsigned NumMemOps = MemOps.size();
EVT LargestVT = MemOps[0];
for (unsigned i = 1; i < NumMemOps; i++)
  if (MemOps[i].bitsGT(LargestVT))
    LargestVT = MemOps[i];
for (unsigned i = 0; i < NumMemOps; i++) {
  if (VTSize > Size) {
    assert(i == NumMemOps-1 && i != 0);
    DstOff -= VTSize - Size;
  if (VT.bitsLT(LargestVT)) {
assert(Value.getValueType() == VT && "Value with wrong type.");
std::pair<SDValue, SDValue>
bool IsTailCall = false;
Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI,
if (ConstantSize->isZero())
    *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
    isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
if (Result.getNode())
    *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
    DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
assert(ConstantSize && "AlwaysInline requires a constant size!");
    *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
    isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
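// getMemcpy tries its strategies in order: inline expansion for small
// constant sizes, then the target's custom lowering hook, then a forced
// inline expansion when AlwaysInline is set, and finally the memcpy libcall
// emitted below.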
Args.emplace_back(Dst, PtrTy);
Args.emplace_back(Src, PtrTy);
bool IsTailCall = false;
if (OverrideTailCall.has_value()) {
  IsTailCall = *OverrideTailCall;
ReturnsFirstArg && LowersToMemcpy);
Dst.getValueType().getTypeForEVT(*getContext()),
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
Type *SizeTy, unsigned ElemSz,
Args.emplace_back(Dst, ArgTy);
Args.emplace_back(Src, ArgTy);
Args.emplace_back(Size, SizeTy);
RTLIB::Libcall LibraryCall =
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
std::optional<bool> OverrideTailCall,
if (ConstantSize->isZero())
    *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
    isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
if (Result.getNode())
    Alignment, isVol, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
Args.emplace_back(Dst, PtrTy);
Args.emplace_back(Src, PtrTy);
bool IsTailCall = false;
if (OverrideTailCall.has_value()) {
  IsTailCall = *OverrideTailCall;
bool LowersToMemmove =
ReturnsFirstArg && LowersToMemmove);
Dst.getValueType().getTypeForEVT(*getContext()),
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
Type *SizeTy, unsigned ElemSz,
Args.emplace_back(Dst, IntPtrTy);
Args.emplace_back(Src, IntPtrTy);
Args.emplace_back(Size, SizeTy);
RTLIB::Libcall LibraryCall =
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
bool isVol, bool AlwaysInline,
if (ConstantSize->isZero())
    isVol, false, DstPtrInfo, AAInfo);
if (Result.getNode())
    *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
if (Result.getNode())
assert(ConstantSize && "AlwaysInline requires a constant size!");
    isVol, true, DstPtrInfo, AAInfo);
       "getMemsetStores must return a valid sequence when AlwaysInline");
Args.emplace_back(Size, DL.getIntPtrType(Ctx));
Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
Args.emplace_back(Size, DL.getIntPtrType(Ctx));
Dst.getValueType().getTypeForEVT(Ctx),
bool LowersToMemset =
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
Type *SizeTy, unsigned ElemSz,
Args.emplace_back(Size, SizeTy);
RTLIB::Libcall LibraryCall =
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
    dl.getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
if (auto *E = cast_or_null<AtomicSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
  E->refineAlignment(MMO);
  E->refineRanges(MMO);
    VTList, MemVT, MMO, ExtType);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
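// When CSE finds an existing atomic node, its memory operand is refined in
// place (alignment and range metadata merged via refineAlignment /
// refineRanges) instead of creating a second node.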
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
       "Invalid Atomic Op");
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
if (Ops.size() == 1)
if (Size.hasValue() && !Size.getValue())
       (Opcode <= (unsigned)std::numeric_limits<int>::max() &&
       "Opcode is not a memory-accessing opcode!");
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
  ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
      Opcode, dl.getIROrder(), VTList, MemVT, MMO));
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      VTList, MemVT, MMO);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
      VTList, MemVT, MMO);
  createOperands(N, Ops);
SDValue Chain, int FrameIndex) {
ID.AddInteger(FrameIndex);
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
ID.AddInteger(Index);
if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
auto *N = newSDNode<PseudoProbeSDNode>(
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
    !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
    !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
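// Pointer-info inference: a base that is a FrameIndex plus a constant offset
// can be given an exact MachinePointerInfo, which lets later alias analysis
// disambiguate these stack accesses.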
9673 "Invalid chain type");
9685 Alignment, AAInfo, Ranges);
9696 assert(VT == MemVT &&
"Non-extending load from different memory type!");
9700 "Should only be an extending load, not truncating!");
9702 "Cannot convert from FP to Int or Int -> FP!");
9704 "Cannot use an ext load to convert to or from a vector!");
9707 "Cannot use an ext load to change the number of vector elements!");
9714 "Range metadata and load type must match!");
9725 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
9726 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
9730 if (
auto *E = cast_or_null<LoadSDNode>(FindNodeOrInsertPos(
ID, dl, IP))) {
9731 E->refineAlignment(MMO);
9732 E->refineRanges(MMO);
9736 ExtType, MemVT, MMO);
9737 createOperands(
N, Ops);
9739 CSEMap.InsertNode(
N, IP);
9753 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
9771 MemVT, Alignment, MMOFlags, AAInfo);
9786 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
9789 LD->getMemOperand()->getFlags() &
9792 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
9793 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
9826 bool IsTruncating) {
9830 IsTruncating =
false;
9831 }
else if (!IsTruncating) {
9832 assert(VT == SVT &&
"No-truncating store from different memory type!");
9835 "Should only be a truncating store, not extending!");
9838 "Cannot use trunc store to convert to or from a vector!");
9841 "Cannot use trunc store to change the number of vector elements!");
9852 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9853 dl.
getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
9857 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9858 cast<StoreSDNode>(E)->refineAlignment(MMO);
9862 IsTruncating, SVT, MMO);
9863 createOperands(
N, Ops);
9865 CSEMap.InsertNode(
N, IP);
9878 "Invalid chain type");
9888 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
9903 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
9905 ST->getMemoryVT(), ST->getMemOperand(), AM,
9906 ST->isTruncatingStore());
const MDNode *Ranges, bool IsExpanding) {
    Alignment, AAInfo, Ranges);
return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
    dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
if (auto *E = cast_or_null<VPLoadSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
  E->refineAlignment(MMO);
  E->refineRanges(MMO);
    ExtType, IsExpanding, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
    Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
    Mask, EVL, VT, MMO, IsExpanding);
const AAMDNodes &AAInfo, bool IsExpanding) {
    EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
    EVL, MemVT, MMO, IsExpanding);
auto *LD = cast<VPLoadSDNode>(OrigLoad);
assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
LD->getMemOperand()->getFlags() &
    LD->getChain(), Base, Offset, LD->getMask(),
    LD->getVectorLength(), LD->getPointerInfo(),
    LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
    nullptr, LD->isExpandingLoad());
bool IsCompressing) {
ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
    dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPStoreSDNode>(E)->refineAlignment(MMO);
    IsTruncating, IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
bool IsCompressing) {
    PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
bool IsCompressing) {
    false, IsCompressing);
       "Should only be a truncating store, not extending!");
       "Cannot use trunc store to convert to or from a vector!");
       "Cannot use trunc store to change the number of vector elements!");
SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPStoreSDNode>(E)->refineAlignment(MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
auto *ST = cast<VPStoreSDNode>(OrigStore);
assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
                 Offset, ST->getMask(), ST->getVectorLength()};
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
ID.AddInteger(ST->getMemOperand()->getFlags());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
auto *N = newSDNode<VPStoreSDNode>(
    ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
    DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM,
                               ExtType, IsExpanding, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
bool IsExpanding) {
    Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
    Stride, Mask, EVL, MemVT, MMO, IsExpanding);
bool IsTruncating, bool IsCompressing) {
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
    DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
                                          VTs, AM, IsTruncating,
                                          IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
bool IsCompressing) {
    false, IsCompressing);
       "Should only be a truncating store, not extending!");
       "Cannot use trunc store to convert to or from a vector!");
       "Cannot use trunc store to change the number of vector elements!");
SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
                                          IsCompressing, SVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
assert(Ops.size() == 6 && "Incompatible number of operands");
ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPGatherSDNode>(E)->refineAlignment(MMO);
    VT, MMO, IndexType);
createOperands(N, Ops);
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValueType(0).getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValueType(0).getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValueType(0).getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
CSEMap.InsertNode(N, IP);
assert(Ops.size() == 7 && "Incompatible number of operands");
ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPScatterSDNode>(E)->refineAlignment(MMO);
    VT, MMO, IndexType);
createOperands(N, Ops);
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValue().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
       N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValue().getValueType().getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValue().getValueType().getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
CSEMap.InsertNode(N, IP);
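// Gather/scatter nodes must keep mask, index, and data element counts in
// sync (including their scalable-ness), and the scale operand must be a
// constant power of two; the asserts above enforce those structural
// invariants at node-creation time.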
10395 "Unindexed masked load with an offset!");
10402 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
10403 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
10406 void *IP =
nullptr;
10407 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10408 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
10412 AM, ExtTy, isExpanding, MemVT, MMO);
10413 createOperands(
N, Ops);
10415 CSEMap.InsertNode(
N, IP);
10426 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
10428 Offset, LD->getMask(), LD->getPassThru(),
10429 LD->getMemoryVT(), LD->getMemOperand(), AM,
10430 LD->getExtensionType(), LD->isExpandingLoad());
10438 bool IsCompressing) {
10440 "Invalid chain type");
10443 "Unindexed masked store with an offset!");
10450 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
10451 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10454 void *IP =
nullptr;
10455 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10456 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
10461 IsTruncating, IsCompressing, MemVT, MMO);
10462 createOperands(
N, Ops);
10464 CSEMap.InsertNode(
N, IP);
10475 assert(ST->getOffset().isUndef() &&
10476 "Masked store is already a indexed store!");
10478 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
10479 AM, ST->isTruncatingStore(), ST->isCompressingStore());
10487 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10492 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
10493 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
10496 void *IP =
nullptr;
10497 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10498 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
10503 VTs, MemVT, MMO, IndexType, ExtTy);
10504 createOperands(
N, Ops);
10506 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
10507 "Incompatible type of the PassThru value in MaskedGatherSDNode");
10508 assert(
N->getMask().getValueType().getVectorElementCount() ==
10509 N->getValueType(0).getVectorElementCount() &&
10510 "Vector width mismatch between mask and data");
10511 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10512 N->getValueType(0).getVectorElementCount().isScalable() &&
10513 "Scalable flags of index and data do not match");
10515 N->getIndex().getValueType().getVectorElementCount(),
10516 N->getValueType(0).getVectorElementCount()) &&
10517 "Vector width mismatch between index and data");
10518 assert(isa<ConstantSDNode>(
N->getScale()) &&
10519 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10520 "Scale should be a constant power of 2");
10522 CSEMap.InsertNode(
N, IP);
assert(Ops.size() == 6 && "Incompatible number of operands");
ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
    dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    VTs, MemVT, MMO, IndexType, IsTrunc);
createOperands(N, Ops);
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValue().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
       N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValue().getValueType().getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValue().getValueType().getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
CSEMap.InsertNode(N, IP);
assert(Ops.size() == 7 && "Incompatible number of operands");
ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
    dl.getIROrder(), VTs, MemVT, MMO, IndexType));
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    VTs, MemVT, MMO, IndexType);
createOperands(N, Ops);
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getIndex().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
assert(N->getInc().getValueType().isInteger() && "Non-integer update value");
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(DL.getIROrder(),
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPLoadFFSDNode>(E)->refineAlignment(MMO);
auto *N = newSDNode<VPLoadFFSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs,
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
if (Cond.isUndef())
return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
if (X.getValueType().getScalarType() == MVT::i1)
bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
switch (Ops.size()) {
case 0: return getNode(Opcode, DL, VT);
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
return getNode(Opcode, DL, VT, NewOps);
return getNode(Opcode, DL, VT, Ops, Flags);
unsigned NumOps = Ops.size();
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
for (const auto &Op : Ops)
       "Operand is DELETED_NODE!");
assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
       "LHS and RHS of condition must have same type!");
       "True and False arms of SelectCC must have same type!");
       "select_cc node must be of same type as true and false value!");
       "Expected select_cc with vector result to have the same sized "
       "comparison type!");
assert(NumOps == 5 && "BR_CC takes 5 operands!");
       "LHS/RHS of comparison should match types!");
Opcode = ISD::VP_XOR;
Opcode = ISD::VP_AND;
case ISD::VP_REDUCE_MUL:
  Opcode = ISD::VP_REDUCE_AND;
case ISD::VP_REDUCE_ADD:
  Opcode = ISD::VP_REDUCE_XOR;
case ISD::VP_REDUCE_SMAX:
case ISD::VP_REDUCE_UMIN:
  Opcode = ISD::VP_REDUCE_AND;
case ISD::VP_REDUCE_SMIN:
case ISD::VP_REDUCE_UMAX:
  Opcode = ISD::VP_REDUCE_OR;
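// For i1 element types the VP operations above are canonicalized onto cheaper
// logical equivalents: e.g. multiplication of i1 is AND, addition is XOR, and
// the signed/unsigned min/max reductions collapse to AND/OR.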
if (VT != MVT::Glue) {
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
N->setFlags(Flags);
return getNode(Opcode, DL, VTList, Ops, Flags);
return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
for (const auto &Op : Ops)
       "Operand is DELETED_NODE!");
       "Invalid add/sub overflow op!");
       Ops[0].getValueType() == Ops[1].getValueType() &&
       Ops[0].getValueType() == VTList.VTs[0] &&
       "Binary operator types must match!");
SDValue N1 = Ops[0], N2 = Ops[1];
if (N2CV && N2CV->isZero()) {
       "Invalid add/sub overflow op!");
       Ops[0].getValueType() == Ops[1].getValueType() &&
       Ops[0].getValueType() == VTList.VTs[0] &&
       Ops[2].getValueType() == VTList.VTs[1] &&
       "Binary operator types must match!");
       VTList.VTs[0] == Ops[0].getValueType() &&
       VTList.VTs[0] == Ops[1].getValueType() &&
       "Binary operator types must match!");
unsigned OutWidth = Width * 2;
Val = Val.sext(OutWidth);
Mul = Mul.sext(OutWidth);
Val = Val.zext(OutWidth);
Mul = Mul.zext(OutWidth);
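// Roughly: [SU]MULO is constant-folded by evaluating the product at twice
// the operand width (sign- or zero-extended as appropriate); overflow is
// flagged when the wide product cannot be represented back in the original
// width.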
       VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch");
       "Invalid STRICT_FP_EXTEND!");
       Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
       "STRICT_FP_EXTEND result type should be vector iff the operand "
       "type is vector!");
       Ops[1].getValueType().getVectorElementCount()) &&
       "Vector element count mismatch!");
       "Invalid fpext node, dst <= src!");
assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
       "STRICT_FP_ROUND result type should be vector iff the operand "
       "type is vector!");
       Ops[1].getValueType().getVectorElementCount()) &&
       "Vector element count mismatch!");
       Ops[1].getValueType().isFloatingPoint() &&
       VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
       (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
       "Invalid STRICT_FP_ROUND!");
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
  createOperands(N, Ops);
N->setFlags(Flags);
return getNode(Opcode, DL, VTList, Ops);
return getNode(Opcode, DL, VTList, Ops);
SDValue Ops[] = { N1, N2, N3 };
return getNode(Opcode, DL, VTList, Ops);
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VTList, Ops);
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VTList, Ops);
return makeVTList(&(*EVTs.insert(VT).first), 1);
void *IP = nullptr;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
VTListMap.InsertNode(Result, IP);
return Result->getSDVTList();
void *IP = nullptr;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
VTListMap.InsertNode(Result, IP);
return Result->getSDVTList();
void *IP = nullptr;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
VTListMap.InsertNode(Result, IP);
return Result->getSDVTList();
unsigned NumVTs = VTs.size();
ID.AddInteger(NumVTs);
for (unsigned index = 0; index < NumVTs; index++) {
  ID.AddInteger(VTs[index].getRawBits());
void *IP = nullptr;
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
VTListMap.InsertNode(Result, IP);
return Result->getSDVTList();
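// VT lists are uniqued through VTListMap, so nodes with the same result
// types share one allocator-lifetime array and value-type lists can be
// compared by pointer.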
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
if (Op == N->getOperand(0))
  return N;
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
N->OperandList[0].set(Op);
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
if (N->OperandList[0] != Op1)
  N->OperandList[0].set(Op1);
if (N->OperandList[1] != Op2)
  N->OperandList[1].set(Op2);
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
SDValue Ops[] = { Op1, Op2, Op3 };
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
unsigned NumOps = Ops.size();
assert(N->getNumOperands() == NumOps &&
       "Update with wrong number of operands");
if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
for (unsigned i = 0; i != NumOps; ++i)
  if (N->OperandList[i] != Ops[i])
    N->OperandList[i].set(Ops[i]);
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
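// UpdateNodeOperands mutates a node in place, so it must first drop the node
// from the CSE maps; if the mutated form already exists (FindModifiedNodeSlot
// hit), the existing node is returned instead of creating a duplicate, and
// otherwise the node is re-inserted at the remembered position.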
if (NewMemRefs.empty()) {
if (NewMemRefs.size() == 1) {
  N->MemRefs = NewMemRefs[0];
    Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
N->MemRefs = MemRefsBuffer;
N->NumMemRefs = static_cast<int>(NewMemRefs.size());
SDValue Ops[] = { Op1, Op2 };
SDValue Ops[] = { Op1, Op2, Op3 };
SDValue Ops[] = { Op1, Op2 };
New->setNodeId(-1);
unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
N->setIROrder(Order);
void *IP = nullptr;
if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
  return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
if (!RemoveNodeFromCSEMaps(N))
N->ValueList = VTs.VTs;
if (Used->use_empty())
  DeadNodeSet.insert(Used);
MN->clearMemRefs();
createOperands(N, Ops);
if (!DeadNodeSet.empty()) {
  for (SDNode *N : DeadNodeSet)
    if (N->use_empty())
CSEMap.InsertNode(N, IP);
unsigned OrigOpc = Node->getOpcode();
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
assert(Node->getNumValues() == 2 && "Unexpected number of results!");
SDValue InputChain = Node->getOperand(0);
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
SDValue Ops[] = { Op1, Op2 };
SDValue Ops[] = { Op1, Op2, Op3 };
SDValue Ops[] = { Op1, Op2 };
SDValue Ops[] = { Op1, Op2, Op3 };
SDValue Ops[] = { Op1, Op2 };
SDValue Ops[] = { Op1, Op2, Op3 };
bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
    VT, Operand, SRIdxVal);
    VT, Operand, Subreg, SRIdxVal);
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
  void *IP = nullptr;
  E->intersectFlagsWith(Flags);
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
  void *IP = nullptr;
  if (FindNodeOrInsertPos(ID, SDLoc(), IP))
SDNode *N, unsigned R, bool IsIndirect,
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
    {}, IsIndirect, DL, O,
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
    Dependencies, IsIndirect, DL, O,
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
    {}, IsIndirect, DL, O,
unsigned O, bool IsVariadic) {
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
    DL, O, IsVariadic);
unsigned OffsetInBits, unsigned SizeInBits,
bool InvalidateDbg) {
assert(FromNode && ToNode && "Can't modify dbg values");
if (From == To || FromNode == ToNode)
if (Dbg->isInvalidated())
bool Changed = false;
auto NewLocOps = Dbg->copyLocationOps();
    NewLocOps.begin(), NewLocOps.end(),
    bool Match = Op == FromLocOp;
auto *Expr = Dbg->getExpression();
if (auto FI = Expr->getFragmentInfo())
  if (OffsetInBits + SizeInBits > FI->SizeInBits)
auto AdditionalDependencies = Dbg->getAdditionalDependencies();
    Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
    Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
    Dbg->isVariadic());
if (InvalidateDbg) {
  Dbg->setIsInvalidated();
  Dbg->setIsEmitted();
       "Transferred DbgValues should depend on the new SDNode");
if (!N.getHasDebugValue())
auto GetLocationOperand = [](SDNode *Node, unsigned ResNo) {
  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
if (DV->isInvalidated())
switch (N.getOpcode()) {
if (!isa<ConstantSDNode>(N0)) {
  bool RHSConstant = isa<ConstantSDNode>(N1);
    Offset = N.getConstantOperandVal(1);
  if (!RHSConstant && DV->isIndirect())
  auto *DIExpr = DV->getExpression();
  auto NewLocOps = DV->copyLocationOps();
  bool Changed = false;
  size_t OrigLocOpsSize = NewLocOps.size();
  for (size_t i = 0; i < OrigLocOpsSize; ++i) {
        NewLocOps[i].getSDNode() != &N)
    const auto *TmpDIExpr =
      NewLocOps.push_back(RHS);
  assert(Changed && "Salvage target doesn't use N");
      DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
  auto AdditionalDependencies = DV->getAdditionalDependencies();
      DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
      DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
  DV->setIsInvalidated();
  DV->setIsEmitted();
             N0.getNode()->dumprFull(this);
             dbgs() << " into " << *DIExpr << '\n');
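// Salvage for a folded ADD: rather than dropping the variable location, the
// equivalent arithmetic is appended to the DIExpression (or the RHS is added
// as an extra location operand when it is not a constant), so the debug value
// survives the DAG transformation.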
TypeSize ToSize = N.getValueSizeInBits(0);
auto NewLocOps = DV->copyLocationOps();
bool Changed = false;
for (size_t i = 0; i < NewLocOps.size(); ++i) {
      NewLocOps[i].getSDNode() != &N)
assert(Changed && "Salvage target doesn't use N");
    DV->getAdditionalDependencies(), DV->isIndirect(),
    DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
DV->setIsInvalidated();
DV->setIsEmitted();
           dbgs() << " into " << *DbgExpression << '\n');
assert((!Dbg->getSDNodes().empty() ||
          return Op.getKind() == SDDbgOperand::FRAMEIX;
       "Salvaged DbgValue should depend on a new SDNode");
assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
while (UI != UE && N == UI->getUser())
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
       "Cannot replace with this method!");
RAUWUpdateListener Listener(*this, UI, UE);
RemoveNodeFromCSEMaps(User);
AddModifiedNodeToCSEMaps(User);
for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
       "Cannot use this version of ReplaceAllUsesWith!");
for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
  if (From->hasAnyUseOfValue(i)) {
    assert((i < To->getNumValues()) && "Invalid To location");
RAUWUpdateListener Listener(*this, UI, UE);
RemoveNodeFromCSEMaps(User);
AddModifiedNodeToCSEMaps(User);
if (From->getNumValues() == 1)
for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) {
RAUWUpdateListener Listener(*this, UI, UE);
RemoveNodeFromCSEMaps(User);
bool To_IsDivergent = false;
if (To_IsDivergent != From->isDivergent())
AddModifiedNodeToCSEMaps(User);
if (From == To)
  return;
if (From.getNode()->getNumValues() == 1) {
UE = From.getNode()->use_end();
RAUWUpdateListener Listener(*this, UI, UE);
bool UserRemovedFromCSEMaps = false;
if (Use.getResNo() != From.getResNo()) {
if (!UserRemovedFromCSEMaps) {
  RemoveNodeFromCSEMaps(User);
  UserRemovedFromCSEMaps = true;
if (!UserRemovedFromCSEMaps)
AddModifiedNodeToCSEMaps(User);
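// All ReplaceAllUsesWith variants follow the same protocol: take each user
// out of the CSE maps, rewrite its operands, then re-add it; the
// RAUWUpdateListener keeps the use iterator valid while users are being
// modified underneath it.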
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
for (UseMemo &Memo : Uses)
  if (Memo.User == N)
    Memo.User = nullptr;
switch (Node->getOpcode()) {
       "Conflicting divergence information!");
for (const auto &Op : N->ops()) {
  EVT VT = Op.getValueType();
  if (VT != MVT::Other && Op.getNode()->isDivergent() &&
if (N->SDNodeBits.IsDivergent != IsDivergent) {
  N->SDNodeBits.IsDivergent = IsDivergent;
} while (!Worklist.empty());
void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  Order.reserve(AllNodes.size());
  unsigned NOps = N.getNumOperands();
  Order.push_back(&N);
  for (size_t I = 0; I != Order.size(); ++I) {
    for (auto *U : N->users()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
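// CreateTopologicalOrder is Kahn's algorithm: nodes with no unprocessed
// operands seed the order, and each user is appended exactly once its
// remaining-operand count drops to zero.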
#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
void SelectionDAG::VerifyDAGDivergence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  for (auto *N : TopoOrder) {
           "Divergence bit inconsistency detected");
for (unsigned i = 0; i != Num; ++i) {
  unsigned FromResNo = From[i].getResNo();
    if (Use.getResNo() == FromResNo) {
      Uses.push_back(Memo);
RAUOVWUpdateListener Listener(*this, Uses);
for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
     UseIndex != UseIndexEnd; ) {
  if (User == nullptr) {
  RemoveNodeFromCSEMaps(User);
    unsigned i = Uses[UseIndex].Index;
  } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
  AddModifiedNodeToCSEMaps(User);
unsigned DAGSize = 0;
unsigned Degree = N.getNumOperands();
  N.setNodeId(DAGSize++);
  if (Q != SortedPos)
    SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
  assert(SortedPos != AllNodes.end() && "Overran node list");
  N.setNodeId(Degree);
    unsigned Degree = P->getNodeId();
    assert(Degree != 0 && "Invalid node degree");
      P->setNodeId(DAGSize++);
      if (P->getIterator() != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      P->setNodeId(Degree);
  if (Node.getIterator() == SortedPos) {
    dbgs() << "Overran sorted position:\n";
    dbgs() << "Checking if this is due to cycles\n";
assert(SortedPos == AllNodes.end() &&
       "Topological sort incomplete!");
       "First node in topological sort is not the entry token!");
assert(AllNodes.front().getNodeId() == 0 &&
       "First node in topological sort has non-zero id!");
assert(AllNodes.front().getNumOperands() == 0 &&
       "First node in topological sort has operands!");
assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
       "Last node in topological sort has unexpected id!");
assert(AllNodes.back().use_empty() &&
       "Last node in topological sort has users!");
for (SDNode *SD : DB->getSDNodes()) {
  SD->setHasDebugValue(true);
DbgInfo->add(DB, isParameter);
assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
if (OldChain == NewMemOpChain || OldChain.use_empty())
  return NewMemOpChain;
    OldChain, NewMemOpChain);
return TokenFactor;
assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
if (OutFunction != nullptr)
std::string ErrorStr;
ErrorFormatter << "Undefined external symbol ";
ErrorFormatter << '"' << Symbol << '"';
return Const != nullptr && Const->isZero();
return Const != nullptr && Const->isZero() && !Const->isNegative();
return Const != nullptr && Const->isAllOnes();
return Const != nullptr && Const->isOne();
return Const != nullptr && Const->isMinSignedValue();
unsigned OperandNo) {
APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
return Const.isZero();
return Const.isOne();
return Const.isAllOnes();
return Const.isMinSignedValue();
return Const.isMaxSignedValue();
return OperandNo == 1 && Const.isZero();
return OperandNo == 1 && Const.isOne();
return ConstFP->isZero() &&
       (Flags.hasNoSignedZeros() || ConstFP->isNegative());
return OperandNo == 1 && ConstFP->isZero() &&
       (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
return ConstFP->isExactlyValue(1.0);
return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
EVT VT = V.getValueType();
APFloat NeutralAF = !Flags.hasNoNaNs()
                    : !Flags.hasNoInfs()
return ConstFP->isExactlyValue(NeutralAF);
V = V.getOperand(0);
while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
  V = V.getOperand(0);
V = V.getOperand(0);
V = V.getOperand(0);
unsigned NumBits = V.getScalarValueSizeInBits();
return C && (C->getAPIntValue().countr_one() >= NumBits);
bool AllowTruncation) {
EVT VT = N.getValueType();
bool AllowTruncation) {
EVT VecEltVT = N->getValueType(0).getVectorElementType();
if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
  EVT CVT = CN->getValueType(0);
  assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
  if (AllowTruncation || CVT == VecEltVT)
ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
if (CN && (UndefElements.none() || AllowUndefs)) {
  EVT NSVT = N.getValueType().getScalarType();
  assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
  if (AllowTruncation || (CVT == NSVT))
EVT VT = N.getValueType();
const APInt &DemandedElts,
bool AllowUndefs) {
    BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
if (CN && (UndefElements.none() || AllowUndefs))
return C && C->isZero();
return C && C->isOne();
unsigned BitWidth = N.getScalarValueSizeInBits();
return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
    APInt(C->getAPIntValue().getBitWidth(), 1));
return C && C->isZero();
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
std::vector<EVT> VTs;
const EVT *SDNode::getValueTypeList(MVT VT) {
  static EVTArray SimpleVTArray;
  return &SimpleVTArray.VTs[VT.SimpleTy];
if (U.getResNo() == Value)
return any_of(N->op_values(),
              [this](SDValue Op) { return this == Op.getNode(); });
unsigned Depth) const {
if (*this == Dest)
  return true;
if (Depth == 0)
  return false;
  return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
  if (Ld->isUnordered())
    return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
this->Flags &= Flags;
bool AllowPartials) {
  return Op.getOpcode() == unsigned(BinOp);
unsigned CandidateBinOp = Op.getOpcode();
if (Op.getValueType().isFloatingPoint()) {
  switch (CandidateBinOp) {
    if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
  if (!AllowPartials || !Op)
  EVT OpVT = Op.getValueType();
unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
for (unsigned i = 0; i < Stages; ++i) {
  unsigned MaskEnd = (1 << i);
  if (Op.getOpcode() != CandidateBinOp)
    return PartialReduction(PrevOp, MaskEnd);
  Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
    return PartialReduction(PrevOp, MaskEnd);
  for (int Index = 0; Index < (int)MaskEnd; ++Index)
    if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
      return PartialReduction(PrevOp, MaskEnd);
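// A vector reduction appears as log2(NumElts) stages of binop + shuffle that
// folds the high half onto the low half; a stage whose shuffle mask does not
// match this pattern falls back to reporting a partial reduction over the
// elements matched so far.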
while (Op.getOpcode() == CandidateBinOp) {
  unsigned NumElts = Op.getValueType().getVectorNumElements();
  unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
  if (NumSrcElts != (2 * NumElts))
EVT VT = N->getValueType(0);
else if (NE > ResNE)
if (N->getNumValues() == 2) {
  EVT VT1 = N->getValueType(1);
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
  for (; i < ResNE; ++i) {
assert(N->getNumValues() == 1 &&
       "Can't unroll a vector with multiple results!");
for (i = 0; i != NE; ++i) {
  for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
    SDValue Operand = N->getOperand(j);
  switch (N->getOpcode()) {
    const auto *ASC = cast<AddrSpaceCastSDNode>(N);
        ASC->getSrcAddressSpace(),
        ASC->getDestAddressSpace()));
for (; i < ResNE; ++i)
unsigned Opcode = N->getOpcode();
       "Expected an overflow opcode");
EVT ResVT = N->getValueType(0);
EVT OvVT = N->getValueType(1);
else if (NE > ResNE)
for (unsigned i = 0; i < NE; ++i) {
  SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
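// Overflow opcodes are unrolled by scalarizing both operands, emitting one
// scalar overflow node per element, and rebuilding separate result and
// overflow vectors from the per-element pieces.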
13334 if (LD->isVolatile() ||
Base->isVolatile())
13337 if (!LD->isSimple())
13339 if (LD->isIndexed() ||
Base->isIndexed())
13341 if (LD->getChain() !=
Base->getChain())
13343 EVT VT = LD->getMemoryVT();
13351 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
13352 return (Dist * (int64_t)Bytes ==
Offset);
13361 int64_t GVOffset = 0;
13373 int FrameIdx = INT_MIN;
13374 int64_t FrameOffset = 0;
13376 FrameIdx = FI->getIndex();
13378 isa<FrameIndexSDNode>(
Ptr.getOperand(0))) {
13380 FrameIdx = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
13381 FrameOffset =
Ptr.getConstantOperandVal(1);
13384 if (FrameIdx != INT_MIN) {
13389 return std::nullopt;
13399 "Split node must be a scalar type");
13404 return std::make_pair(
Lo,
Hi);
13417 return std::make_pair(LoVT, HiVT);
13425 bool *HiIsEmpty)
const {
13435 "Mixing fixed width and scalable vectors when enveloping a type");
13440 *HiIsEmpty =
false;
13448 return std::make_pair(LoVT, HiVT);
13453std::pair<SDValue, SDValue>
13458 "Splitting vector with an invalid mixture of fixed and scalable "
13461 N.getValueType().getVectorMinNumElements() &&
13462 "More vector elements requested than available!");
13471 return std::make_pair(
Lo,
Hi);
13478 EVT VT =
N.getValueType();
13480 "Expecting the mask to be an evenly-sized vector");
13488 return std::make_pair(
Lo,
Hi);
13493 EVT VT =
N.getValueType();
13501 unsigned Start,
unsigned Count,
13503 EVT VT =
Op.getValueType();
13506 if (EltVT ==
EVT())
13509 for (
unsigned i = Start, e = Start + Count; i != e; ++i) {
13521 return Val.MachineCPVal->getType();
13522 return Val.ConstVal->getType();
13526 unsigned &SplatBitSize,
13527 bool &HasAnyUndefs,
13528 unsigned MinSplatBits,
13529 bool IsBigEndian)
const {
13533 if (MinSplatBits > VecWidth)
13538 SplatValue =
APInt(VecWidth, 0);
13539 SplatUndef =
APInt(VecWidth, 0);
13546 assert(NumOps > 0 &&
"isConstantSplat has 0-size build vector");
13549 for (
unsigned j = 0; j < NumOps; ++j) {
13550 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
13552 unsigned BitPos = j * EltWidth;
13555 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
13556 else if (
auto *CN = dyn_cast<ConstantSDNode>(OpVal))
13557 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
13558 else if (
auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
13559 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
13566 HasAnyUndefs = (SplatUndef != 0);
13569 while (VecWidth > 8) {
13574 unsigned HalfSize = VecWidth / 2;
13581 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
13582 MinSplatBits > HalfSize)
13585 SplatValue = HighValue | LowValue;
13586 SplatUndef = HighUndef & LowUndef;
13588 VecWidth = HalfSize;
13597 SplatBitSize = VecWidth;
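// Worked example: for a little-endian v4i8 build vector <42, 42, undef, 42>
// the 32-bit image halves to 16 and then to 8 bits (undef bits match
// anything), so the call reports SplatValue = 42, SplatBitSize = 8 and
// HasAnyUndefs = true.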
/// Returns the demanded splatted value or a null value if this is not a
/// splat.
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  // ...
  SDValue Splatted;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countr_zero();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}
/// Find the shortest repeating sequence of values in the build vector.
bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
                                            SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  Sequence.clear();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  // ...

  // Set the undefs even if we don't find a sequence (like getSplatValue).
  if (UndefElements)
    for (unsigned I = 0; I != NumOps; ++I)
      if (DemandedElts[I] && getOperand(I).isUndef())
        (*UndefElements)[I] = true;

  // Iteratively widen the sequence length looking for repetitions.
  for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
    Sequence.append(SeqLen, SDValue());
    for (unsigned I = 0; I != NumOps; ++I) {
      if (!DemandedElts[I])
        continue;
      SDValue &SeqOp = Sequence[I % SeqLen];
      SDValue Op = getOperand(I);
      if (Op.isUndef()) {
        if (!SeqOp)
          SeqOp = Op;
        continue;
      }
      if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
        Sequence.clear();
        break;
      }
      SeqOp = Op;
    }
    if (!Sequence.empty())
      return true;
  }

  assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
  return false;
}
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

/// If this is a constant FP splat and the splatted constant FP is an exact
/// power of 2, return the log2 of that power, otherwise -1.
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (auto *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    // ... (convert the value to a BitWidth-wide integer; give up if the
    //      conversion is inexact)
    const APFloat &APF = CN->getValueAPF();
    // ...
    return IntVal.exactLogBase2();
  }
  return -1;
}
/// Extract the raw bit data from a build vector of Undef, Constant or
/// ConstantFP node elements.
bool BuildVectorSDNode::getConstantRawBits(
    bool IsLittleEndian, unsigned DstEltSizeInBits,
    SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const {
  // Early-out if this contains anything but Undef/Constant/ConstantFP.
  if (!isConstant())
    return false;

  unsigned NumSrcOps = getNumOperands();
  unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits();
  assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
         "Invalid bitcast scale");

  // Extract raw src bits.
  SmallVector<APInt> SrcBitElements(NumSrcOps,
                                    APInt::getZero(SrcEltSizeInBits));
  BitVector SrcUndeElements(NumSrcOps, false);

  for (unsigned I = 0; I != NumSrcOps; ++I) {
    SDValue Op = getOperand(I);
    if (Op.isUndef()) {
      SrcUndeElements.set(I);
      continue;
    }
    auto *CInt = dyn_cast<ConstantSDNode>(Op);
    auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
    assert((CInt || CFP) && "Unknown constant");
    SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
                             : CFP->getValueAPF().bitcastToAPInt();
  }

  // Recast to dst width.
  recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
                SrcBitElements, UndefElements, SrcUndeElements);
  return true;
}
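// Usage sketch (hypothetical; `BV` is assumed to be an all-constant
// little-endian v4i32 BuildVectorSDNode): view its 128 bits as two i64s.
//
//   SmallVector<APInt> Bits;
//   BitVector Undefs;
//   if (BV->getConstantRawBits(/*IsLittleEndian=*/true, 64, Bits, Undefs)) {
//     // Bits.size() == 2 and each APInt is 64 bits wide.
//   }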
/// Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
                                      unsigned DstEltSizeInBits,
                                      SmallVectorImpl<APInt> &DstBitElements,
                                      ArrayRef<APInt> SrcBitElements,
                                      BitVector &DstUndefElements,
                                      const BitVector &SrcUndefElements) {
  unsigned NumSrcOps = SrcBitElements.size();
  unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
  assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
         "Invalid bitcast scale");
  assert(NumSrcOps == SrcUndefElements.size() && "Vector size mismatch");

  unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
  DstUndefElements.clear();
  DstUndefElements.resize(NumDstOps, false);
  DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits));

  // Concatenate src elements constant bits together into dst element.
  if (SrcEltSizeInBits <= DstEltSizeInBits) {
    unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
    for (unsigned I = 0; I != NumDstOps; ++I) {
      DstUndefElements.set(I);
      APInt &DstBits = DstBitElements[I];
      for (unsigned J = 0; J != Scale; ++J) {
        unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
        if (SrcUndefElements[Idx])
          continue;
        DstUndefElements.reset(I);
        const APInt &SrcBits = SrcBitElements[Idx];
        assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
               "Illegal constant bitwidths");
        DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
      }
    }
    return;
  }

  // Split src element constant bits into dst elements.
  unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
  for (unsigned I = 0; I != NumSrcOps; ++I) {
    if (SrcUndefElements[I]) {
      DstUndefElements.set(I * Scale, (I + 1) * Scale);
      continue;
    }
    const APInt &SrcBits = SrcBitElements[I];
    for (unsigned J = 0; J != Scale; ++J) {
      unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
      APInt &DstBits = DstBitElements[Idx];
      DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
    }
  }
}
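// Worked example (little endian, splitting 2 x i32 into 4 x i16): Scale = 2,
// so destination element I * 2 + J receives bits [J*16, J*16 + 16) of source
// element I; big endian reverses the J order via (Scale - J - 1).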
bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
/// If this BuildVector is constant and represents the numerical series
/// "<a, a+n, a+2n, ...>" for integers a and non-zero n, return the pair
/// (a, n).
std::optional<std::pair<APInt, APInt>>
BuildVectorSDNode::isConstantSequence() const {
  unsigned NumOps = getNumOperands();
  if (NumOps < 2)
    return std::nullopt;

  if (!isa<ConstantSDNode>(getOperand(0)) ||
      !isa<ConstantSDNode>(getOperand(1)))
    return std::nullopt;

  unsigned EltSize = getValueType(0).getScalarSizeInBits();
  APInt Start = getConstantOperandAPInt(0).trunc(EltSize);
  APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start;

  if (Stride.isZero())
    return std::nullopt;

  for (unsigned i = 2; i < NumOps; ++i) {
    if (!isa<ConstantSDNode>(getOperand(i)))
      return std::nullopt;

    APInt Val = getConstantOperandAPInt(i).trunc(EltSize);
    if (Val != (Start + (Stride * i)))
      return std::nullopt;
  }

  return std::make_pair(Start, Stride);
}
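// Worked example: <0, 2, 4, 6> yields Start = 0, Stride = 2 because every
// element i equals Start + Stride * i; <0, 2, 5, ...> fails at i = 2 (5 != 4).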
bool ShuffleVectorSDNode::isSplatMask(ArrayRef<int> Mask) {
  // Skip any leading undef (negative) entries.
  unsigned i, e;
  for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
    ;
  // ...
  // Every remaining defined entry must equal the first defined index.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
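// Worked example: mask <-1, 3, 3, -1> is a splat mask (every defined entry
// is 3), while <0, 1, 0, 1> is not (entry 1 differs from the first defined
// index 0).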
bool SelectionDAG::isConstantIntBuildVectorOrConstantInt(
    SDValue N, bool AllowOpaques) const {
  // ...
  // If this is a ConstantSDNode, check whether it is opaque.
  if (auto *C = dyn_cast<ConstantSDNode>(N))
    return AllowOpaques || !C->isOpaque();
  // ... (a BUILD_VECTOR of ConstantSDNodes also qualifies)

  // Treat a GlobalAddress supporting constant offset folding as a constant
  // integer.
  if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return true;

  if (N.getOpcode() == ISD::SPLAT_VECTOR &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return true;
  return false;
}

bool SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  // ...
  if (isa<ConstantFPSDNode>(N))
    return true;
  // ... (a BUILD_VECTOR of ConstantFPSDNodes also qualifies)
  if (N.getOpcode() == ISD::SPLAT_VECTOR &&
      isa<ConstantFPSDNode>(N.getOperand(0)))
    return true;
  return false;
}
std::optional<bool> SelectionDAG::isBoolConstant(SDValue N) const {
  ConstantSDNode *Const =
      isConstOrConstSplat(N, false, /*AllowTruncation=*/true);
  if (!Const)
    return std::nullopt;

  EVT VT = N->getValueType(0);
  const APInt CVal = Const->getAPIntValue().trunc(VT.getScalarSizeInBits());
  switch (TLI->getBooleanContents(VT)) {
  // ... (for each boolean-contents convention, return true/false when CVal
  //      matches it and std::nullopt otherwise)
  }
  return std::nullopt;
}
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    EVT VT = Ops[I].getValueType();
    // Skip chain operands (and glue that does not propagate divergence)
    // when computing divergence.
    if (VT != MVT::Other &&
        (VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
        Ops[I].getNode()->isDivergent()) {
      IsDivergent = true;
    }
  }
  // ...
  Node->OperandList = Ops;
  // ...
  Node->SDNodeBits.IsDivergent = IsDivergent;
  // ...
  checkForCycles(Node);
}
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  // Fold chain lists longer than the maximum operand count into a tree of
  // TokenFactor nodes.
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
SDValue SelectionDAG::makeStateFunctionCall(unsigned LibFunc, SDValue Ptr,
                                            SDValue InChain,
                                            const SDLoc &DLoc) {
  // ... (sets up the argument list for the pointer operand)
  RTLIB::Libcall LC = static_cast<RTLIB::Libcall>(LibFunc);
  // ... (emits the runtime-library call and returns its chain)
}
void SelectionDAG::copyExtraInfo(SDNode *From, SDNode *To) {
  assert(From && To && "Invalid SDNode; empty source SDValue?");
  auto I = SDEI.find(From);
  if (I == SDEI.end())
    return;

  // Use of operator[] on the DenseMap may invalidate the iterator, so copy
  // the NodeExtraInfo out first.
  NodeExtraInfo NEI = I->second;
  if (LLVM_LIKELY(!NEI.PCSections) && LLVM_LIKELY(!NEI.MMRA)) {
    // No deep copy required for the types of extra info set.
    SDEI[To] = std::move(NEI);
    return;
  }

  // The extra info has to be propagated to every new node introduced by
  // replacing From with To; first record everything reachable from From so
  // that only the genuinely new part of the DAG is annotated.
  SmallVector<const SDNode *> Leafs{From}; // Leafs reachable with VisitFrom.
  DenseSet<const SDNode *> FromReach;      // Nodes reachable from From.
  auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) {
    if (MaxDepth == 0) {
      // Remember this node in case the search has to resume with a larger
      // MaxDepth.
      Leafs.emplace_back(N);
      return;
    }
    if (!FromReach.insert(N).second)
      return;
    for (const SDValue &Op : N->op_values())
      Self(Self, Op.getNode(), MaxDepth - 1);
  };

  // Copy the extra info to To and all its transitive operands that are new.
  SDNode *EntrySDN = getEntryNode().getNode();
  SmallPtrSet<const SDNode *, 8> Visited;
  auto DeepCopyTo = [&](auto &&Self, const SDNode *N) {
    if (FromReach.contains(N))
      return true;
    if (!Visited.insert(N).second)
      return true;
    if (N == EntrySDN)
      return false;
    for (const SDValue &Op : N->op_values()) {
      if (N == To && Op.getNode() == EntrySDN) {
        // Special case: the new node's operand is the entry node; only the
        // new node itself needs the extra info.
        break;
      }
      if (!Self(Self, Op.getNode()))
        return false;
    }
    SDEI[N] = NEI;
    return true;
  };

  // Start with a low MaxDepth and double it until the deep copy succeeds;
  // the path to the common operands of From and To is usually short.
  for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
       PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) {
    SmallVector<const SDNode *> StartFrom;
    std::swap(StartFrom, Leafs);
    for (const SDNode *N : StartFrom)
      VisitFrom(VisitFrom, N, MaxDepth - PrevDepth);
    if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To)))
      return;
    // This should happen very rarely (the entry node was reached).
    LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth
                      << " too low\n");
  }

  // If even the maximum MaxDepth could not cover the subgraph reachable from
  // From, fall back to annotating To alone.
  errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
  assert(false && "From subgraph too complex - increase max. MaxDepth?");
  SDEI[To] = std::move(NEI);
}
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode *> &Visited,
                                 SmallPtrSetImpl<const SDNode *> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it
  // as a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG);
    dbgs() << "\n";
    abort();
  }

  // ... (recurse into the operands, then move N from Visited to Checked)
}
void llvm::checkForCycles(const llvm::SDNode *N, const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallPtrSet<const SDNode *, 32> Checked;
    checkForCyclesHelper(N, Visited, Checked, DAG);
  }
#endif // !NDEBUG
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is MaybeLiveUses might be modified but its content should be ignored(since it might not be complete). DeadArgumentEliminationPass
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines a hash set that can be used to remove duplication of nodes in a graph.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
mir Rename Register Operands
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt usub_sat(const APInt &RHS) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
LLVM_ABI APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
LLVM_ABI APInt byteSwap() const
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static bool isSameValue(const APInt &I1, const APInt &I2)
Determine if two APInts have the same value, after zero-extending one of them (if needed!...
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
Recycle small arrays allocated from a BumpPtrAllocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n,...
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ABI bool isConstant() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
LLVM_ABI Type * getType() const
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
MachineBasicBlock * MBB
MBB - The current block.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
const MDOperand & getOperand(unsigned I) const
static MVT getIntegerVT(unsigned BitWidth)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
bool isNonTemporal() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
bool isDereferenceable() const
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Pass interface - Implemented by all 'passes'.
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
BumpPtrAllocator & getAlloc()
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node) const
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
Represents a use of a SDNode.
EVT getValueType() const
Convenience function for get().getValueType().
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual bool isTargetMemoryOpcode(unsigned Opcode) const
Returns true if a node with the given target-specific opcode has a memory operand.
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
SDNodeFlags getFlags() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value \op N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
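A sketch (Cond, X, Y assumed in scope; Cond may be a scalar i1 or a vector of i1, and getSelect emits ISD::SELECT or ISD::VSELECT to match):
SDValue Sel = DAG.getSelect(DL, X.getValueType(), Cond, X, Y);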
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
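For instance, materializing a scalar immediate and folding it into an add (a sketch; X of type i32 assumed in scope):
SDValue C42 = DAG.getConstant(42, DL, MVT::i32);
SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i32, X, C42);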
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
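A sketch of advancing a pointer by a fixed byte offset, e.g. to address the high half of a split 16-byte access (Ptr assumed in scope):
SDValue HiPtr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(8), DL);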
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
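A sketch of spilling a value to a fresh stack slot with getStore (Val assumed in scope; the pointer info and alignment come from the slot):
SDValue Slot = DAG.CreateStackTemporary(Val.getValueType());
int FI = cast<FrameIndexSDNode>(Slot)->getIndex();
MachinePointerInfo PtrInfo =
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
SDValue Ch = DAG.getStore(DAG.getEntryNode(), DL, Val, Slot, PtrInfo,
                          DAG.getEVTAlign(Val.getValueType()));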
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI void dump() const
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
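This mirrors a classic combine: when both operands provably have a clear sign bit, a signed division behaves like an unsigned one (a sketch; N0, N1, VT assumed in scope):
if (DAG.SignBitIsZero(N0) && DAG.SignBitIsZero(N1))
  return DAG.getNode(ISD::UDIV, DL, VT, N0, N1); // sdiv == udiv here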
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
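A sketch of normalizing an index value to the target's pointer width, letting the helper pick sign-extension or truncation (Idx assumed in scope):
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
SDValue IdxFit = DAG.getSExtOrTrunc(Idx, DL, PtrVT);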
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI std::optional< unsigned > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
const TargetLibraryInfo & getLibInfo() const
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
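A sketch of the standard use: truncation to a narrower type loses no information (under later sign-extension) when every dropped bit is a copy of the sign bit (X and destination type VT assumed in scope):
unsigned SrcBits = X.getScalarValueSizeInBits();
unsigned DstBits = VT.getScalarSizeInBits();
bool LosslessTrunc = DAG.ComputeNumSignBits(X) > SrcBits - DstBits;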
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
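For example, splatting the scalar 1 into every lane of a fixed-width vector type VT (a sketch):
SDValue One = DAG.getConstant(1, DL, VT.getScalarType());
SDValue SplatOne = DAG.getSplatBuildVector(VT, DL, One);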
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
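A sketch of a typical known-bits query, proving divisibility by 4 from the minimum count of trailing zero bits (X assumed in scope):
KnownBits Known = DAG.computeKnownBits(X);
if (Known.countMinTrailingZeros() >= 2) {
  // X is provably a multiple of 4.
}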
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
LLVM_ABI bool expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={})
Expands a node with multiple results to an FP or vector libcall.
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
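A sketch of the usual chain discipline: side-effecting nodes hang off the entry token (or each other), independent chains are merged with a TokenFactor, and the final chain is published as the root (Ch1, Ch2 assumed in scope):
SDValue Joined = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch1, Ch2);
DAG.setRoot(Joined);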
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
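For example, reversing the lanes of a 4-element vector V (a sketch; mask elements index into the concatenation of both operands, and -1 marks an undef lane):
int Mask[4] = {3, 2, 1, 0};
SDValue Rev = DAG.getVectorShuffle(V.getValueType(), DL, V,
                                   DAG.getUNDEF(V.getValueType()), Mask);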
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
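A sketch of the common visited-set idiom for a DAG walk, where insert(...).second reports whether the node was newly added (Root assumed in scope):
SmallPtrSet<const SDNode *, 16> Visited;
SmallVector<const SDNode *, 16> Worklist = {Root.getNode()};
while (!Worklist.empty()) {
  const SDNode *N = Worklist.pop_back_val();
  if (!Visited.insert(N).second)
    continue; // already seen
  for (SDValue Op : N->op_values())
    Worklist.push_back(Op.getNode());
}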
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
const VecDesc * getVectorMappingInfo(StringRef F, const ElementCount &VF, bool Masked) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
const char * getMemcpyName() const
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const
True if target has some particular form of dealing with pointer arithmetic semantics for pointers wit...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
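A sketch of gating a combine on operation legality (A, B, VT assumed in scope):
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isOperationLegal(ISD::SMIN, VT))
  return DAG.getNode(ISD::SMIN, DL, VT, A, B);
// otherwise fall back, e.g. to a setcc + select expansion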
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
virtual unsigned getVectorIdxWidth(const DataLayout &DL) const
Returns the type to be used for the index operand of vector operations.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
LLVM_ABI void set(Value *Val)
User * getUser() const
Returns the User that contains this Use.
This class is used to represent a VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent a VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
StringRef getVectorFnName() const
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A.
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
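A sketch of constant-folding a funnel shift with these APIntOps helpers: fshl conceptually concatenates Hi:Lo, shifts left, and keeps the high word:
APInt Hi(8, 0xAB), Lo(8, 0xCD), Sh(8, 4);
APInt Res = APIntOps::fshl(Hi, Lo, Sh); // (0xAB:0xCD) << 4, high byte -> 0xBC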
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ POISON
POISON - A poison node.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.* intrinsics.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, serving as a placeholder in a basic block to improve sample count quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is an EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
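A sketch of matching across two (possibly splatted) constant operands, e.g. testing that paired elements sum to all-ones (A, B assumed in scope):
bool SumIsAllOnes = ISD::matchBinaryPredicate(
    A, B, [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
      return (LHS->getAPIntValue() + RHS->getAPIntValue()).isAllOnes();
    });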
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
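For example, checking that a shift amount, whether a scalar constant or a per-lane constant build vector, is nonzero in every element (a sketch; ShAmt assumed in scope):
bool AllNonZero = ISD::matchUnaryPredicate(
    ShAmt, [](ConstantSDNode *C) { return !C->isZero(); });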
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
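The MathExtras helpers relate as follows (a small self-check sketch):

#include <cassert>
#include "llvm/Support/MathExtras.h"

static void mathExtrasDemo() {
  assert(llvm::isPowerOf2_32(64u));
  assert(llvm::Log2_32(64u) == 6);        // floor log2
  assert(llvm::Log2_32_Ceil(65u) == 7);   // ceil log2
  assert(llvm::NextPowerOf2(64u) == 128); // strictly greater than the input
}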
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns the underlying integer value of an enum.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
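A hedged sketch of its typical use (isZeroScalarOrSplat is hypothetical; the dedicated helpers isNullConstant and isNullOrNullSplat cover the common cases directly):

#include "llvm/CodeGen/SelectionDAG.h"

static bool isZeroScalarOrSplat(llvm::SDValue Val) {
  // Accepts a scalar ConstantSDNode or a splat BUILD_VECTOR of one,
  // tolerating undef lanes.
  llvm::ConstantSDNode *C =
      llvm::isConstOrConstSplat(Val, /*AllowUndefs=*/true);
  return C && C->isZero();
}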
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
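The min/max families differ in NaN handling: 2008 minNum/maxNum return the non-NaN operand when given one quiet NaN, while 2019 minimum/maximum propagate the NaN. A sketch:

#include "llvm/ADT/APFloat.h"

static void fpMinMaxDemo() {
  llvm::APFloat One(1.0f);
  llvm::APFloat QNaN = llvm::APFloat::getQNaN(llvm::APFloat::IEEEsingle());
  llvm::APFloat A = llvm::maxnum(One, QNaN);  // 1.0: the NaN is dropped
  llvm::APFloat B = llvm::maximum(One, QNaN); // NaN: the NaN propagates
  (void)A;
  (void)B;
}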
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
static LLVM_ABI const fltSemantics & IEEEquad() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE
static constexpr roundingMode rmTowardPositive
static LLVM_ABI const fltSemantics & BFloat() LLVM_READNONE
opStatus
IEEE-754R 7: Default exception handling.
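A sketch of converting between the semantics above under an explicit rounding mode; losesInfo reports whether the conversion was inexact:

#include "llvm/ADT/APFloat.h"

static float narrowToFloat() {
  llvm::APFloat F(llvm::APFloat::IEEEdouble(), "0.1");
  bool LosesInfo = false;
  llvm::APFloat::opStatus St =
      F.convert(llvm::APFloat::IEEEsingle(),
                llvm::APFloat::rmNearestTiesToEven, &LosesInfo);
  (void)St; // opInexact here: 0.1 is not exactly representable
  return F.convertToFloat();
}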
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector of NumElements elements, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no fewer bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
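A sketch tying the EVT queries above together for a <4 x i32> type:

#include <cassert>
#include "llvm/CodeGen/ValueTypes.h"

static void evtDemo(llvm::LLVMContext &Ctx) {
  llvm::EVT VT = llvm::EVT::getVectorVT(Ctx, llvm::MVT::i32, 4);
  assert(VT.isVector() && VT.isInteger() && !VT.isScalableVector());
  assert(VT.getVectorNumElements() == 4);
  assert(VT.getScalarSizeInBits() == 32);
  assert(VT.getSizeInBits().getFixedValue() == 128);
  assert(VT.getScalarType() == llvm::MVT::i32);
}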
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for an in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from llvm.usub.sat(LHS, RHS).
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits resulting from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgCeilS.
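A sketch of how these transfer functions compose: adding a constant 1 to a value whose low two bits are known zero pins the low two bits of the sum.

#include <cassert>
#include "llvm/Support/KnownBits.h"

static void knownBitsDemo() {
  llvm::KnownBits LHS(8);
  LHS.Zero.setLowBits(2); // low two bits of LHS known to be 0
  llvm::KnownBits RHS = llvm::KnownBits::makeConstant(llvm::APInt(8, 1));
  llvm::KnownBits Sum = llvm::KnownBits::computeForAddSub(
      /*Add=*/true, /*NSW=*/false, /*NUW=*/false, LHS, RHS);
  assert(Sum.One[0] && Sum.Zero[1]); // the sum ends in ...01
}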
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
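A sketch of deriving a pointer for part of a stack slot (MF and FrameIdx are assumed to come from the caller):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

static llvm::MachinePointerInfo secondHalfInfo(llvm::MachineFunction &MF,
                                               int FrameIdx) {
  llvm::MachinePointerInfo Base =
      llvm::MachinePointerInfo::getFixedStack(MF, FrameIdx);
  return Base.getWithOffset(8); // 8 bytes into the same slot
}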
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
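A minimal listener sketch; registration happens in the base-class constructor and is undone in its destructor, so the object is typically stack-allocated around the mutating code:

#include "llvm/CodeGen/SelectionDAG.h"

struct DeletionCounter : llvm::SelectionDAG::DAGUpdateListener {
  unsigned NumDeleted = 0;
  explicit DeletionCounter(llvm::SelectionDAG &DAG)
      : DAGUpdateListener(DAG) {}
  void NodeDeleted(llvm::SDNode *, llvm::SDNode *) override { ++NumDeleted; }
};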
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
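The setters return *this, so a typical libcall lowering chains them; a hedged sketch (emitLibcall and all its parameters are assumed from the surrounding lowering code):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

static llvm::SDValue emitLibcall(llvm::SelectionDAG &DAG,
                                 const llvm::TargetLowering &TLI,
                                 const llvm::SDLoc &dl, llvm::SDValue Chain,
                                 llvm::Type *RetTy, llvm::SDValue Callee,
                                 llvm::TargetLowering::ArgListTy &&Args) {
  llvm::TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(llvm::CallingConv::C, RetTy, Callee, std::move(Args))
      .setTailCall(false);
  // LowerCallTo returns {result, output chain}.
  return TLI.LowerCallTo(CLI).first;
}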