99void SelectionDAG::DAGNodeDeletedListener::anchor() {}
100void SelectionDAG::DAGNodeInsertedListener::anchor() {}
102#define DEBUG_TYPE "selectiondag"
106 cl::desc(
"Gang up loads and stores generated by inlining of memcpy"));
109 cl::desc(
"Number limit for gluing ld/st of memcpy."),
114 cl::desc(
"DAG combiner limit number of steps when searching DAG "
115 "for predecessor nodes"));
132 return getValueAPF().bitwiseIsEqual(V);
153 if (
auto OptAPInt =
N->getOperand(0)->bitcastToAPInt()) {
155 N->getValueType(0).getVectorElementType().getSizeInBits();
156 SplatVal = OptAPInt->
trunc(EltSize);
161 auto *BV = dyn_cast<BuildVectorSDNode>(
N);
166 unsigned SplatBitSize;
168 unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
173 const bool IsBigEndian =
false;
174 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
175 EltSize, IsBigEndian) &&
176 EltSize == SplatBitSize;
185 N =
N->getOperand(0).getNode();
194 unsigned i = 0, e =
N->getNumOperands();
197 while (i != e &&
N->getOperand(i).isUndef())
201 if (i == e)
return false;
213 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
214 if (OptAPInt->countr_one() < EltSize)
222 for (++i; i != e; ++i)
223 if (
N->getOperand(i) != NotZero && !
N->getOperand(i).isUndef())
231 N =
N->getOperand(0).getNode();
240 bool IsAllUndef =
true;
253 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
254 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
255 if (OptAPInt->countr_zero() < EltSize)
282 if (!isa<ConstantSDNode>(
Op))
295 if (!isa<ConstantFPSDNode>(
Op))
303 assert(
N->getValueType(0).isVector() &&
"Expected a vector!");
305 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
306 if (EltSize <= NewEltSize)
310 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
315 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
325 if (!isa<ConstantSDNode>(
Op))
328 APInt C =
Op->getAsAPIntVal().trunc(EltSize);
329 if (
Signed &&
C.trunc(NewEltSize).sext(EltSize) !=
C)
331 if (!
Signed &&
C.trunc(NewEltSize).zext(EltSize) !=
C)
342 if (
N->getNumOperands() == 0)
348 return N->getOpcode() ==
ISD::FREEZE &&
N->getOperand(0).isUndef();
351template <
typename ConstNodeType>
353 std::function<
bool(ConstNodeType *)> Match,
354 bool AllowUndefs,
bool AllowTruncation) {
356 if (
auto *
C = dyn_cast<ConstNodeType>(
Op))
364 EVT SVT =
Op.getValueType().getScalarType();
366 if (AllowUndefs &&
Op.getOperand(i).isUndef()) {
372 auto *Cst = dyn_cast<ConstNodeType>(
Op.getOperand(i));
373 if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||
380template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
382template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
388 bool AllowUndefs,
bool AllowTypeMismatch) {
389 if (!AllowTypeMismatch &&
LHS.getValueType() !=
RHS.getValueType())
393 if (
auto *LHSCst = dyn_cast<ConstantSDNode>(
LHS))
394 if (
auto *RHSCst = dyn_cast<ConstantSDNode>(
RHS))
395 return Match(LHSCst, RHSCst);
398 if (
LHS.getOpcode() !=
RHS.getOpcode() ||
403 EVT SVT =
LHS.getValueType().getScalarType();
404 for (
unsigned i = 0, e =
LHS.getNumOperands(); i != e; ++i) {
407 bool LHSUndef = AllowUndefs && LHSOp.
isUndef();
408 bool RHSUndef = AllowUndefs && RHSOp.
isUndef();
409 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
410 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
411 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
413 if (!AllowTypeMismatch && (LHSOp.
getValueType() != SVT ||
416 if (!Match(LHSCst, RHSCst))
438 switch (VecReduceOpcode) {
443 case ISD::VP_REDUCE_FADD:
444 case ISD::VP_REDUCE_SEQ_FADD:
448 case ISD::VP_REDUCE_FMUL:
449 case ISD::VP_REDUCE_SEQ_FMUL:
452 case ISD::VP_REDUCE_ADD:
455 case ISD::VP_REDUCE_MUL:
458 case ISD::VP_REDUCE_AND:
461 case ISD::VP_REDUCE_OR:
464 case ISD::VP_REDUCE_XOR:
467 case ISD::VP_REDUCE_SMAX:
470 case ISD::VP_REDUCE_SMIN:
473 case ISD::VP_REDUCE_UMAX:
476 case ISD::VP_REDUCE_UMIN:
479 case ISD::VP_REDUCE_FMAX:
482 case ISD::VP_REDUCE_FMIN:
485 case ISD::VP_REDUCE_FMAXIMUM:
488 case ISD::VP_REDUCE_FMINIMUM:
497#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
500#include "llvm/IR/VPIntrinsics.def"
508#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
509#define VP_PROPERTY_BINARYOP return true;
510#define END_REGISTER_VP_SDNODE(VPSD) break;
511#include "llvm/IR/VPIntrinsics.def"
520 case ISD::VP_REDUCE_ADD:
521 case ISD::VP_REDUCE_MUL:
522 case ISD::VP_REDUCE_AND:
523 case ISD::VP_REDUCE_OR:
524 case ISD::VP_REDUCE_XOR:
525 case ISD::VP_REDUCE_SMAX:
526 case ISD::VP_REDUCE_SMIN:
527 case ISD::VP_REDUCE_UMAX:
528 case ISD::VP_REDUCE_UMIN:
529 case ISD::VP_REDUCE_FMAX:
530 case ISD::VP_REDUCE_FMIN:
531 case ISD::VP_REDUCE_FMAXIMUM:
532 case ISD::VP_REDUCE_FMINIMUM:
533 case ISD::VP_REDUCE_FADD:
534 case ISD::VP_REDUCE_FMUL:
535 case ISD::VP_REDUCE_SEQ_FADD:
536 case ISD::VP_REDUCE_SEQ_FMUL:
546#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
549#include "llvm/IR/VPIntrinsics.def"
558#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
561#include "llvm/IR/VPIntrinsics.def"
571#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
572#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
573#define END_REGISTER_VP_SDNODE(VPOPC) break;
574#include "llvm/IR/VPIntrinsics.def"
583#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
584#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
585#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
586#include "llvm/IR/VPIntrinsics.def"
633 bool isIntegerLike) {
658 bool IsInteger =
Type.isInteger();
663 unsigned Op = Op1 | Op2;
679 bool IsInteger =
Type.isInteger();
714 ID.AddPointer(VTList.
VTs);
720 for (
const auto &
Op : Ops) {
721 ID.AddPointer(
Op.getNode());
722 ID.AddInteger(
Op.getResNo());
729 for (
const auto &
Op : Ops) {
730 ID.AddPointer(
Op.getNode());
731 ID.AddInteger(
Op.getResNo());
744 switch (
N->getOpcode()) {
753 ID.AddPointer(
C->getConstantIntValue());
754 ID.AddBoolean(
C->isOpaque());
759 ID.AddPointer(cast<ConstantFPSDNode>(
N)->getConstantFPValue());
775 ID.AddInteger(cast<RegisterSDNode>(
N)->
getReg().
id());
778 ID.AddPointer(cast<RegisterMaskSDNode>(
N)->getRegMask());
781 ID.AddPointer(cast<SrcValueSDNode>(
N)->getValue());
785 ID.AddInteger(cast<FrameIndexSDNode>(
N)->getIndex());
788 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getGuid());
789 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getIndex());
790 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getAttributes());
794 ID.AddInteger(cast<JumpTableSDNode>(
N)->getIndex());
795 ID.AddInteger(cast<JumpTableSDNode>(
N)->getTargetFlags());
800 ID.AddInteger(CP->getAlign().value());
801 ID.AddInteger(CP->getOffset());
802 if (CP->isMachineConstantPoolEntry())
803 CP->getMachineCPVal()->addSelectionDAGCSEId(
ID);
805 ID.AddPointer(CP->getConstVal());
806 ID.AddInteger(CP->getTargetFlags());
818 ID.AddInteger(LD->getMemoryVT().getRawBits());
819 ID.AddInteger(LD->getRawSubclassData());
820 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
821 ID.AddInteger(LD->getMemOperand()->getFlags());
826 ID.AddInteger(ST->getMemoryVT().getRawBits());
827 ID.AddInteger(ST->getRawSubclassData());
828 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
829 ID.AddInteger(ST->getMemOperand()->getFlags());
840 case ISD::VP_LOAD_FF: {
841 const auto *LD = cast<VPLoadFFSDNode>(
N);
842 ID.AddInteger(LD->getMemoryVT().getRawBits());
843 ID.AddInteger(LD->getRawSubclassData());
844 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
845 ID.AddInteger(LD->getMemOperand()->getFlags());
848 case ISD::VP_STORE: {
856 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
863 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
870 case ISD::VP_GATHER: {
878 case ISD::VP_SCATTER: {
970 ID.AddPointer(cast<MDNodeSDNode>(
N)->getMD());
976 if (
auto *MN = dyn_cast<MemIntrinsicSDNode>(
N)) {
977 ID.AddInteger(MN->getRawSubclassData());
978 ID.AddInteger(MN->getPointerInfo().getAddrSpace());
979 ID.AddInteger(MN->getMemOperand()->getFlags());
980 ID.AddInteger(MN->getMemoryVT().getRawBits());
1003 if (
N->getValueType(0) == MVT::Glue)
1006 switch (
N->getOpcode()) {
1014 for (
unsigned i = 1, e =
N->getNumValues(); i != e; ++i)
1015 if (
N->getValueType(i) == MVT::Glue)
1032 if (Node.use_empty())
1047 while (!DeadNodes.
empty()) {
1056 DUL->NodeDeleted(
N,
nullptr);
1059 RemoveNodeFromCSEMaps(
N);
1090 RemoveNodeFromCSEMaps(
N);
1094 DeleteNodeNotInCSEMaps(
N);
1097void SelectionDAG::DeleteNodeNotInCSEMaps(
SDNode *
N) {
1098 assert(
N->getIterator() != AllNodes.begin() &&
1099 "Cannot delete the entry node!");
1100 assert(
N->use_empty() &&
"Cannot delete a node that is not dead!");
1109 assert(!(V->isVariadic() && isParameter));
1111 ByvalParmDbgValues.push_back(V);
1113 DbgValues.push_back(V);
1114 for (
const SDNode *Node : V->getSDNodes())
1116 DbgValMap[Node].push_back(V);
1121 if (
I == DbgValMap.end())
1123 for (
auto &Val:
I->second)
1124 Val->setIsInvalidated();
1128void SelectionDAG::DeallocateNode(
SDNode *
N) {
1151void SelectionDAG::verifyNode(
SDNode *
N)
const {
1152 switch (
N->getOpcode()) {
1154 if (
N->isTargetOpcode())
1158 EVT VT =
N->getValueType(0);
1159 assert(
N->getNumValues() == 1 &&
"Too many results!");
1161 "Wrong return type!");
1162 assert(
N->getNumOperands() == 2 &&
"Wrong number of operands!");
1163 assert(
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1164 "Mismatched operand types!");
1166 "Wrong operand type!");
1168 "Wrong return type size");
1172 assert(
N->getNumValues() == 1 &&
"Too many results!");
1173 assert(
N->getValueType(0).isVector() &&
"Wrong return type!");
1174 assert(
N->getNumOperands() ==
N->getValueType(0).getVectorNumElements() &&
1175 "Wrong number of operands!");
1176 EVT EltVT =
N->getValueType(0).getVectorElementType();
1178 assert((
Op.getValueType() == EltVT ||
1179 (EltVT.
isInteger() &&
Op.getValueType().isInteger() &&
1180 EltVT.
bitsLE(
Op.getValueType()))) &&
1181 "Wrong operand type!");
1182 assert(
Op.getValueType() ==
N->getOperand(0).getValueType() &&
1183 "Operands must all have the same type");
1195void SelectionDAG::InsertNode(
SDNode *
N) {
1196 AllNodes.push_back(
N);
1198 N->PersistentId = NextPersistentId++;
1201 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1202 DUL->NodeInserted(
N);
1209bool SelectionDAG::RemoveNodeFromCSEMaps(
SDNode *
N) {
1210 bool Erased =
false;
1211 switch (
N->getOpcode()) {
1214 assert(CondCodeNodes[cast<CondCodeSDNode>(
N)->
get()] &&
1215 "Cond code doesn't exist!");
1216 Erased = CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] !=
nullptr;
1217 CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] =
nullptr;
1220 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(
N)->getSymbol());
1224 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1229 auto *MCSN = cast<MCSymbolSDNode>(
N);
1230 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1234 EVT VT = cast<VTSDNode>(
N)->getVT();
1236 Erased = ExtendedValueTypeNodes.erase(VT);
1247 Erased = CSEMap.RemoveNode(
N);
1254 if (!Erased &&
N->getValueType(
N->getNumValues()-1) != MVT::Glue &&
1269SelectionDAG::AddModifiedNodeToCSEMaps(
SDNode *
N) {
1273 SDNode *Existing = CSEMap.GetOrInsertNode(
N);
1274 if (Existing !=
N) {
1279 if (
auto *MemNode = dyn_cast<MemSDNode>(Existing))
1280 MemNode->refineRanges(cast<MemSDNode>(
N)->getMemOperand());
1284 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1285 DUL->NodeDeleted(
N, Existing);
1286 DeleteNodeNotInCSEMaps(
N);
1292 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1293 DUL->NodeUpdated(
N);
1311 Node->intersectFlagsWith(
N->getFlags());
1331 Node->intersectFlagsWith(
N->getFlags());
1349 Node->intersectFlagsWith(
N->getFlags());
1362 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0,
DebugLoc(),
1365 InsertNode(&EntryNode);
1376 SDAGISelPass = PassPtr;
1380 LibInfo = LibraryInfo;
1386 FnVarLocs = VarLocs;
1390 assert(!UpdateListeners &&
"Dangling registered DAGUpdateListeners");
1392 OperandRecycler.clear(OperandAllocator);
1400void SelectionDAG::allnodes_clear() {
1401 assert(&*AllNodes.begin() == &EntryNode);
1402 AllNodes.remove(AllNodes.begin());
1403 while (!AllNodes.empty())
1404 DeallocateNode(&AllNodes.front());
1406 NextPersistentId = 0;
1412 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1414 switch (
N->getOpcode()) {
1419 "debug location. Use another overload.");
1426 const SDLoc &
DL,
void *&InsertPos) {
1427 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1429 switch (
N->getOpcode()) {
1435 if (
N->getDebugLoc() !=
DL.getDebugLoc())
1442 if (
DL.getIROrder() &&
DL.getIROrder() <
N->getIROrder())
1443 N->setDebugLoc(
DL.getDebugLoc());
1452 OperandRecycler.clear(OperandAllocator);
1453 OperandAllocator.
Reset();
1456 ExtendedValueTypeNodes.clear();
1457 ExternalSymbols.clear();
1458 TargetExternalSymbols.clear();
1464 EntryNode.UseList =
nullptr;
1465 InsertNode(&EntryNode);
1471 return VT.
bitsGT(
Op.getValueType())
1477std::pair<SDValue, SDValue>
1481 "Strict no-op FP extend/round not allowed.");
1488 return std::pair<SDValue, SDValue>(Res,
SDValue(Res.
getNode(), 1));
1492 return VT.
bitsGT(
Op.getValueType()) ?
1498 return VT.
bitsGT(
Op.getValueType()) ?
1504 return VT.
bitsGT(
Op.getValueType()) ?
1512 auto Type =
Op.getValueType();
1516 auto Size =
Op.getValueSizeInBits();
1527 auto Type =
Op.getValueType();
1531 auto Size =
Op.getValueSizeInBits();
1542 auto Type =
Op.getValueType();
1546 auto Size =
Op.getValueSizeInBits();
1564 EVT OpVT =
Op.getValueType();
1566 "Cannot getZeroExtendInReg FP types");
1568 "getZeroExtendInReg type should be vector iff the operand "
1572 "Vector element counts must match in getZeroExtendInReg");
1584 EVT OpVT =
Op.getValueType();
1586 "Cannot getVPZeroExtendInReg FP types");
1588 "getVPZeroExtendInReg type and operand type should be vector!");
1590 "Vector element counts must match in getZeroExtendInReg");
1629 return getNode(ISD::VP_XOR,
DL, VT, Val, TrueValue, Mask, EVL);
1640 return getNode(ISD::VP_ZERO_EXTEND,
DL, VT,
Op, Mask, EVL);
1642 return getNode(ISD::VP_TRUNCATE,
DL, VT,
Op, Mask, EVL);
1662 bool isT,
bool isO) {
1668 bool isT,
bool isO) {
1669 return getConstant(*ConstantInt::get(*Context, Val),
DL, VT, isT, isO);
1673 EVT VT,
bool isT,
bool isO) {
1681 if (isa<VectorType>(Elt->
getType()))
1696 Elt = ConstantInt::get(*
getContext(), NewVal);
1715 "Can only handle an even split!");
1719 for (
unsigned i = 0; i != Parts; ++i)
1721 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1722 ViaEltVT, isT, isO));
1727 unsigned ViaVecNumElts = VT.
getSizeInBits() / ViaEltSizeInBits;
1738 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1739 ViaEltVT, isT, isO));
1744 std::reverse(EltParts.
begin(), EltParts.
end());
1763 "APInt size does not match type size!");
1772 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1777 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1778 CSEMap.InsertNode(
N, IP);
1790 bool isT,
bool isO) {
1798 IsTarget, IsOpaque);
1830 EVT VT,
bool isTarget) {
1838 if (isa<VectorType>(Elt->
getType()))
1851 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1856 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1857 CSEMap.InsertNode(
N, IP);
1871 if (EltVT == MVT::f32)
1873 if (EltVT == MVT::f64)
1875 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1876 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1887 EVT VT, int64_t
Offset,
bool isTargetGA,
1888 unsigned TargetFlags) {
1889 assert((TargetFlags == 0 || isTargetGA) &&
1890 "Cannot set target flags on target-independent globals");
1908 ID.AddInteger(TargetFlags);
1910 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
1913 auto *
N = newSDNode<GlobalAddressSDNode>(
1914 Opc,
DL.getIROrder(),
DL.getDebugLoc(), GV, VTs,
Offset, TargetFlags);
1915 CSEMap.InsertNode(
N, IP);
1927 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1930 auto *
N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
1931 CSEMap.InsertNode(
N, IP);
1937 unsigned TargetFlags) {
1938 assert((TargetFlags == 0 || isTarget) &&
1939 "Cannot set target flags on target-independent jump tables");
1945 ID.AddInteger(TargetFlags);
1947 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1950 auto *
N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
1951 CSEMap.InsertNode(
N, IP);
1965 bool isTarget,
unsigned TargetFlags) {
1966 assert((TargetFlags == 0 || isTarget) &&
1967 "Cannot set target flags on target-independent globals");
1976 ID.AddInteger(Alignment->value());
1979 ID.AddInteger(TargetFlags);
1981 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1984 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
1986 CSEMap.InsertNode(
N, IP);
1995 bool isTarget,
unsigned TargetFlags) {
1996 assert((TargetFlags == 0 || isTarget) &&
1997 "Cannot set target flags on target-independent globals");
2004 ID.AddInteger(Alignment->value());
2006 C->addSelectionDAGCSEId(
ID);
2007 ID.AddInteger(TargetFlags);
2009 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2012 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2014 CSEMap.InsertNode(
N, IP);
2024 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2027 auto *
N = newSDNode<BasicBlockSDNode>(
MBB);
2028 CSEMap.InsertNode(
N, IP);
2035 ValueTypeNodes.size())
2042 N = newSDNode<VTSDNode>(VT);
2050 N = newSDNode<ExternalSymbolSDNode>(
false,
Sym, 0,
getVTList(VT));
2065 unsigned TargetFlags) {
2067 TargetExternalSymbols[std::pair<std::string, unsigned>(
Sym, TargetFlags)];
2069 N = newSDNode<ExternalSymbolSDNode>(
true,
Sym, TargetFlags,
getVTList(VT));
2075 if ((
unsigned)
Cond >= CondCodeNodes.size())
2076 CondCodeNodes.resize(
Cond+1);
2078 if (!CondCodeNodes[
Cond]) {
2079 auto *
N = newSDNode<CondCodeSDNode>(
Cond);
2080 CondCodeNodes[
Cond] =
N;
2088 bool ConstantFold) {
2090 "APInt size does not match type size!");
2107 bool ConstantFold) {
2108 if (EC.isScalable())
2121 const APInt &StepVal) {
2145 "Must have the same number of vector elements as mask elements!");
2147 "Invalid VECTOR_SHUFFLE");
2155 int NElts = Mask.size();
2157 [&](
int M) {
return M < (NElts * 2) && M >= -1; }) &&
2158 "Index out of range");
2166 for (
int i = 0; i != NElts; ++i)
2167 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2183 for (
int i = 0; i < NElts; ++i) {
2184 if (MaskVec[i] <
Offset || MaskVec[i] >= (
Offset + NElts))
2188 if (UndefElements[MaskVec[i] -
Offset]) {
2194 if (!UndefElements[i])
2198 if (
auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
2199 BlendSplat(N1BV, 0);
2200 if (
auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
2201 BlendSplat(N2BV, NElts);
2206 bool AllLHS =
true, AllRHS =
true;
2208 for (
int i = 0; i != NElts; ++i) {
2209 if (MaskVec[i] >= NElts) {
2214 }
else if (MaskVec[i] >= 0) {
2218 if (AllLHS && AllRHS)
2220 if (AllLHS && !N2Undef)
2233 bool Identity =
true, AllSame =
true;
2234 for (
int i = 0; i != NElts; ++i) {
2235 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity =
false;
2236 if (MaskVec[i] != MaskVec[0]) AllSame =
false;
2238 if (Identity && NElts)
2248 V = V->getOperand(0);
2251 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2271 if (AllSame && SameNumElts) {
2272 EVT BuildVT = BV->getValueType(0);
2289 for (
int i = 0; i != NElts; ++i)
2290 ID.AddInteger(MaskVec[i]);
2293 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2299 int *MaskAlloc = OperandAllocator.
Allocate<
int>(NElts);
2302 auto *
N = newSDNode<ShuffleVectorSDNode>(VTs, dl.
getIROrder(),
2304 createOperands(
N, Ops);
2306 CSEMap.InsertNode(
N, IP);
2327 ID.AddInteger(Reg.id());
2329 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2332 auto *
N = newSDNode<RegisterSDNode>(Reg, VTs);
2334 CSEMap.InsertNode(
N, IP);
2342 ID.AddPointer(RegMask);
2344 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2347 auto *
N = newSDNode<RegisterMaskSDNode>(RegMask);
2348 CSEMap.InsertNode(
N, IP);
2363 ID.AddPointer(Label);
2365 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2370 createOperands(
N, Ops);
2372 CSEMap.InsertNode(
N, IP);
2378 int64_t
Offset,
bool isTarget,
2379 unsigned TargetFlags) {
2387 ID.AddInteger(TargetFlags);
2389 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2392 auto *
N = newSDNode<BlockAddressSDNode>(
Opc, VTs, BA,
Offset, TargetFlags);
2393 CSEMap.InsertNode(
N, IP);
2404 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2407 auto *
N = newSDNode<SrcValueSDNode>(V);
2408 CSEMap.InsertNode(
N, IP);
2419 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2422 auto *
N = newSDNode<MDNodeSDNode>(MD);
2423 CSEMap.InsertNode(
N, IP);
2429 if (VT == V.getValueType())
2436 unsigned SrcAS,
unsigned DestAS) {
2441 ID.AddInteger(SrcAS);
2442 ID.AddInteger(DestAS);
2445 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2449 VTs, SrcAS, DestAS);
2450 createOperands(
N, Ops);
2452 CSEMap.InsertNode(
N, IP);
2464 EVT OpTy =
Op.getValueType();
2466 if (OpTy == ShTy || OpTy.
isVector())
return Op;
2481 if (
Op.getNode() != FPNode)
2485 while (!Worklist.
empty()) {
2518 std::optional<unsigned> CallRetResNo) {
2520 EVT VT = Node->getValueType(0);
2521 unsigned NumResults = Node->getNumValues();
2523 if (LC == RTLIB::UNKNOWN_LIBCALL)
2530 auto getVecDesc = [&]() ->
VecDesc const * {
2531 for (
bool Masked : {
false,
true}) {
2542 if (VT.
isVector() && !(VD = getVecDesc()))
2552 auto *ST = cast<StoreSDNode>(
User);
2553 SDValue StoreValue = ST->getValue();
2554 unsigned ResNo = StoreValue.
getResNo();
2556 if (CallRetResNo == ResNo)
2559 if (!ST->isSimple() || ST->getAddressSpace() != 0)
2562 if (StoresInChain && ST->getChain() != StoresInChain)
2566 if (ST->getAlign() <
2574 ResultStores[ResNo] = ST;
2575 StoresInChain = ST->getChain();
2581 for (
const SDValue &
Op : Node->op_values()) {
2582 EVT ArgVT =
Op.getValueType();
2584 Args.emplace_back(
Op, ArgTy);
2591 if (ResNo == CallRetResNo)
2593 EVT ResVT = Node->getValueType(ResNo);
2595 ResultPtrs[ResNo] = ResultPtr;
2596 Args.emplace_back(ResultPtr,
PointerTy);
2608 Type *RetType = CallRetResNo.has_value()
2609 ? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)
2621 if (ResNo == CallRetResNo) {
2627 getLoad(Node->getValueType(ResNo),
DL, CallChain, ResultPtr, PtrInfo);
2633 PtrInfo = ST->getPointerInfo();
2639 Results.push_back(LoadResult);
2648 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2649 EVT VT = Node->getValueType(0);
2650 SDValue Tmp1 = Node->getOperand(0);
2651 SDValue Tmp2 = Node->getOperand(1);
2652 const MaybeAlign MA(Node->getConstantOperandVal(3));
2684 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2685 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2696 Align RedAlign = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2706 if (RedAlign > StackAlign) {
2709 unsigned NumIntermediates;
2711 NumIntermediates, RegisterVT);
2713 Align RedAlign2 = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2714 if (RedAlign2 < RedAlign)
2715 RedAlign = RedAlign2;
2720 RedAlign = std::min(RedAlign, StackAlign);
2735 false,
nullptr, StackID);
2750 "Don't know how to choose the maximum size when creating a stack "
2759 Align Align = std::max(
DL.getPrefTypeAlign(Ty1),
DL.getPrefTypeAlign(Ty2));
2767 auto GetUndefBooleanConstant = [&]() {
2806 return GetUndefBooleanConstant();
2811 return GetUndefBooleanConstant();
2820 const APInt &C2 = N2C->getAPIntValue();
2822 const APInt &C1 = N1C->getAPIntValue();
2829 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2830 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2832 if (N1CFP && N2CFP) {
2837 return GetUndefBooleanConstant();
2842 return GetUndefBooleanConstant();
2848 return GetUndefBooleanConstant();
2853 return GetUndefBooleanConstant();
2858 return GetUndefBooleanConstant();
2864 return GetUndefBooleanConstant();
2893 return getSetCC(dl, VT, N2, N1, SwappedCond);
2894 }
else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2909 return GetUndefBooleanConstant();
2920 unsigned BitWidth =
Op.getScalarValueSizeInBits();
2928 unsigned Depth)
const {
2936 const APInt &DemandedElts,
2937 unsigned Depth)
const {
2944 unsigned Depth )
const {
2950 unsigned Depth)
const {
2955 const APInt &DemandedElts,
2956 unsigned Depth)
const {
2957 EVT VT =
Op.getValueType();
2964 for (
unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2965 if (!DemandedElts[EltIdx])
2969 KnownZeroElements.
setBit(EltIdx);
2971 return KnownZeroElements;
2981 unsigned Opcode = V.getOpcode();
2982 EVT VT = V.getValueType();
2985 "scalable demanded bits are ignored");
2997 UndefElts = V.getOperand(0).isUndef()
3006 APInt UndefLHS, UndefRHS;
3015 (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
3016 UndefElts = UndefLHS | UndefRHS;
3046 for (
unsigned i = 0; i != NumElts; ++i) {
3052 if (!DemandedElts[i])
3054 if (Scl && Scl !=
Op)
3064 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
3065 for (
int i = 0; i != (int)NumElts; ++i) {
3071 if (!DemandedElts[i])
3073 if (M < (
int)NumElts)
3076 DemandedRHS.
setBit(M - NumElts);
3088 auto CheckSplatSrc = [&](
SDValue Src,
const APInt &SrcElts) {
3090 return (SrcElts.popcount() == 1) ||
3092 (SrcElts & SrcUndefs).
isZero());
3094 if (!DemandedLHS.
isZero())
3095 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
3096 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
3100 SDValue Src = V.getOperand(0);
3102 if (Src.getValueType().isScalableVector())
3105 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3118 SDValue Src = V.getOperand(0);
3120 if (Src.getValueType().isScalableVector())
3122 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3124 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3126 UndefElts = UndefSrcElts.
trunc(NumElts);
3132 SDValue Src = V.getOperand(0);
3133 EVT SrcVT = Src.getValueType();
3143 if ((
BitWidth % SrcBitWidth) == 0) {
3145 unsigned Scale =
BitWidth / SrcBitWidth;
3147 APInt ScaledDemandedElts =
3149 for (
unsigned I = 0;
I != Scale; ++
I) {
3153 SubDemandedElts &= ScaledDemandedElts;
3157 if (!SubUndefElts.
isZero())
3171 EVT VT = V.getValueType();
3181 (AllowUndefs || !UndefElts);
3187 EVT VT = V.getValueType();
3188 unsigned Opcode = V.getOpcode();
3209 SplatIdx = (UndefElts & DemandedElts).
countr_one();
3223 auto *SVN = cast<ShuffleVectorSDNode>(V);
3224 if (!SVN->isSplat())
3226 int Idx = SVN->getSplatIndex();
3227 int NumElts = V.getValueType().getVectorNumElements();
3228 SplatIdx =
Idx % NumElts;
3229 return V.getOperand(
Idx / NumElts);
3245 if (LegalSVT.
bitsLT(SVT))
3253std::optional<ConstantRange>
3255 unsigned Depth)
const {
3258 "Unknown shift node");
3260 unsigned BitWidth = V.getScalarValueSizeInBits();
3262 if (
auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3263 const APInt &ShAmt = Cst->getAPIntValue();
3265 return std::nullopt;
3269 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
3270 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
3271 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3272 if (!DemandedElts[i])
3274 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3276 MinAmt = MaxAmt =
nullptr;
3279 const APInt &ShAmt = SA->getAPIntValue();
3281 return std::nullopt;
3282 if (!MinAmt || MinAmt->
ugt(ShAmt))
3284 if (!MaxAmt || MaxAmt->ult(ShAmt))
3287 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3288 "Failed to find matching min/max shift amounts");
3289 if (MinAmt && MaxAmt)
3299 return std::nullopt;
3302std::optional<uint64_t>
3304 unsigned Depth)
const {
3307 "Unknown shift node");
3308 if (std::optional<ConstantRange> AmtRange =
3310 if (
const APInt *ShAmt = AmtRange->getSingleElement())
3311 return ShAmt->getZExtValue();
3312 return std::nullopt;
3315std::optional<uint64_t>
3317 EVT VT = V.getValueType();
3324std::optional<uint64_t>
3326 unsigned Depth)
const {
3329 "Unknown shift node");
3330 if (std::optional<ConstantRange> AmtRange =
3332 return AmtRange->getUnsignedMin().getZExtValue();
3333 return std::nullopt;
3336std::optional<uint64_t>
3338 EVT VT = V.getValueType();
3345std::optional<uint64_t>
3347 unsigned Depth)
const {
3350 "Unknown shift node");
3351 if (std::optional<ConstantRange> AmtRange =
3353 return AmtRange->getUnsignedMax().getZExtValue();
3354 return std::nullopt;
3357std::optional<uint64_t>
3359 EVT VT = V.getValueType();
3370 EVT VT =
Op.getValueType();
3385 unsigned Depth)
const {
3386 unsigned BitWidth =
Op.getScalarValueSizeInBits();
3390 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
3400 assert((!
Op.getValueType().isFixedLengthVector() ||
3401 NumElts ==
Op.getValueType().getVectorNumElements()) &&
3402 "Unexpected vector size");
3407 unsigned Opcode =
Op.getOpcode();
3415 "Expected SPLAT_VECTOR implicit truncation");
3422 unsigned ScalarSize =
Op.getOperand(0).getScalarValueSizeInBits();
3424 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3431 const APInt &Step =
Op.getConstantOperandAPInt(0);
3440 const APInt MinNumElts =
3446 .
umul_ov(MinNumElts, Overflow);
3450 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
3458 assert(!
Op.getValueType().isScalableVector());
3462 if (!DemandedElts[i])
3471 "Expected BUILD_VECTOR implicit truncation");
3484 assert(!
Op.getValueType().isScalableVector());
3487 APInt DemandedLHS, DemandedRHS;
3491 DemandedLHS, DemandedRHS))
3496 if (!!DemandedLHS) {
3504 if (!!DemandedRHS) {
3513 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
3518 if (
Op.getValueType().isScalableVector())
3522 EVT SubVectorVT =
Op.getOperand(0).getValueType();
3525 for (
unsigned i = 0; i != NumSubVectors; ++i) {
3527 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
3528 if (!!DemandedSub) {
3540 if (
Op.getValueType().isScalableVector())
3547 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3549 APInt DemandedSrcElts = DemandedElts;
3554 if (!!DemandedSubElts) {
3559 if (!!DemandedSrcElts) {
3569 if (
Op.getValueType().isScalableVector() || Src.getValueType().isScalableVector())
3572 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3578 if (
Op.getValueType().isScalableVector())
3582 if (DemandedElts != 1)
3593 if (
Op.getValueType().isScalableVector())
3613 if ((
BitWidth % SubBitWidth) == 0) {
3620 unsigned SubScale =
BitWidth / SubBitWidth;
3621 APInt SubDemandedElts(NumElts * SubScale, 0);
3622 for (
unsigned i = 0; i != NumElts; ++i)
3623 if (DemandedElts[i])
3624 SubDemandedElts.
setBit(i * SubScale);
3626 for (
unsigned i = 0; i != SubScale; ++i) {
3629 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3630 Known.
insertBits(Known2, SubBitWidth * Shifts);
3635 if ((SubBitWidth %
BitWidth) == 0) {
3636 assert(
Op.getValueType().isVector() &&
"Expected bitcast to vector");
3641 unsigned SubScale = SubBitWidth /
BitWidth;
3642 APInt SubDemandedElts =
3647 for (
unsigned i = 0; i != NumElts; ++i)
3648 if (DemandedElts[i]) {
3649 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3680 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3684 Op.getOperand(0), DemandedElts,
false,
Depth + 1);
3690 if (
Op->getFlags().hasNoSignedWrap() &&
3691 Op.getOperand(0) ==
Op.getOperand(1) &&
3718 unsigned SignBits1 =
3722 unsigned SignBits0 =
3728 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3731 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3732 if (
Op.getResNo() == 0)
3739 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3742 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3743 if (
Op.getResNo() == 0)
3796 if (
Op.getResNo() != 1)
3811 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
3823 bool NUW =
Op->getFlags().hasNoUnsignedWrap();
3824 bool NSW =
Op->getFlags().hasNoSignedWrap();
3831 if (std::optional<uint64_t> ShMinAmt =
3840 Op->getFlags().hasExact());
3843 if (std::optional<uint64_t> ShMinAmt =
3851 Op->getFlags().hasExact());
3856 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3862 DemandedElts,
Depth + 1);
3883 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3886 unsigned LoBits =
Op.getOperand(0).getScalarValueSizeInBits();
3887 unsigned HiBits =
Op.getOperand(1).getScalarValueSizeInBits();
3890 Known = Known2.
concat(Known);
3904 if (
Op.getResNo() == 0)
3912 EVT EVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
3950 ? cast<MaskedGatherSDNode>(
Op)->getExtensionType()
3951 : cast<MaskedLoadSDNode>(
Op)->getExtensionType();
3953 EVT MemVT = cast<MemSDNode>(
Op)->getMemoryVT();
3966 !
Op.getValueType().isScalableVector()) {
3980 for (
unsigned i = 0; i != NumElts; ++i) {
3981 if (!DemandedElts[i])
3984 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3990 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3991 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4002 if (
auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4004 }
else if (
auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4010 }
else if (
Op.getResNo() == 0) {
4011 unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
4012 KnownBits KnownScalarMemory(ScalarMemorySize);
4013 if (
const MDNode *MD = LD->getRanges())
4024 Known = KnownScalarMemory;
4031 if (
Op.getValueType().isScalableVector())
4033 EVT InVT =
Op.getOperand(0).getValueType();
4045 if (
Op.getValueType().isScalableVector())
4047 EVT InVT =
Op.getOperand(0).getValueType();
4063 if (
Op.getValueType().isScalableVector())
4065 EVT InVT =
Op.getOperand(0).getValueType();
4082 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
4085 Known.
Zero |= (~InMask);
4086 Known.
One &= (~Known.Zero);
4090 unsigned LogOfAlign =
Log2(cast<AssertAlignSDNode>(
Op)->
getAlign());
4110 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
4111 Flags.hasNoUnsignedWrap(), Known, Known2);
4118 if (
Op.getResNo() == 1) {
4129 "We only compute knownbits for the difference here.");
4136 Borrow = Borrow.
trunc(1);
4150 if (
Op.getResNo() == 1) {
4161 assert(
Op.getResNo() == 0 &&
"We only compute knownbits for the sum here.");
4171 Carry = Carry.
trunc(1);
4207 const unsigned Index =
Op.getConstantOperandVal(1);
4208 const unsigned EltBitWidth =
Op.getValueSizeInBits();
4215 Known = Known.
trunc(EltBitWidth);
4231 Known = Known.
trunc(EltBitWidth);
4236 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4237 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4247 if (
Op.getValueType().isScalableVector())
4256 bool DemandedVal =
true;
4257 APInt DemandedVecElts = DemandedElts;
4258 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4259 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4260 unsigned EltIdx = CEltNo->getZExtValue();
4261 DemandedVal = !!DemandedElts[EltIdx];
4270 if (!!DemandedVecElts) {
4288 Known = Known2.
abs();
4321 if (CstLow && CstHigh) {
4326 const APInt &ValueHigh = CstHigh->getAPIntValue();
4327 if (ValueLow.
sle(ValueHigh)) {
4330 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4353 if (IsMax && CstLow) {
4377 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
4383 if (
Op.getResNo() == 0) {
4384 auto *AT = cast<AtomicSDNode>(
Op);
4385 unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
4386 KnownBits KnownScalarMemory(ScalarMemorySize);
4387 if (
const MDNode *MD = AT->getRanges())
4390 switch (AT->getExtensionType()) {
4411 Known = KnownScalarMemory;
4419 if (
Op.getResNo() == 1) {
4445 if (
Op.getResNo() == 0) {
4446 auto *AT = cast<AtomicSDNode>(
Op);
4447 unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();
4469 if (
Op.getValueType().isScalableVector())
4615 return C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2();
4623 if (
C &&
C->getAPIntValue() == 1)
4633 if (
C &&
C->getAPIntValue().isSignMask())
4645 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
4646 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4654 if (
C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2())
4692 return C1->getValueAPF().getExactLog2Abs() >= 0;
4701 EVT VT =
Op.getValueType();
4713 unsigned Depth)
const {
4714 EVT VT =
Op.getValueType();
4719 unsigned FirstAnswer = 1;
4721 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op)) {
4722 const APInt &Val =
C->getAPIntValue();
4732 unsigned Opcode =
Op.getOpcode();
4736 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4737 return VTBits-Tmp+1;
4739 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4746 unsigned NumSrcBits =
Op.getOperand(0).getValueSizeInBits();
4748 if (NumSrcSignBits > (NumSrcBits - VTBits))
4749 return NumSrcSignBits - (NumSrcBits - VTBits);
4756 if (!DemandedElts[i])
4763 APInt T =
C->getAPIntValue().trunc(VTBits);
4764 Tmp2 =
T.getNumSignBits();
4768 if (
SrcOp.getValueSizeInBits() != VTBits) {
4770 "Expected BUILD_VECTOR implicit truncation");
4771 unsigned ExtraBits =
SrcOp.getValueSizeInBits() - VTBits;
4772 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4775 Tmp = std::min(Tmp, Tmp2);
4782 APInt DemandedLHS, DemandedRHS;
4786 DemandedLHS, DemandedRHS))
4789 Tmp = std::numeric_limits<unsigned>::max();
4792 if (!!DemandedRHS) {
4794 Tmp = std::min(Tmp, Tmp2);
4799 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4815 if (VTBits == SrcBits)
4821 if ((SrcBits % VTBits) == 0) {
4824 unsigned Scale = SrcBits / VTBits;
4825 APInt SrcDemandedElts =
4835 for (
unsigned i = 0; i != NumElts; ++i)
4836 if (DemandedElts[i]) {
4837 unsigned SubOffset = i % Scale;
4838 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
4839 SubOffset = SubOffset * VTBits;
4840 if (Tmp <= SubOffset)
4842 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
4851 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4852 return VTBits - Tmp + 1;
4854 Tmp = VTBits -
Op.getOperand(0).getScalarValueSizeInBits();
4858 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4861 return std::max(Tmp, Tmp2);
4866 EVT SrcVT = Src.getValueType();
4874 if (std::optional<uint64_t> ShAmt =
4876 Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
4879 if (std::optional<ConstantRange> ShAmtRange =
4881 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
4882 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
4890 EVT ExtVT = Ext.getValueType();
4891 SDValue Extendee = Ext.getOperand(0);
4895 if (SizeDifference <= MinShAmt) {
4896 Tmp = SizeDifference +
4899 return Tmp - MaxShAmt;
4905 return Tmp - MaxShAmt;
4915 FirstAnswer = std::min(Tmp, Tmp2);
4925 if (Tmp == 1)
return 1;
4927 return std::min(Tmp, Tmp2);
4930 if (Tmp == 1)
return 1;
4932 return std::min(Tmp, Tmp2);
4944 if (CstLow && CstHigh) {
4949 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
4950 return std::min(Tmp, Tmp2);
4959 return std::min(Tmp, Tmp2);
4967 return std::min(Tmp, Tmp2);
4971 if (
Op.getResNo() == 0 &&
Op.getOperand(0) ==
Op.getOperand(1))
4982 if (
Op.getResNo() != 1)
4996 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
5013 unsigned RotAmt =
C->getAPIntValue().urem(VTBits);
5017 RotAmt = (VTBits - RotAmt) % VTBits;
5021 if (Tmp > (RotAmt + 1))
return (Tmp - RotAmt);
5029 if (Tmp == 1)
return 1;
5034 if (CRHS->isAllOnes()) {
5040 if ((Known.
Zero | 1).isAllOnes())
5050 if (Tmp2 == 1)
return 1;
5051 return std::min(Tmp, Tmp2) - 1;
5054 if (Tmp2 == 1)
return 1;
5059 if (CLHS->isZero()) {
5064 if ((Known.
Zero | 1).isAllOnes())
5078 if (Tmp == 1)
return 1;
5079 return std::min(Tmp, Tmp2) - 1;
5083 if (SignBitsOp0 == 1)
5086 if (SignBitsOp1 == 1)
5088 unsigned OutValidBits =
5089 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5090 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5098 return std::min(Tmp, Tmp2);
5107 unsigned NumSrcBits =
Op.getOperand(0).getScalarValueSizeInBits();
5109 if (NumSrcSignBits > (NumSrcBits - VTBits))
5110 return NumSrcSignBits - (NumSrcBits - VTBits);
5117 const int BitWidth =
Op.getValueSizeInBits();
5118 const int Items =
Op.getOperand(0).getValueSizeInBits() /
BitWidth;
5122 const int rIndex = Items - 1 -
Op.getConstantOperandVal(1);
5137 bool DemandedVal =
true;
5138 APInt DemandedVecElts = DemandedElts;
5139 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
5140 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5141 unsigned EltIdx = CEltNo->getZExtValue();
5142 DemandedVal = !!DemandedElts[EltIdx];
5145 Tmp = std::numeric_limits<unsigned>::max();
5151 Tmp = std::min(Tmp, Tmp2);
5153 if (!!DemandedVecElts) {
5155 Tmp = std::min(Tmp, Tmp2);
5157 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5168 const unsigned BitWidth =
Op.getValueSizeInBits();
5169 const unsigned EltBitWidth =
Op.getOperand(0).getScalarValueSizeInBits();
5181 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
5182 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5192 if (Src.getValueType().isScalableVector())
5195 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5204 Tmp = std::numeric_limits<unsigned>::max();
5205 EVT SubVectorVT =
Op.getOperand(0).getValueType();
5208 for (
unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5210 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
5214 Tmp = std::min(Tmp, Tmp2);
5216 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5227 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5229 APInt DemandedSrcElts = DemandedElts;
5232 Tmp = std::numeric_limits<unsigned>::max();
5233 if (!!DemandedSubElts) {
5238 if (!!DemandedSrcElts) {
5240 Tmp = std::min(Tmp, Tmp2);
5242 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5247 if (
const MDNode *Ranges = LD->getRanges()) {
5248 if (DemandedElts != 1)
5253 switch (LD->getExtensionType()) {
5288 auto *AT = cast<AtomicSDNode>(
Op);
5290 if (
Op.getResNo() == 0) {
5291 Tmp = AT->getMemoryVT().getScalarSizeInBits();
5297 switch (AT->getExtensionType()) {
5301 return VTBits - Tmp + 1;
5303 return VTBits - Tmp;
5308 return VTBits - Tmp + 1;
5310 return VTBits - Tmp;
5317 if (
Op.getResNo() == 0) {
5320 unsigned ExtType = LD->getExtensionType();
5324 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5325 return VTBits - Tmp + 1;
5327 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5328 return VTBits - Tmp;
5333 Type *CstTy = Cst->getType();
5338 for (
unsigned i = 0; i != NumElts; ++i) {
5339 if (!DemandedElts[i])
5342 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
5344 Tmp = std::min(Tmp,
Value.getNumSignBits());
5347 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
5348 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5349 Tmp = std::min(Tmp,
Value.getNumSignBits());
5375 FirstAnswer = std::max(FirstAnswer, NumBits);
5386 unsigned Depth)
const {
5388 return Op.getScalarValueSizeInBits() - SignBits + 1;
5392 const APInt &DemandedElts,
5393 unsigned Depth)
const {
5395 return Op.getScalarValueSizeInBits() - SignBits + 1;
5399 unsigned Depth)
const {
5404 EVT VT =
Op.getValueType();
5412 const APInt &DemandedElts,
5414 unsigned Depth)
const {
5415 unsigned Opcode =
Op.getOpcode();
5445 if (!DemandedElts[i])
5455 if (Src.getValueType().isScalableVector())
5458 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5465 if (
Op.getValueType().isScalableVector())
5470 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5472 APInt DemandedSrcElts = DemandedElts;
5486 auto *IndexC = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
5487 EVT SrcVT = Src.getValueType();
5491 IndexC->getZExtValue());
5503 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
5506 if (DemandedElts[IndexC->getZExtValue()] &&
5509 APInt InVecDemandedElts = DemandedElts;
5510 InVecDemandedElts.
clearBit(IndexC->getZExtValue());
5511 if (!!InVecDemandedElts &&
5535 APInt DemandedLHS, DemandedRHS;
5536 auto *SVN = cast<ShuffleVectorSDNode>(
Op);
5538 DemandedElts, DemandedLHS, DemandedRHS,
5541 if (!DemandedLHS.
isZero() &&
5545 if (!DemandedRHS.
isZero() &&
5593 return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts,
5594 PoisonOnly, Depth + 1);
5619 return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
5625 unsigned Depth)
const {
5626 EVT VT =
Op.getValueType();
5636 unsigned Depth)
const {
5637 if (ConsiderFlags &&
Op->hasPoisonGeneratingFlags())
5640 unsigned Opcode =
Op.getOpcode();
5716 if (
Op.getOperand(0).getValueType().isInteger())
5723 unsigned CCOp = Opcode ==
ISD::SETCC ? 2 : 4;
5724 ISD::CondCode CCCode = cast<CondCodeSDNode>(
Op.getOperand(CCOp))->get();
5725 if (((
unsigned)CCCode & 0x10U))
5774 EVT VecVT =
Op.getOperand(0).getValueType();
5782 auto *SVN = cast<ShuffleVectorSDNode>(
Op);
5784 if (Elt < 0 && DemandedElts[
Idx])
5803 unsigned Opcode =
Op.getOpcode();
5805 return Op->getFlags().hasDisjoint() ||
5818 unsigned Depth)
const {
5819 EVT VT =
Op.getValueType();
5832 bool SNaN,
unsigned Depth)
const {
5833 assert(!DemandedElts.
isZero() &&
"No demanded elements");
5844 return !
C->getValueAPF().isNaN() ||
5845 (SNaN && !
C->getValueAPF().isSignaling());
5848 unsigned Opcode =
Op.getOpcode();
5948 auto *
Idx = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
5949 EVT SrcVT = Src.getValueType();
5953 Idx->getZExtValue());
5960 if (Src.getValueType().isFixedLengthVector()) {
5961 unsigned Idx =
Op.getConstantOperandVal(1);
5962 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5973 unsigned Idx =
Op.getConstantOperandVal(2);
5979 APInt DemandedMask =
5981 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
5984 bool NeverNaN =
true;
5985 if (!DemandedSrcElts.
isZero())
5988 if (NeverNaN && !DemandedSubElts.
isZero())
5998 for (
unsigned I = 0;
I != NumElts; ++
I)
5999 if (DemandedElts[
I] &&
6025 assert(
Op.getValueType().isFloatingPoint() &&
6026 "Floating point type expected");
6037 assert(!
Op.getValueType().isFloatingPoint() &&
6038 "Floating point types unsupported - use isKnownNeverZeroFloat");
6047 switch (
Op.getOpcode()) {
6061 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6065 if (ValKnown.
One[0])
6125 if (
Op->getFlags().hasExact())
6141 if (
Op->getFlags().hasExact())
6146 if (
Op->getFlags().hasNoUnsignedWrap())
6157 std::optional<bool> ne =
6164 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6175 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
6189 return !C1->isNegative();
6196 if (
A ==
B)
return true;
6201 if (CA->isZero() && CB->isZero())
return true;
6210 return V.getOperand(0);
6217 SDValue ExtArg = V.getOperand(0);
6236 NotOperand = NotOperand->getOperand(0);
6238 if (
Other == NotOperand)
6241 return NotOperand ==
Other->getOperand(0) ||
6242 NotOperand ==
Other->getOperand(1);
6248 A =
A->getOperand(0);
6251 B =
B->getOperand(0);
6254 return MatchNoCommonBitsPattern(
A->getOperand(0),
A->getOperand(1),
B) ||
6255 MatchNoCommonBitsPattern(
A->getOperand(1),
A->getOperand(0),
B);
6261 assert(
A.getValueType() ==
B.getValueType() &&
6262 "Values must have the same type");
6272 if (cast<ConstantSDNode>(Step)->
isZero())
6281 int NumOps = Ops.
size();
6282 assert(NumOps != 0 &&
"Can't build an empty vector!");
6284 "BUILD_VECTOR cannot be used with scalable types");
6286 "Incorrect element count in BUILD_VECTOR!");
6294 bool IsIdentity =
true;
6295 for (
int i = 0; i != NumOps; ++i) {
6298 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
6299 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
6300 Ops[i].getConstantOperandAPInt(1) != i) {
6304 IdentitySrc = Ops[i].getOperand(0);
6317 assert(!Ops.
empty() &&
"Can't concatenate an empty list of vectors!");
6320 return Ops[0].getValueType() ==
Op.getValueType();
6322 "Concatenation of vectors with inconsistent value types!");
6325 "Incorrect element count in vector concatenation!");
6327 if (Ops.
size() == 1)
6338 bool IsIdentity =
true;
6339 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
6341 unsigned IdentityIndex = i *
Op.getValueType().getVectorMinNumElements();
6343 Op.getOperand(0).getValueType() != VT ||
6344 (IdentitySrc &&
Op.getOperand(0) != IdentitySrc) ||
6345 Op.getConstantOperandVal(1) != IdentityIndex) {
6349 assert((!IdentitySrc || IdentitySrc ==
Op.getOperand(0)) &&
6350 "Unexpected identity source vector for concat of extracts");
6351 IdentitySrc =
Op.getOperand(0);
6354 assert(IdentitySrc &&
"Failed to set source vector of extracts");
6369 EVT OpVT =
Op.getValueType();
6381 SVT = (SVT.
bitsLT(
Op.getValueType()) ?
Op.getValueType() : SVT);
6405 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
6408 auto *
N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6409 CSEMap.InsertNode(
N, IP);
6422 return getNode(Opcode,
DL, VT, N1, Flags);
6473 "STEP_VECTOR can only be used with scalable types");
6476 "Unexpected step operand");
6497 "Invalid FP cast!");
6501 "Vector element count mismatch!");
6519 "Invalid SIGN_EXTEND!");
6521 "SIGN_EXTEND result type type should be vector iff the operand "
6526 "Vector element count mismatch!");
6549 unsigned NumSignExtBits =
6560 "Invalid ZERO_EXTEND!");
6562 "ZERO_EXTEND result type type should be vector iff the operand "
6567 "Vector element count mismatch!");
6605 "Invalid ANY_EXTEND!");
6607 "ANY_EXTEND result type type should be vector iff the operand "
6612 "Vector element count mismatch!");
6637 "Invalid TRUNCATE!");
6639 "TRUNCATE result type type should be vector iff the operand "
6644 "Vector element count mismatch!");
6671 assert(VT.
isVector() &&
"This DAG node is restricted to vector types.");
6673 "The input must be the same size or smaller than the result.");
6676 "The destination vector type must have fewer lanes than the input.");
6686 "BSWAP types must be a multiple of 16 bits!");
6700 "Cannot BITCAST between types of different sizes!");
6713 "Illegal SCALAR_TO_VECTOR node!");
6770 "Wrong operand type!");
6777 if (VT != MVT::Glue) {
6781 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
6782 E->intersectFlagsWith(Flags);
6786 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6788 createOperands(
N, Ops);
6789 CSEMap.InsertNode(
N, IP);
6791 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6792 createOperands(
N, Ops);
6826 if (!C2.getBoolValue())
6830 if (!C2.getBoolValue())
6834 if (!C2.getBoolValue())
6838 if (!C2.getBoolValue())
6858 return std::nullopt;
6863 bool IsUndef1,
const APInt &C2,
6865 if (!(IsUndef1 || IsUndef2))
6873 return std::nullopt;
6883 auto *C2 = dyn_cast<ConstantSDNode>(N2);
6886 int64_t
Offset = C2->getSExtValue();
6906 assert(Ops.
size() == 2 &&
"Div/rem should have 2 operands");
6913 [](
SDValue V) { return V.isUndef() ||
6914 isNullConstant(V); });
6935 unsigned NumOps = Ops.
size();
6951 if (
auto *
C = dyn_cast<ConstantSDNode>(N1)) {
6952 const APInt &Val =
C->getAPIntValue();
6956 C->isTargetOpcode(),
C->isOpaque());
6963 C->isTargetOpcode(),
C->isOpaque());
6968 C->isTargetOpcode(),
C->isOpaque());
6970 C->isTargetOpcode(),
C->isOpaque());
7016 if (VT == MVT::f16 &&
C->getValueType(0) == MVT::i16)
7018 if (VT == MVT::f32 &&
C->getValueType(0) == MVT::i32)
7020 if (VT == MVT::f64 &&
C->getValueType(0) == MVT::i64)
7022 if (VT == MVT::f128 &&
C->getValueType(0) == MVT::i128)
7029 if (
auto *
C = dyn_cast<ConstantFPSDNode>(N1)) {
7083 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7086 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::f16)
7089 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::bf16)
7092 if (VT == MVT::i32 &&
C->getValueType(0) == MVT::f32)
7095 if (VT == MVT::i64 &&
C->getValueType(0) == MVT::f64)
7096 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7111 if (
auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
7112 if (
auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
7113 if (C1->isOpaque() || C2->isOpaque())
7116 std::optional<APInt> FoldAttempt =
7117 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
7123 "Can't fold vectors ops with scalar operands");
7137 EVT EVT = cast<VTSDNode>(Ops[1])->getVT();
7146 if (
auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
7147 const APInt &Val = C1->getAPIntValue();
7148 return SignExtendInReg(Val, VT);
7153 llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
7160 const APInt &Val = cast<ConstantSDNode>(
Op)->getAPIntValue();
7161 ScalarOps.
push_back(SignExtendInReg(Val, OpVT));
7167 isa<ConstantSDNode>(Ops[0].getOperand(0)))
7169 SignExtendInReg(Ops[0].getConstantOperandAPInt(0),
7176 auto *C1 = dyn_cast<ConstantSDNode>(Ops[0]);
7177 auto *C2 = dyn_cast<ConstantSDNode>(Ops[1]);
7178 auto *C3 = dyn_cast<ConstantSDNode>(Ops[2]);
7180 if (C1 && C2 && C3) {
7181 if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
7183 const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
7184 &V3 = C3->getAPIntValue();
7196 Ops[2].
getValueType() == VT &&
"FMA types must match!");
7200 if (C1 && C2 && C3) {
7221 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
7226 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
7227 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
7234 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
7235 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
7239 Opcode, RawBits1[
I], UndefElts1[
I], RawBits2[
I], UndefElts2[
I]);
7250 BVEltVT = BV1->getOperand(0).getValueType();
7253 BVEltVT = BV2->getOperand(0).getValueType();
7259 DstBits, RawBits, DstUndefs,
7262 for (
unsigned I = 0, E = DstBits.
size();
I != E; ++
I) {
7280 ? Ops[0].getConstantOperandAPInt(0) * RHSVal
7281 : Ops[0].getConstantOperandAPInt(0) << RHSVal;
7286 auto IsScalarOrSameVectorSize = [NumElts](
const SDValue &
Op) {
7287 return !
Op.getValueType().isVector() ||
7288 Op.getValueType().getVectorElementCount() == NumElts;
7291 auto IsBuildVectorSplatVectorOrUndef = [](
const SDValue &
Op) {
7300 if (!
llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
7329 for (
unsigned I = 0;
I != NumVectorElts;
I++) {
7332 EVT InSVT =
Op.getValueType().getScalarType();
7354 !isa<ConstantSDNode>(ScalarOp) &&
7375 if (LegalSVT != SVT)
7376 ScalarResult =
getNode(ExtendCode,
DL, LegalSVT, ScalarResult);
7390 if (Ops.
size() != 2)
7401 if (N1CFP && N2CFP) {
7452 if (N1C && N1C->getValueAPF().isNegZero() && N2.
isUndef())
7475 if (SrcEltVT == DstEltVT)
7483 if (SrcBitSize == DstBitSize) {
7488 if (
Op.getValueType() != SrcEltVT)
7531 for (
unsigned I = 0, E = RawBits.
size();
I != E; ++
I) {
7532 if (UndefElements[
I])
7553 ID.AddInteger(
A.value());
7556 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
7560 newSDNode<AssertAlignSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
A);
7561 createOperands(
N, {Val});
7563 CSEMap.InsertNode(
N, IP);
7576 return getNode(Opcode,
DL, VT, N1, N2, Flags);
7590 if ((N1C && !N2C) || (N1CFP && !N2CFP))
7604 "Operand is DELETED_NODE!");
7608 auto *N1C = dyn_cast<ConstantSDNode>(N1);
7609 auto *N2C = dyn_cast<ConstantSDNode>(N2);
7620 N2.
getValueType() == MVT::Other &&
"Invalid token factor!");
7624 if (N1 == N2)
return N1;
7640 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7642 N1.
getValueType() == VT &&
"Binary operator types must match!");
7645 if (N2CV && N2CV->
isZero())
7655 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7657 N1.
getValueType() == VT &&
"Binary operator types must match!");
7667 if (N2CV && N2CV->
isZero())
7681 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7683 N1.
getValueType() == VT &&
"Binary operator types must match!");
7688 const APInt &N2CImm = N2C->getAPIntValue();
7702 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7704 N1.
getValueType() == VT &&
"Binary operator types must match!");
7717 "Types of operands of UCMP/SCMP must match");
7719 "Operands and return type of must both be scalars or vectors");
7723 "Result and operands must have the same number of elements");
7729 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7731 N1.
getValueType() == VT &&
"Binary operator types must match!");
7735 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7737 N1.
getValueType() == VT &&
"Binary operator types must match!");
7743 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7745 N1.
getValueType() == VT &&
"Binary operator types must match!");
7751 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
7753 N1.
getValueType() == VT &&
"Binary operator types must match!");
7764 N1.
getValueType() == VT &&
"Binary operator types must match!");
7772 "Invalid FCOPYSIGN!");
7777 const APInt &ShiftImm = N2C->getAPIntValue();
7789 "Shift operators return type must be the same as their first arg");
7791 "Shifts only work on integers");
7793 "Vector shift amounts must be in the same as their first arg");
7800 "Invalid use of small shift amount with oversized value!");
7807 if (N2CV && N2CV->
isZero())
7813 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
7819 "AssertNoFPClass is used for a non-floating type");
7820 assert(isa<ConstantSDNode>(N2) &&
"NoFPClass is not Constant");
7823 BitmaskEnumDetail::Mask<FPClassTest>() &&
7824 "FPClassTest value too large");
7830 EVT EVT = cast<VTSDNode>(N2)->getVT();
7833 "Cannot *_EXTEND_INREG FP types");
7835 "AssertSExt/AssertZExt type should be the vector element type "
7836 "rather than the vector type!");
7842 EVT EVT = cast<VTSDNode>(N2)->getVT();
7845 "Cannot *_EXTEND_INREG FP types");
7847 "SIGN_EXTEND_INREG type should be vector iff the operand "
7851 "Vector element counts must match in SIGN_EXTEND_INREG");
7853 if (
EVT == VT)
return N1;
7861 "FP_TO_*INT_SAT type should be vector iff the operand type is "
7865 "Vector element counts must match in FP_TO_*INT_SAT");
7866 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
7867 "Type to saturate to must be a scalar.");
7874 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
7875 element type of the vector.");
7897 N2C->getZExtValue() % Factor);
7906 "BUILD_VECTOR used for scalable vectors");
7929 if (N1Op2C && N2C) {
7959 assert(N2C && (
unsigned)N2C->getZExtValue() < 2 &&
"Bad EXTRACT_ELEMENT!");
7963 "Wrong types for EXTRACT_ELEMENT!");
7974 unsigned Shift = ElementSize * N2C->getZExtValue();
7975 const APInt &Val = N1C->getAPIntValue();
7982 "Extract subvector VTs must be vectors!");
7984 "Extract subvector VTs must have the same element type!");
7986 "Cannot extract a scalable vector from a fixed length vector!");
7989 "Extract subvector must be from larger vector to smaller vector!");
7990 assert(N2C &&
"Extract subvector index must be a constant");
7994 "Extract subvector overflow!");
7995 assert(N2C->getAPIntValue().getBitWidth() ==
7997 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
7999 "Extract index is not a multiple of the output vector length");
8014 return N1.
getOperand(N2C->getZExtValue() / Factor);
8134 if (VT != MVT::Glue) {
8138 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
8139 E->intersectFlagsWith(Flags);
8143 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8145 createOperands(
N, Ops);
8146 CSEMap.InsertNode(
N, IP);
8148 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8149 createOperands(
N, Ops);
8163 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
8172 "Operand is DELETED_NODE!");
8191 "SETCC operands must have the same type!");
8193 "SETCC type should be vector iff the operand type is vector!");
8196 "SETCC vector element counts must match!");
8210 if (cast<ConstantSDNode>(N3)->
isZero())
8216 "INSERT_VECTOR_ELT vector type mismatch");
8218 "INSERT_VECTOR_ELT scalar fp/int mismatch");
8221 "INSERT_VECTOR_ELT fp scalar type mismatch");
8224 "INSERT_VECTOR_ELT int scalar size mismatch");
8226 auto *N3C = dyn_cast<ConstantSDNode>(N3);
8251 "Dest and insert subvector source types must match!");
8253 "Insert subvector VTs must be vectors!");
8255 "Insert subvector VTs must have the same element type!");
8257 "Cannot insert a scalable vector into a fixed length vector!");
8260 "Insert subvector must be from smaller vector to larger vector!");
8261 assert(isa<ConstantSDNode>(N3) &&
8262 "Insert subvector index must be constant");
8266 "Insert subvector overflow!");
8269 "Constant index for INSERT_SUBVECTOR has an invalid size");
8287 case ISD::VP_TRUNCATE:
8288 case ISD::VP_SIGN_EXTEND:
8289 case ISD::VP_ZERO_EXTEND:
8298 assert(VT == VecVT &&
"Vector and result type don't match.");
8300 "All inputs must be vectors.");
8301 assert(VecVT == PassthruVT &&
"Vector and passthru types don't match.");
8303 "Vector and mask must have same number of elements.");
8317 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8318 "node to have the same type!");
8320 "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
8321 "the same type as its result!");
8324 "Expected the element count of the second and third operands of the "
8325 "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
8326 "element count of the first operand and the result!");
8328 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8329 "node to have an element type which is the same as or smaller than "
8330 "the element type of the first operand and result!");
8352 if (VT != MVT::Glue) {
8356 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
8357 E->intersectFlagsWith(Flags);
8361 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8363 createOperands(
N, Ops);
8364 CSEMap.InsertNode(
N, IP);
8366 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8367 createOperands(
N, Ops);
8379 SDValue Ops[] = { N1, N2, N3, N4 };
8380 return getNode(Opcode,
DL, VT, Ops, Flags);
8388 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, Flags);
8394 SDValue Ops[] = { N1, N2, N3, N4, N5 };
8395 return getNode(Opcode,
DL, VT, Ops, Flags);
8404 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, N5, Flags);
8421 if (FI->getIndex() < 0)
8436 assert(
C->getAPIntValue().getBitWidth() == 8);
8441 return DAG.
getConstant(Val, dl, VT,
false, IsOpaque);
8446 assert(
Value.getValueType() == MVT::i8 &&
"memset with non-byte fill value?");
8462 if (VT !=
Value.getValueType())
8475 if (Slice.
Array ==
nullptr) {
8484 unsigned NumVTBytes = NumVTBits / 8;
8485 unsigned NumBytes = std::min(NumVTBytes,
unsigned(Slice.
Length));
8487 APInt Val(NumVTBits, 0);
8489 for (
unsigned i = 0; i != NumBytes; ++i)
8492 for (
unsigned i = 0; i != NumBytes; ++i)
8493 Val |= (
uint64_t)(
unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
8512 APInt(
Base.getValueSizeInBits().getFixedValue(),
8513 Offset.getKnownMinValue()));
8524 EVT BasePtrVT =
Ptr.getValueType();
8536 G = cast<GlobalAddressSDNode>(Src);
8537 else if (Src.getOpcode() ==
ISD::ADD &&
8540 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
8541 SrcDelta = Src.getConstantOperandVal(1);
8547 SrcDelta +
G->getOffset());
8563 assert(OutLoadChains.
size() &&
"Missing loads in memcpy inlining");
8564 assert(OutStoreChains.
size() &&
"Missing stores in memcpy inlining");
8566 for (
unsigned i =
From; i < To; ++i) {
8568 GluedLoadChains.
push_back(OutLoadChains[i]);
8575 for (
unsigned i =
From; i < To; ++i) {
8576 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
8578 ST->getBasePtr(), ST->getMemoryVT(),
8579 ST->getMemOperand());
8601 std::vector<EVT> MemOps;
8602 bool DstAlignCanChange =
false;
8608 DstAlignCanChange =
true;
8610 if (!SrcAlign || Alignment > *SrcAlign)
8611 SrcAlign = Alignment;
8612 assert(SrcAlign &&
"SrcAlign must be set");
8616 bool isZeroConstant = CopyFromConstant && Slice.
Array ==
nullptr;
8618 const MemOp Op = isZeroConstant
8622 *SrcAlign, isVol, CopyFromConstant);
8628 if (DstAlignCanChange) {
8629 Type *Ty = MemOps[0].getTypeForEVT(
C);
8630 Align NewAlign =
DL.getABITypeAlign(Ty);
8636 if (!
TRI->hasStackRealignment(MF))
8638 NewAlign = std::min(NewAlign, *StackAlign);
8640 if (NewAlign > Alignment) {
8644 Alignment = NewAlign;
8652 const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.
V);
8654 BatchAA && SrcVal &&
8662 unsigned NumMemOps = MemOps.
size();
8664 for (
unsigned i = 0; i != NumMemOps; ++i) {
8669 if (VTSize >
Size) {
8672 assert(i == NumMemOps-1 && i != 0);
8673 SrcOff -= VTSize -
Size;
8674 DstOff -= VTSize -
Size;
8677 if (CopyFromConstant &&
8685 if (SrcOff < Slice.
Length) {
8687 SubSlice.
move(SrcOff);
8690 SubSlice.
Array =
nullptr;
8692 SubSlice.
Length = VTSize;
8695 if (
Value.getNode()) {
8699 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8704 if (!Store.getNode()) {
8713 bool isDereferenceable =
8716 if (isDereferenceable)
8731 DstPtrInfo.
getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
8741 unsigned NumLdStInMemcpy = OutStoreChains.
size();
8743 if (NumLdStInMemcpy) {
8749 for (
unsigned i = 0; i < NumLdStInMemcpy; ++i) {
8755 if (NumLdStInMemcpy <= GluedLdStLimit) {
8757 NumLdStInMemcpy, OutLoadChains,
8760 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
8761 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
8762 unsigned GlueIter = 0;
8764 for (
unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
8765 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
8766 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
8769 OutLoadChains, OutStoreChains);
8770 GlueIter += GluedLdStLimit;
8774 if (RemainingLdStInMemcpy) {
8776 RemainingLdStInMemcpy, OutLoadChains,
8788 bool isVol,
bool AlwaysInline,
8802 std::vector<EVT> MemOps;
8803 bool DstAlignCanChange =
false;
8809 DstAlignCanChange =
true;
8811 if (!SrcAlign || Alignment > *SrcAlign)
8812 SrcAlign = Alignment;
8813 assert(SrcAlign &&
"SrcAlign must be set");
8823 if (DstAlignCanChange) {
8824 Type *Ty = MemOps[0].getTypeForEVT(
C);
8825 Align NewAlign =
DL.getABITypeAlign(Ty);
8831 if (!
TRI->hasStackRealignment(MF))
8833 NewAlign = std::min(NewAlign, *StackAlign);
8835 if (NewAlign > Alignment) {
8839 Alignment = NewAlign;
8853 unsigned NumMemOps = MemOps.
size();
8854 for (
unsigned i = 0; i < NumMemOps; i++) {
8859 bool isDereferenceable =
8862 if (isDereferenceable)
8868 SrcPtrInfo.
getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
8875 for (
unsigned i = 0; i < NumMemOps; i++) {
8881 Chain, dl, LoadValues[i],
8883 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8923 std::vector<EVT> MemOps;
8924 bool DstAlignCanChange =
false;
8931 DstAlignCanChange =
true;
8937 MemOp::Set(
Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
8941 if (DstAlignCanChange) {
8944 Align NewAlign =
DL.getABITypeAlign(Ty);
8950 if (!
TRI->hasStackRealignment(MF))
8952 NewAlign = std::min(NewAlign, *StackAlign);
8954 if (NewAlign > Alignment) {
8958 Alignment = NewAlign;
8964 unsigned NumMemOps = MemOps.size();
8967 EVT LargestVT = MemOps[0];
8968 for (
unsigned i = 1; i < NumMemOps; i++)
8969 if (MemOps[i].bitsGT(LargestVT))
8970 LargestVT = MemOps[i];
8977 for (
unsigned i = 0; i < NumMemOps; i++) {
8980 if (VTSize >
Size) {
8983 assert(i == NumMemOps-1 && i != 0);
8984 DstOff -= VTSize -
Size;
8991 if (VT.
bitsLT(LargestVT)) {
9011 assert(
Value.getValueType() == VT &&
"Value with wrong type.");
9036std::pair<SDValue, SDValue>
9050 bool IsTailCall =
false;
9069 Align Alignment,
bool isVol,
bool AlwaysInline,
const CallInst *CI,
9078 if (ConstantSize->
isZero())
9082 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9083 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9084 if (Result.getNode())
9092 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline,
9093 DstPtrInfo, SrcPtrInfo);
9094 if (Result.getNode())
9101 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
9103 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9104 isVol,
true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9119 Args.emplace_back(Dst, PtrTy);
9120 Args.emplace_back(Src, PtrTy);
9124 bool IsTailCall =
false;
9127 if (OverrideTailCall.has_value()) {
9128 IsTailCall = *OverrideTailCall;
9134 ReturnsFirstArg && LowersToMemcpy);
9141 Dst.getValueType().getTypeForEVT(*
getContext()),
9147 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
9148 return CallResult.second;
9153 Type *SizeTy,
unsigned ElemSz,
9160 Args.emplace_back(Dst, ArgTy);
9161 Args.emplace_back(Src, ArgTy);
9162 Args.emplace_back(
Size, SizeTy);
9164 RTLIB::Libcall LibraryCall =
9166 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9180 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
9181 return CallResult.second;
9187 std::optional<bool> OverrideTailCall,
9197 if (ConstantSize->
isZero())
9201 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9202 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo);
9203 if (Result.getNode())
9212 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
9213 if (Result.getNode())
9226 Args.emplace_back(Dst, PtrTy);
9227 Args.emplace_back(Src, PtrTy);
9232 bool IsTailCall =
false;
9233 if (OverrideTailCall.has_value()) {
9234 IsTailCall = *OverrideTailCall;
9236 bool LowersToMemmove =
9241 ReturnsFirstArg && LowersToMemmove);
9247 Dst.getValueType().getTypeForEVT(*
getContext()),
9254 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
9255 return CallResult.second;
9260 Type *SizeTy,
unsigned ElemSz,
9267 Args.emplace_back(Dst, IntPtrTy);
9268 Args.emplace_back(Src, IntPtrTy);
9269 Args.emplace_back(
Size, SizeTy);
9271 RTLIB::Libcall LibraryCall =
9273 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9287 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
9288 return CallResult.second;
9293 bool isVol,
bool AlwaysInline,
9302 if (ConstantSize->
isZero())
9307 isVol,
false, DstPtrInfo, AAInfo);
9309 if (Result.getNode())
9317 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
9318 if (Result.getNode())
9325 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
9328 isVol,
true, DstPtrInfo, AAInfo);
9330 "getMemsetStores must return a valid sequence when AlwaysInline");
9351 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
9358 Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
9359 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
9361 Dst.getValueType().getTypeForEVT(Ctx),
9366 bool LowersToMemset =
9377 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
9378 return CallResult.second;
9383 Type *SizeTy,
unsigned ElemSz,
9390 Args.emplace_back(
Size, SizeTy);
9392 RTLIB::Libcall LibraryCall =
9394 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
9408 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
9409 return CallResult.second;
9419 ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
9420 dl.
getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
9424 if (
auto *E = cast_or_null<AtomicSDNode>(FindNodeOrInsertPos(
ID, dl, IP))) {
9425 E->refineAlignment(MMO);
9426 E->refineRanges(MMO);
9431 VTList, MemVT, MMO, ExtType);
9432 createOperands(
N, Ops);
9434 CSEMap.InsertNode(
N, IP);
9450 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
9471 "Invalid Atomic Op");
9478 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
9491 if (Ops.
size() == 1)
9506 if (
Size.hasValue() && !
Size.getValue())
9523 (Opcode <= (
unsigned)std::numeric_limits<int>::max() &&
9525 "Opcode is not a memory-accessing opcode!");
9529 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
9532 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
9533 Opcode, dl.
getIROrder(), VTList, MemVT, MMO));
9538 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9539 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
9544 VTList, MemVT, MMO);
9545 createOperands(
N, Ops);
9547 CSEMap.InsertNode(
N, IP);
9550 VTList, MemVT, MMO);
9551 createOperands(
N, Ops);
9560 SDValue Chain,
int FrameIndex) {
9571 ID.AddInteger(FrameIndex);
9573 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9578 createOperands(
N, Ops);
9579 CSEMap.InsertNode(
N, IP);
9595 ID.AddInteger(Index);
9597 if (
SDNode *E = FindNodeOrInsertPos(
ID, Dl, IP))
9600 auto *
N = newSDNode<PseudoProbeSDNode>(
9602 createOperands(
N, Ops);
9603 CSEMap.InsertNode(
N, IP);
9624 !isa<ConstantSDNode>(
Ptr.getOperand(1)) ||
9625 !isa<FrameIndexSDNode>(
Ptr.getOperand(0)))
9628 int FI = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
9631 Offset + cast<ConstantSDNode>(
Ptr.getOperand(1))->getSExtValue());
9657 "Invalid chain type");
9669 Alignment, AAInfo, Ranges);
9680 assert(VT == MemVT &&
"Non-extending load from different memory type!");
9684 "Should only be an extending load, not truncating!");
9686 "Cannot convert from FP to Int or Int -> FP!");
9688 "Cannot use an ext load to convert to or from a vector!");
9691 "Cannot use an ext load to change the number of vector elements!");
9698 "Range metadata and load type must match!");
9709 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
9710 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
9714 if (
auto *E = cast_or_null<LoadSDNode>(FindNodeOrInsertPos(
ID, dl, IP))) {
9715 E->refineAlignment(MMO);
9716 E->refineRanges(MMO);
9720 ExtType, MemVT, MMO);
9721 createOperands(
N, Ops);
9723 CSEMap.InsertNode(
N, IP);
9737 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
9755 MemVT, Alignment, MMOFlags, AAInfo);
9770 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
9773 LD->getMemOperand()->getFlags() &
9776 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
9777 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
9810 bool IsTruncating) {
9814 IsTruncating =
false;
9815 }
else if (!IsTruncating) {
9816 assert(VT == SVT &&
"No-truncating store from different memory type!");
9819 "Should only be a truncating store, not extending!");
9822 "Cannot use trunc store to convert to or from a vector!");
9825 "Cannot use trunc store to change the number of vector elements!");
9836 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9837 dl.
getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
9841 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9842 cast<StoreSDNode>(E)->refineAlignment(MMO);
9846 IsTruncating, SVT, MMO);
9847 createOperands(
N, Ops);
9849 CSEMap.InsertNode(
N, IP);
9862 "Invalid chain type");
9872 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
9887 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
9889 ST->getMemoryVT(), ST->getMemOperand(), AM,
9890 ST->isTruncatingStore());
9898 const MDNode *Ranges,
bool IsExpanding) {
9911 Alignment, AAInfo, Ranges);
9912 return getLoadVP(AM, ExtType, VT, dl, Chain,
Ptr,
Offset, Mask, EVL, MemVT,
9931 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
9932 dl.
getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9936 if (
auto *E = cast_or_null<VPLoadSDNode>(FindNodeOrInsertPos(
ID, dl, IP))) {
9937 E->refineAlignment(MMO);
9938 E->refineRanges(MMO);
9942 ExtType, IsExpanding, MemVT, MMO);
9943 createOperands(
N, Ops);
9945 CSEMap.InsertNode(
N, IP);
9961 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
9970 Mask, EVL, VT, MMO, IsExpanding);
9979 const AAMDNodes &AAInfo,
bool IsExpanding) {
9982 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo,
nullptr,
9992 EVL, MemVT, MMO, IsExpanding);
9998 auto *LD = cast<VPLoadSDNode>(OrigLoad);
9999 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
10002 LD->getMemOperand()->getFlags() &
10005 LD->getChain(),
Base,
Offset, LD->getMask(),
10006 LD->getVectorLength(), LD->getPointerInfo(),
10007 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
10008 nullptr, LD->isExpandingLoad());
10015 bool IsCompressing) {
10025 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10026 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10029 void *IP =
nullptr;
10030 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10031 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10035 IsTruncating, IsCompressing, MemVT, MMO);
10036 createOperands(
N, Ops);
10038 CSEMap.InsertNode(
N, IP);
10051 bool IsCompressing) {
10062 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
10071 bool IsCompressing) {
10078 false, IsCompressing);
10081 "Should only be a truncating store, not extending!");
10084 "Cannot use trunc store to convert to or from a vector!");
10087 "Cannot use trunc store to change the number of vector elements!");
10091 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Mask, EVL};
10095 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10099 void *IP =
nullptr;
10100 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10101 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10107 createOperands(
N, Ops);
10109 CSEMap.InsertNode(
N, IP);
10119 auto *ST = cast<VPStoreSDNode>(OrigStore);
10120 assert(ST->getOffset().isUndef() &&
"Store is already an indexed store!");
10122 SDValue Ops[] = {ST->getChain(), ST->getValue(),
Base,
10123 Offset, ST->getMask(), ST->getVectorLength()};
10126 ID.AddInteger(ST->getMemoryVT().getRawBits());
10127 ID.AddInteger(ST->getRawSubclassData());
10128 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
10129 ID.AddInteger(ST->getMemOperand()->getFlags());
10130 void *IP =
nullptr;
10131 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10134 auto *
N = newSDNode<VPStoreSDNode>(
10136 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
10137 createOperands(
N, Ops);
10139 CSEMap.InsertNode(
N, IP);
10159 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
10160 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10163 void *IP =
nullptr;
10164 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10165 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
10170 newSDNode<VPStridedLoadSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs, AM,
10171 ExtType, IsExpanding, MemVT, MMO);
10172 createOperands(
N, Ops);
10173 CSEMap.InsertNode(
N, IP);
10184 bool IsExpanding) {
10187 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
10196 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
10205 bool IsTruncating,
bool IsCompressing) {
10215 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
10216 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10218 void *IP =
nullptr;
10219 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10220 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
10223 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
10224 VTs, AM, IsTruncating,
10225 IsCompressing, MemVT, MMO);
10226 createOperands(
N, Ops);
10228 CSEMap.InsertNode(
N, IP);
10240 bool IsCompressing) {
10247 false, IsCompressing);
10250 "Should only be a truncating store, not extending!");
10253 "Cannot use trunc store to convert to or from a vector!");
10256 "Cannot use trunc store to change the number of vector elements!");
10260 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Stride, Mask, EVL};
10264 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
10267 void *IP =
nullptr;
10268 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10269 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
10272 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
10274 IsCompressing, SVT, MMO);
10275 createOperands(
N, Ops);
10277 CSEMap.InsertNode(
N, IP);
10287 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10292 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
10296 void *IP =
nullptr;
10297 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10298 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
10303 VT, MMO, IndexType);
10304 createOperands(
N, Ops);
10306 assert(
N->getMask().getValueType().getVectorElementCount() ==
10307 N->getValueType(0).getVectorElementCount() &&
10308 "Vector width mismatch between mask and data");
10309 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10310 N->getValueType(0).getVectorElementCount().isScalable() &&
10311 "Scalable flags of index and data do not match");
10313 N->getIndex().getValueType().getVectorElementCount(),
10314 N->getValueType(0).getVectorElementCount()) &&
10315 "Vector width mismatch between index and data");
10316 assert(isa<ConstantSDNode>(
N->getScale()) &&
10317 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10318 "Scale should be a constant power of 2");
10320 CSEMap.InsertNode(
N, IP);
10331 assert(Ops.
size() == 7 &&
"Incompatible number of operands");
10336 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
10340 void *IP =
nullptr;
10341 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10342 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
10346 VT, MMO, IndexType);
10347 createOperands(
N, Ops);
10349 assert(
N->getMask().getValueType().getVectorElementCount() ==
10350 N->getValue().getValueType().getVectorElementCount() &&
10351 "Vector width mismatch between mask and data");
10353 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10354 N->getValue().getValueType().getVectorElementCount().isScalable() &&
10355 "Scalable flags of index and data do not match");
10357 N->getIndex().getValueType().getVectorElementCount(),
10358 N->getValue().getValueType().getVectorElementCount()) &&
10359 "Vector width mismatch between index and data");
10360 assert(isa<ConstantSDNode>(
N->getScale()) &&
10361 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10362 "Scale should be a constant power of 2");
10364 CSEMap.InsertNode(
N, IP);
10379 "Unindexed masked load with an offset!");
10386 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
10387 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
10390 void *IP =
nullptr;
10391 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10392 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
10396 AM, ExtTy, isExpanding, MemVT, MMO);
10397 createOperands(
N, Ops);
10399 CSEMap.InsertNode(
N, IP);
10410 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
10412 Offset, LD->getMask(), LD->getPassThru(),
10413 LD->getMemoryVT(), LD->getMemOperand(), AM,
10414 LD->getExtensionType(), LD->isExpandingLoad());
10422 bool IsCompressing) {
10424 "Invalid chain type");
10427 "Unindexed masked store with an offset!");
10434 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
10435 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10438 void *IP =
nullptr;
10439 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10440 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
10445 IsTruncating, IsCompressing, MemVT, MMO);
10446 createOperands(
N, Ops);
10448 CSEMap.InsertNode(
N, IP);
10459 assert(ST->getOffset().isUndef() &&
10460 "Masked store is already a indexed store!");
10462 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
10463 AM, ST->isTruncatingStore(), ST->isCompressingStore());
10471 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10476 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
10477 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
10480 void *IP =
nullptr;
10481 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10482 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
10487 VTs, MemVT, MMO, IndexType, ExtTy);
10488 createOperands(
N, Ops);
10490 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
10491 "Incompatible type of the PassThru value in MaskedGatherSDNode");
10492 assert(
N->getMask().getValueType().getVectorElementCount() ==
10493 N->getValueType(0).getVectorElementCount() &&
10494 "Vector width mismatch between mask and data");
10495 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10496 N->getValueType(0).getVectorElementCount().isScalable() &&
10497 "Scalable flags of index and data do not match");
10499 N->getIndex().getValueType().getVectorElementCount(),
10500 N->getValueType(0).getVectorElementCount()) &&
10501 "Vector width mismatch between index and data");
10502 assert(isa<ConstantSDNode>(
N->getScale()) &&
10503 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10504 "Scale should be a constant power of 2");
10506 CSEMap.InsertNode(
N, IP);
10518 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
10523 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
10524 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
10527 void *IP =
nullptr;
10528 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10529 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
10534 VTs, MemVT, MMO, IndexType, IsTrunc);
10535 createOperands(
N, Ops);
10537 assert(
N->getMask().getValueType().getVectorElementCount() ==
10538 N->getValue().getValueType().getVectorElementCount() &&
10539 "Vector width mismatch between mask and data");
10541 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
10542 N->getValue().getValueType().getVectorElementCount().isScalable() &&
10543 "Scalable flags of index and data do not match");
10545 N->getIndex().getValueType().getVectorElementCount(),
10546 N->getValue().getValueType().getVectorElementCount()) &&
10547 "Vector width mismatch between index and data");
10548 assert(isa<ConstantSDNode>(
N->getScale()) &&
10549 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10550 "Scale should be a constant power of 2");
10552 CSEMap.InsertNode(
N, IP);
10563 assert(Ops.
size() == 7 &&
"Incompatible number of operands");
10568 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
10569 dl.
getIROrder(), VTs, MemVT, MMO, IndexType));
10572 void *IP =
nullptr;
10573 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10574 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
10579 VTs, MemVT, MMO, IndexType);
10580 createOperands(
N, Ops);
10582 assert(
N->getMask().getValueType().getVectorElementCount() ==
10583 N->getIndex().getValueType().getVectorElementCount() &&
10584 "Vector width mismatch between mask and data");
10585 assert(isa<ConstantSDNode>(
N->getScale()) &&
10586 N->getScale()->getAsAPIntVal().isPowerOf2() &&
10587 "Scale should be a constant power of 2");
10588 assert(
N->getInc().getValueType().isInteger() &&
"Non integer update value");
10590 CSEMap.InsertNode(
N, IP);
10605 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(
DL.getIROrder(),
10609 void *IP =
nullptr;
10610 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10611 cast<VPLoadFFSDNode>(E)->refineAlignment(MMO);
10614 auto *
N = newSDNode<VPLoadFFSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
10616 createOperands(
N, Ops);
10618 CSEMap.InsertNode(
N, IP);
10633 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10637 void *IP =
nullptr;
10638 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10643 createOperands(
N, Ops);
10645 CSEMap.InsertNode(
N, IP);
10660 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
10664 void *IP =
nullptr;
10665 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10670 createOperands(
N, Ops);
10672 CSEMap.InsertNode(
N, IP);
10683 if (
Cond.isUndef())
10718 return !Val || Val->getAPIntValue().uge(
X.getScalarValueSizeInBits());
10724 if (
X.getValueType().getScalarType() == MVT::i1)
10737 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
10739 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
10742 if (Flags.hasNoNaNs() && (HasNan ||
X.isUndef() ||
Y.isUndef()))
10745 if (Flags.hasNoInfs() && (HasInf ||
X.isUndef() ||
Y.isUndef()))
10768 if (Opcode ==
ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
10783 switch (Ops.
size()) {
10784 case 0:
return getNode(Opcode,
DL, VT);
10786 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1]);
10787 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2]);
10794 return getNode(Opcode,
DL, VT, NewOps);
10802 return getNode(Opcode,
DL, VT, Ops, Flags);
10807 unsigned NumOps = Ops.
size();
10809 case 0:
return getNode(Opcode,
DL, VT);
10810 case 1:
return getNode(Opcode,
DL, VT, Ops[0], Flags);
10811 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Flags);
10812 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2], Flags);
10817 for (
const auto &
Op : Ops)
10819 "Operand is DELETED_NODE!");
10834 assert(NumOps == 5 &&
"SELECT_CC takes 5 operands!");
10836 "LHS and RHS of condition must have same type!");
10838 "True and False arms of SelectCC must have same type!");
10840 "select_cc node must be of same type as true and false value!");
10844 "Expected select_cc with vector result to have the same sized "
10845 "comparison type!");
10848 assert(NumOps == 5 &&
"BR_CC takes 5 operands!");
10850 "LHS/RHS of comparison should match types!");
10856 Opcode = ISD::VP_XOR;
10861 Opcode = ISD::VP_AND;
10863 case ISD::VP_REDUCE_MUL:
10866 Opcode = ISD::VP_REDUCE_AND;
10868 case ISD::VP_REDUCE_ADD:
10871 Opcode = ISD::VP_REDUCE_XOR;
10873 case ISD::VP_REDUCE_SMAX:
10874 case ISD::VP_REDUCE_UMIN:
10878 Opcode = ISD::VP_REDUCE_AND;
10880 case ISD::VP_REDUCE_SMIN:
10881 case ISD::VP_REDUCE_UMAX:
10885 Opcode = ISD::VP_REDUCE_OR;
10893 if (VT != MVT::Glue) {
10896 void *IP =
nullptr;
10898 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10899 E->intersectFlagsWith(Flags);
10903 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10904 createOperands(
N, Ops);
10906 CSEMap.InsertNode(
N, IP);
10908 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10909 createOperands(
N, Ops);
10912 N->setFlags(Flags);
10938 return getNode(Opcode,
DL, VTList, Ops, Flags);
10944 return getNode(Opcode,
DL, VTList.
VTs[0], Ops, Flags);
10947 for (
const auto &
Op : Ops)
10949 "Operand is DELETED_NODE!");
10958 "Invalid add/sub overflow op!");
10960 Ops[0].getValueType() == Ops[1].getValueType() &&
10961 Ops[0].getValueType() == VTList.
VTs[0] &&
10962 "Binary operator types must match!");
10963 SDValue N1 = Ops[0], N2 = Ops[1];
10969 if (N2CV && N2CV->
isZero()) {
11000 "Invalid add/sub overflow op!");
11002 Ops[0].getValueType() == Ops[1].getValueType() &&
11003 Ops[0].getValueType() == VTList.
VTs[0] &&
11004 Ops[2].getValueType() == VTList.
VTs[1] &&
11005 "Binary operator types must match!");
11011 VTList.
VTs[0] == Ops[0].getValueType() &&
11012 VTList.
VTs[0] == Ops[1].getValueType() &&
11013 "Binary operator types must match!");
11019 unsigned OutWidth = Width * 2;
11023 Val = Val.
sext(OutWidth);
11024 Mul =
Mul.sext(OutWidth);
11026 Val = Val.
zext(OutWidth);
11027 Mul =
Mul.zext(OutWidth);
11041 VTList.
VTs[0] == Ops[0].getValueType() &&
"frexp type mismatch");
11057 "Invalid STRICT_FP_EXTEND!");
11059 Ops[1].getValueType().isFloatingPoint() &&
"Invalid FP cast!");
11061 "STRICT_FP_EXTEND result type should be vector iff the operand "
11062 "type is vector!");
11065 Ops[1].getValueType().getVectorElementCount()) &&
11066 "Vector element count mismatch!");
11068 "Invalid fpext node, dst <= src!");
11071 assert(VTList.
NumVTs == 2 && Ops.
size() == 3 &&
"Invalid STRICT_FP_ROUND!");
11073 "STRICT_FP_ROUND result type should be vector iff the operand "
11074 "type is vector!");
11077 Ops[1].getValueType().getVectorElementCount()) &&
11078 "Vector element count mismatch!");
11080 Ops[1].getValueType().isFloatingPoint() &&
11081 VTList.
VTs[0].
bitsLT(Ops[1].getValueType()) &&
11083 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
11084 "Invalid STRICT_FP_ROUND!");
11090 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
11093 void *IP =
nullptr;
11094 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11095 E->intersectFlagsWith(Flags);
11099 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11100 createOperands(
N, Ops);
11101 CSEMap.InsertNode(
N, IP);
11103 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11104 createOperands(
N, Ops);
11107 N->setFlags(Flags);
11122 return getNode(Opcode,
DL, VTList, Ops);
11128 return getNode(Opcode,
DL, VTList, Ops);
11133 SDValue Ops[] = { N1, N2, N3 };
11134 return getNode(Opcode,
DL, VTList, Ops);
11139 SDValue Ops[] = { N1, N2, N3, N4 };
11140 return getNode(Opcode,
DL, VTList, Ops);
11146 SDValue Ops[] = { N1, N2, N3, N4, N5 };
11147 return getNode(Opcode,
DL, VTList, Ops);
11154 return makeVTList(&(*EVTs.insert(VT).first), 1);
11163 void *IP =
nullptr;
11169 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 2);
11170 VTListMap.InsertNode(Result, IP);
11172 return Result->getSDVTList();
11182 void *IP =
nullptr;
11189 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 3);
11190 VTListMap.InsertNode(Result, IP);
11192 return Result->getSDVTList();
11203 void *IP =
nullptr;
11211 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 4);
11212 VTListMap.InsertNode(Result, IP);
11214 return Result->getSDVTList();
11218 unsigned NumVTs = VTs.
size();
11220 ID.AddInteger(NumVTs);
11221 for (
unsigned index = 0; index < NumVTs; index++) {
11222 ID.AddInteger(VTs[index].getRawBits());
11225 void *IP =
nullptr;
11230 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, NumVTs);
11231 VTListMap.InsertNode(Result, IP);
11233 return Result->getSDVTList();
11244 assert(
N->getNumOperands() == 1 &&
"Update with wrong number of operands");
11247 if (
Op ==
N->getOperand(0))
return N;
11250 void *InsertPos =
nullptr;
11251 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Op, InsertPos))
11256 if (!RemoveNodeFromCSEMaps(
N))
11257 InsertPos =
nullptr;
11260 N->OperandList[0].set(
Op);
11264 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11269 assert(
N->getNumOperands() == 2 &&
"Update with wrong number of operands");
11272 if (Op1 ==
N->getOperand(0) && Op2 ==
N->getOperand(1))
11276 void *InsertPos =
nullptr;
11277 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Op1, Op2, InsertPos))
11282 if (!RemoveNodeFromCSEMaps(
N))
11283 InsertPos =
nullptr;
11286 if (
N->OperandList[0] != Op1)
11287 N->OperandList[0].set(Op1);
11288 if (
N->OperandList[1] != Op2)
11289 N->OperandList[1].set(Op2);
11293 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11299 SDValue Ops[] = { Op1, Op2, Op3 };
11306 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
11313 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
11319 unsigned NumOps = Ops.
size();
11320 assert(
N->getNumOperands() == NumOps &&
11321 "Update with wrong number of operands");
11324 if (std::equal(Ops.
begin(), Ops.
end(),
N->op_begin()))
11328 void *InsertPos =
nullptr;
11329 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Ops, InsertPos))
11334 if (!RemoveNodeFromCSEMaps(
N))
11335 InsertPos =
nullptr;
11338 for (
unsigned i = 0; i != NumOps; ++i)
11339 if (
N->OperandList[i] != Ops[i])
11340 N->OperandList[i].set(Ops[i]);
11344 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
11361 if (NewMemRefs.
empty()) {
11367 if (NewMemRefs.
size() == 1) {
11368 N->MemRefs = NewMemRefs[0];
11374 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.
size());
11376 N->MemRefs = MemRefsBuffer;
11377 N->NumMemRefs =
static_cast<int>(NewMemRefs.
size());
11400 SDValue Ops[] = { Op1, Op2 };
11408 SDValue Ops[] = { Op1, Op2, Op3 };
11441 SDValue Ops[] = { Op1, Op2 };
11449 New->setNodeId(-1);
11469 unsigned Order = std::min(
N->getIROrder(), OLoc.
getIROrder());
11470 N->setIROrder(Order);
11493 void *IP =
nullptr;
11494 if (VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue) {
11498 return UpdateSDLocOnMergeSDNode(ON,
SDLoc(
N));
11501 if (!RemoveNodeFromCSEMaps(
N))
11506 N->ValueList = VTs.
VTs;
11516 if (Used->use_empty())
11517 DeadNodeSet.
insert(Used);
11522 MN->clearMemRefs();
11526 createOperands(
N, Ops);
11530 if (!DeadNodeSet.
empty()) {
11532 for (
SDNode *
N : DeadNodeSet)
11533 if (
N->use_empty())
11539 CSEMap.InsertNode(
N, IP);
11544 unsigned OrigOpc = Node->getOpcode();
11549#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11550 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
11551#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
11552 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
11553#include "llvm/IR/ConstrainedOps.def"
11556 assert(Node->getNumValues() == 2 &&
"Unexpected number of results!");
11559 SDValue InputChain = Node->getOperand(0);
11564 for (
unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
11607 SDValue Ops[] = { Op1, Op2 };
11615 SDValue Ops[] = { Op1, Op2, Op3 };
11629 SDValue Ops[] = { Op1, Op2 };
11637 SDValue Ops[] = { Op1, Op2, Op3 };
11652 SDValue Ops[] = { Op1, Op2 };
11661 SDValue Ops[] = { Op1, Op2, Op3 };
11682 bool DoCSE = VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue;
11684 void *IP =
nullptr;
11690 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11691 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E,
DL));
11696 N = newSDNode<MachineSDNode>(~Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
11697 createOperands(
N, Ops);
11700 CSEMap.InsertNode(
N, IP);
11713 VT, Operand, SRIdxVal);
11723 VT, Operand, Subreg, SRIdxVal);
11740 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
11743 void *IP =
nullptr;
11745 E->intersectFlagsWith(Flags);
11755 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
11758 void *IP =
nullptr;
11759 if (FindNodeOrInsertPos(
ID,
SDLoc(), IP))
11769 SDNode *
N,
unsigned R,
bool IsIndirect,
11771 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11772 "Expected inlined-at fields to agree");
11775 {}, IsIndirect,
DL, O,
11784 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11785 "Expected inlined-at fields to agree");
11798 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11799 "Expected inlined-at fields to agree");
11810 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11811 "Expected inlined-at fields to agree");
11814 Dependencies, IsIndirect,
DL, O,
11822 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11823 "Expected inlined-at fields to agree");
11826 {}, IsIndirect,
DL, O,
11834 unsigned O,
bool IsVariadic) {
11835 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
11836 "Expected inlined-at fields to agree");
11839 DL, O, IsVariadic);
11843 unsigned OffsetInBits,
unsigned SizeInBits,
11844 bool InvalidateDbg) {
11847 assert(FromNode && ToNode &&
"Can't modify dbg values");
11852 if (
From == To || FromNode == ToNode)
11864 if (Dbg->isInvalidated())
11871 bool Changed =
false;
11872 auto NewLocOps = Dbg->copyLocationOps();
11874 NewLocOps.begin(), NewLocOps.end(),
11876 bool Match = Op == FromLocOp;
11886 auto *Expr = Dbg->getExpression();
11892 if (
auto FI = Expr->getFragmentInfo())
11893 if (OffsetInBits + SizeInBits > FI->SizeInBits)
11902 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
11905 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
11906 Dbg->getDebugLoc(), std::max(ToNode->
getIROrder(), Dbg->getOrder()),
11907 Dbg->isVariadic());
11910 if (InvalidateDbg) {
11912 Dbg->setIsInvalidated();
11913 Dbg->setIsEmitted();
11919 "Transferred DbgValues should depend on the new SDNode");
11925 if (!
N.getHasDebugValue())
11928 auto GetLocationOperand = [](
SDNode *Node,
unsigned ResNo) {
11929 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
11936 if (DV->isInvalidated())
11938 switch (
N.getOpcode()) {
11944 if (!isa<ConstantSDNode>(N0)) {
11945 bool RHSConstant = isa<ConstantSDNode>(N1);
11948 Offset =
N.getConstantOperandVal(1);
11951 if (!RHSConstant && DV->isIndirect())
11958 auto *DIExpr = DV->getExpression();
11959 auto NewLocOps = DV->copyLocationOps();
11960 bool Changed =
false;
11961 size_t OrigLocOpsSize = NewLocOps.size();
11962 for (
size_t i = 0; i < OrigLocOpsSize; ++i) {
11967 NewLocOps[i].getSDNode() != &
N)
11978 const auto *TmpDIExpr =
11986 NewLocOps.push_back(
RHS);
11992 assert(Changed &&
"Salvage target doesn't use N");
11995 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
11997 auto AdditionalDependencies = DV->getAdditionalDependencies();
11999 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
12000 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
12002 DV->setIsInvalidated();
12003 DV->setIsEmitted();
12005 N0.
getNode()->dumprFull(
this);
12006 dbgs() <<
" into " << *DIExpr <<
'\n');
12013 TypeSize ToSize =
N.getValueSizeInBits(0);
12017 auto NewLocOps = DV->copyLocationOps();
12018 bool Changed =
false;
12019 for (
size_t i = 0; i < NewLocOps.size(); ++i) {
12021 NewLocOps[i].getSDNode() != &
N)
12028 assert(Changed &&
"Salvage target doesn't use N");
12033 DV->getAdditionalDependencies(), DV->isIndirect(),
12034 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
12037 DV->setIsInvalidated();
12038 DV->setIsEmitted();
12040 dbgs() <<
" into " << *DbgExpression <<
'\n');
12047 assert((!Dbg->getSDNodes().empty() ||
12050 return Op.getKind() == SDDbgOperand::FRAMEIX;
12052 "Salvaged DbgValue should depend on a new SDNode");
12060 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(
DL) &&
12061 "Expected inlined-at fields to agree");
12077 while (UI != UE &&
N == UI->
getUser())
12085 :
SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
12098 "Cannot replace with this method!");
12114 RAUWUpdateListener Listener(*
this, UI, UE);
12119 RemoveNodeFromCSEMaps(
User);
12134 AddModifiedNodeToCSEMaps(
User);
12150 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
12153 "Cannot use this version of ReplaceAllUsesWith!");
12161 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
12162 if (
From->hasAnyUseOfValue(i)) {
12163 assert((i < To->getNumValues()) &&
"Invalid To location");
12172 RAUWUpdateListener Listener(*
this, UI, UE);
12177 RemoveNodeFromCSEMaps(
User);
12193 AddModifiedNodeToCSEMaps(
User);
12207 if (
From->getNumValues() == 1)
12210 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i) {
12220 RAUWUpdateListener Listener(*
this, UI, UE);
12225 RemoveNodeFromCSEMaps(
User);
12231 bool To_IsDivergent =
false;
12240 if (To_IsDivergent !=
From->isDivergent())
12245 AddModifiedNodeToCSEMaps(
User);
12258 if (
From == To)
return;
12261 if (
From.getNode()->getNumValues() == 1) {
12273 UE =
From.getNode()->use_end();
12274 RAUWUpdateListener Listener(*
this, UI, UE);
12277 bool UserRemovedFromCSEMaps =
false;
12287 if (
Use.getResNo() !=
From.getResNo()) {
12294 if (!UserRemovedFromCSEMaps) {
12295 RemoveNodeFromCSEMaps(
User);
12296 UserRemovedFromCSEMaps =
true;
12306 if (!UserRemovedFromCSEMaps)
12311 AddModifiedNodeToCSEMaps(
User);
12330bool operator<(
const UseMemo &L,
const UseMemo &R) {
12331 return (intptr_t)L.User < (intptr_t)R.User;
12341 for (UseMemo &Memo :
Uses)
12342 if (Memo.User ==
N)
12343 Memo.User =
nullptr;
12355 switch (
Node->getOpcode()) {
12369 "Conflicting divergence information!");
12374 for (
const auto &
Op :
N->ops()) {
12375 EVT VT =
Op.getValueType();
12378 if (VT != MVT::Other &&
Op.getNode()->isDivergent() &&
12390 if (
N->SDNodeBits.IsDivergent != IsDivergent) {
12391 N->SDNodeBits.IsDivergent = IsDivergent;
12394 }
while (!Worklist.
empty());
12397void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
12399 Order.
reserve(AllNodes.size());
12401 unsigned NOps =
N.getNumOperands();
12404 Order.push_back(&
N);
12406 for (
size_t I = 0;
I != Order.size(); ++
I) {
12408 for (
auto *U :
N->users()) {
12409 unsigned &UnsortedOps = Degree[U];
12410 if (0 == --UnsortedOps)
12411 Order.push_back(U);
12416#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
12417void SelectionDAG::VerifyDAGDivergence() {
12418 std::vector<SDNode *> TopoOrder;
12419 CreateTopologicalOrder(TopoOrder);
12420 for (
auto *
N : TopoOrder) {
12422 "Divergence bit inconsistency detected");
12445 for (
unsigned i = 0; i != Num; ++i) {
12446 unsigned FromResNo =
From[i].getResNo();
12449 if (
Use.getResNo() == FromResNo) {
12451 Uses.push_back(Memo);
12458 RAUOVWUpdateListener Listener(*
this,
Uses);
12460 for (
unsigned UseIndex = 0, UseIndexEnd =
Uses.size();
12461 UseIndex != UseIndexEnd; ) {
12467 if (
User ==
nullptr) {
12473 RemoveNodeFromCSEMaps(
User);
12480 unsigned i =
Uses[UseIndex].Index;
12485 }
while (UseIndex != UseIndexEnd &&
Uses[UseIndex].
User ==
User);
12489 AddModifiedNodeToCSEMaps(
User);
12497 unsigned DAGSize = 0;
12513 unsigned Degree =
N.getNumOperands();
12516 N.setNodeId(DAGSize++);
12518 if (Q != SortedPos)
12519 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
12520 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12524 N.setNodeId(Degree);
12536 unsigned Degree =
P->getNodeId();
12537 assert(Degree != 0 &&
"Invalid node degree");
12541 P->setNodeId(DAGSize++);
12542 if (
P->getIterator() != SortedPos)
12543 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(
P));
12544 assert(SortedPos != AllNodes.end() &&
"Overran node list");
12548 P->setNodeId(Degree);
12551 if (Node.getIterator() == SortedPos) {
12555 dbgs() <<
"Overran sorted position:\n";
12557 dbgs() <<
"Checking if this is due to cycles\n";
12564 assert(SortedPos == AllNodes.end() &&
12565 "Topological sort incomplete!");
12567 "First node in topological sort is not the entry token!");
12568 assert(AllNodes.front().getNodeId() == 0 &&
12569 "First node in topological sort has non-zero id!");
12570 assert(AllNodes.front().getNumOperands() == 0 &&
12571 "First node in topological sort has operands!");
12572 assert(AllNodes.back().getNodeId() == (
int)DAGSize-1 &&
12573 "Last node in topologic sort has unexpected id!");
12574 assert(AllNodes.back().use_empty() &&
12575 "Last node in topologic sort has users!");
12583 for (
SDNode *SD : DB->getSDNodes()) {
12587 SD->setHasDebugValue(
true);
12589 DbgInfo->
add(DB, isParameter);
12596 assert(isa<MemSDNode>(NewMemOpChain) &&
"Expected a memop node");
12602 if (OldChain == NewMemOpChain || OldChain.
use_empty())
12603 return NewMemOpChain;
12606 OldChain, NewMemOpChain);
12609 return TokenFactor;
12614 assert(isa<MemSDNode>(NewMemOp.
getNode()) &&
"Expected a memop node");
12622 assert(isa<ExternalSymbolSDNode>(
Op) &&
"Node should be an ExternalSymbol");
12624 auto *Symbol = cast<ExternalSymbolSDNode>(
Op)->getSymbol();
12628 if (OutFunction !=
nullptr)
12636 std::string ErrorStr;
12638 ErrorFormatter <<
"Undefined external symbol ";
12639 ErrorFormatter <<
'"' << Symbol <<
'"';
12649 return Const !=
nullptr && Const->isZero();
12658 return Const !=
nullptr && Const->isZero() && !Const->isNegative();
12663 return Const !=
nullptr && Const->isAllOnes();
12668 return Const !=
nullptr && Const->isOne();
12673 return Const !=
nullptr && Const->isMinSignedValue();
12677 unsigned OperandNo) {
12682 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
12688 return Const.isZero();
12690 return Const.isOne();
12693 return Const.isAllOnes();
12695 return Const.isMinSignedValue();
12697 return Const.isMaxSignedValue();
12702 return OperandNo == 1 && Const.isZero();
12705 return OperandNo == 1 && Const.isOne();
12710 return ConstFP->isZero() &&
12711 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
12713 return OperandNo == 1 && ConstFP->isZero() &&
12714 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
12716 return ConstFP->isExactlyValue(1.0);
12718 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
12722 EVT VT = V.getValueType();
12724 APFloat NeutralAF = !Flags.hasNoNaNs()
12726 : !Flags.hasNoInfs()
12732 return ConstFP->isExactlyValue(NeutralAF);
12741 V = V.getOperand(0);
12746 while (V.getOpcode() ==
ISD::BITCAST && V.getOperand(0).hasOneUse())
12747 V = V.getOperand(0);
12753 V = V.getOperand(0);
12759 V = V.getOperand(0);
12767 unsigned NumBits = V.getScalarValueSizeInBits();
12770 return C && (
C->getAPIntValue().countr_one() >= NumBits);
12774 bool AllowTruncation) {
12775 EVT VT =
N.getValueType();
12784 bool AllowTruncation) {
12791 EVT VecEltVT =
N->getValueType(0).getVectorElementType();
12792 if (
auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(0))) {
12793 EVT CVT = CN->getValueType(0);
12794 assert(CVT.
bitsGE(VecEltVT) &&
"Illegal splat_vector element extension");
12795 if (AllowTruncation || CVT == VecEltVT)
12802 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
12807 if (CN && (UndefElements.
none() || AllowUndefs)) {
12809 EVT NSVT =
N.getValueType().getScalarType();
12810 assert(CVT.
bitsGE(NSVT) &&
"Illegal build vector element extension");
12811 if (AllowTruncation || (CVT == NSVT))
12820 EVT VT =
N.getValueType();
12828 const APInt &DemandedElts,
12829 bool AllowUndefs) {
12836 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
12838 if (CN && (UndefElements.
none() || AllowUndefs))
12853 return C &&
C->isZero();
12859 return C &&
C->isOne();
12864 unsigned BitWidth =
N.getScalarValueSizeInBits();
12866 return C &&
C->isAllOnes() &&
C->getValueSizeInBits(0) ==
BitWidth;
12872 APInt(
C->getAPIntValue().getBitWidth(), 1));
12878 return C &&
C->isZero();
12887 :
SDNode(
Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
12911 std::vector<EVT> VTs;
12924const EVT *SDNode::getValueTypeList(
MVT VT) {
12925 static EVTArray SimpleVTArray;
12928 return &SimpleVTArray.VTs[VT.
SimpleTy];
12937 if (U.getResNo() ==
Value)
12975 return any_of(
N->op_values(),
12976 [
this](
SDValue Op) { return this == Op.getNode(); });
12990 unsigned Depth)
const {
12991 if (*
this == Dest)
return true;
12995 if (
Depth == 0)
return false;
13015 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
13020 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(*
this)) {
13021 if (Ld->isUnordered())
13022 return Ld->getChain().reachesChainWithoutSideEffects(Dest,
Depth-1);
13035 this->Flags &= Flags;
13041 bool AllowPartials) {
13050 return Op.getOpcode() ==
unsigned(BinOp);
13056 unsigned CandidateBinOp =
Op.getOpcode();
13057 if (
Op.getValueType().isFloatingPoint()) {
13059 switch (CandidateBinOp) {
13061 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
13071 auto PartialReduction = [&](
SDValue Op,
unsigned NumSubElts) {
13072 if (!AllowPartials || !
Op)
13074 EVT OpVT =
Op.getValueType();
13096 unsigned Stages =
Log2_32(
Op.getValueType().getVectorNumElements());
13098 for (
unsigned i = 0; i < Stages; ++i) {
13099 unsigned MaskEnd = (1 << i);
13101 if (
Op.getOpcode() != CandidateBinOp)
13102 return PartialReduction(PrevOp, MaskEnd);
13111 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
13118 return PartialReduction(PrevOp, MaskEnd);
13121 for (
int Index = 0; Index < (int)MaskEnd; ++Index)
13122 if (Shuffle->
getMaskElt(Index) != (int)(MaskEnd + Index))
13123 return PartialReduction(PrevOp, MaskEnd);
13130 while (
Op.getOpcode() == CandidateBinOp) {
13131 unsigned NumElts =
Op.getValueType().getVectorNumElements();
13139 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
13140 if (NumSrcElts != (2 * NumElts))
13155 EVT VT =
N->getValueType(0);
13164 else if (NE > ResNE)
13167 if (
N->getNumValues() == 2) {
13170 EVT VT1 =
N->getValueType(1);
13174 for (i = 0; i != NE; ++i) {
13175 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
13176 SDValue Operand =
N->getOperand(j);
13189 for (; i < ResNE; ++i) {
13201 assert(
N->getNumValues() == 1 &&
13202 "Can't unroll a vector with multiple results!");
13208 for (i= 0; i != NE; ++i) {
13209 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
13210 SDValue Operand =
N->getOperand(j);
13222 switch (
N->getOpcode()) {
13248 const auto *ASC = cast<AddrSpaceCastSDNode>(
N);
13250 ASC->getSrcAddressSpace(),
13251 ASC->getDestAddressSpace()));
13257 for (; i < ResNE; ++i)
13266 unsigned Opcode =
N->getOpcode();
13270 "Expected an overflow opcode");
13272 EVT ResVT =
N->getValueType(0);
13273 EVT OvVT =
N->getValueType(1);
13282 else if (NE > ResNE)
13294 for (
unsigned i = 0; i < NE; ++i) {
13295 SDValue Res =
getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
13318 if (LD->isVolatile() ||
Base->isVolatile())
13321 if (!LD->isSimple())
13323 if (LD->isIndexed() ||
Base->isIndexed())
13325 if (LD->getChain() !=
Base->getChain())
13327 EVT VT = LD->getMemoryVT();
13335 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
13336 return (Dist * (int64_t)Bytes ==
Offset);
13345 int64_t GVOffset = 0;
13357 int FrameIdx = INT_MIN;
13358 int64_t FrameOffset = 0;
13360 FrameIdx = FI->getIndex();
13362 isa<FrameIndexSDNode>(
Ptr.getOperand(0))) {
13364 FrameIdx = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
13365 FrameOffset =
Ptr.getConstantOperandVal(1);
13368 if (FrameIdx != INT_MIN) {
13373 return std::nullopt;
13383 "Split node must be a scalar type");
13388 return std::make_pair(
Lo,
Hi);
13401 return std::make_pair(LoVT, HiVT);
13409 bool *HiIsEmpty)
const {
13419 "Mixing fixed width and scalable vectors when enveloping a type");
13424 *HiIsEmpty =
false;
13432 return std::make_pair(LoVT, HiVT);
13437std::pair<SDValue, SDValue>
13442 "Splitting vector with an invalid mixture of fixed and scalable "
13445 N.getValueType().getVectorMinNumElements() &&
13446 "More vector elements requested than available!");
13455 return std::make_pair(
Lo,
Hi);
13462 EVT VT =
N.getValueType();
13464 "Expecting the mask to be an evenly-sized vector");
13472 return std::make_pair(
Lo,
Hi);
13477 EVT VT =
N.getValueType();
13485 unsigned Start,
unsigned Count,
13487 EVT VT =
Op.getValueType();
13490 if (EltVT ==
EVT())
13493 for (
unsigned i = Start, e = Start + Count; i != e; ++i) {
13505 return Val.MachineCPVal->getType();
13506 return Val.ConstVal->getType();
13510 unsigned &SplatBitSize,
13511 bool &HasAnyUndefs,
13512 unsigned MinSplatBits,
13513 bool IsBigEndian)
const {
13517 if (MinSplatBits > VecWidth)
13522 SplatValue =
APInt(VecWidth, 0);
13523 SplatUndef =
APInt(VecWidth, 0);
13530 assert(NumOps > 0 &&
"isConstantSplat has 0-size build vector");
13533 for (
unsigned j = 0; j < NumOps; ++j) {
13534 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
13536 unsigned BitPos = j * EltWidth;
13539 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
13540 else if (
auto *CN = dyn_cast<ConstantSDNode>(OpVal))
13541 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
13542 else if (
auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
13543 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
13550 HasAnyUndefs = (SplatUndef != 0);
13553 while (VecWidth > 8) {
13558 unsigned HalfSize = VecWidth / 2;
13565 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
13566 MinSplatBits > HalfSize)
13569 SplatValue = HighValue | LowValue;
13570 SplatUndef = HighUndef & LowUndef;
13572 VecWidth = HalfSize;
13581 SplatBitSize = VecWidth;
13588 if (UndefElements) {
13589 UndefElements->
clear();
13590 UndefElements->
resize(NumOps);
13596 for (
unsigned i = 0; i != NumOps; ++i) {
13597 if (!DemandedElts[i])
13600 if (
Op.isUndef()) {
13602 (*UndefElements)[i] =
true;
13603 }
else if (!Splatted) {
13605 }
else if (Splatted !=
Op) {
13611 unsigned FirstDemandedIdx = DemandedElts.
countr_zero();
13613 "Can only have a splat without a constant for all undefs.");
13630 if (UndefElements) {
13631 UndefElements->
clear();
13632 UndefElements->
resize(NumOps);
13640 for (
unsigned I = 0;
I != NumOps; ++
I)
13642 (*UndefElements)[
I] =
true;
13645 for (
unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
13646 Sequence.append(SeqLen,
SDValue());
13647 for (
unsigned I = 0;
I != NumOps; ++
I) {
13648 if (!DemandedElts[
I])
13650 SDValue &SeqOp = Sequence[
I % SeqLen];
13652 if (
Op.isUndef()) {
13657 if (SeqOp && !SeqOp.
isUndef() && SeqOp !=
Op) {
13663 if (!Sequence.empty())
13667 assert(Sequence.empty() &&
"Failed to empty non-repeating sequence pattern");
13680 return dyn_cast_or_null<ConstantSDNode>(
13686 return dyn_cast_or_null<ConstantSDNode>(
getSplatValue(UndefElements));
13692 return dyn_cast_or_null<ConstantFPSDNode>(
13698 return dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements));
13705 dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements))) {
13708 const APFloat &APF = CN->getValueAPF();
13714 return IntVal.exactLogBase2();
13720 bool IsLittleEndian,
unsigned DstEltSizeInBits,
13728 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13729 "Invalid bitcast scale");
13734 BitVector SrcUndeElements(NumSrcOps,
false);
13736 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13738 if (
Op.isUndef()) {
13739 SrcUndeElements.
set(
I);
13742 auto *CInt = dyn_cast<ConstantSDNode>(
Op);
13743 auto *CFP = dyn_cast<ConstantFPSDNode>(
Op);
13744 assert((CInt || CFP) &&
"Unknown constant");
13745 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
13746 : CFP->getValueAPF().bitcastToAPInt();
13750 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
13751 SrcBitElements, UndefElements, SrcUndeElements);
13756 unsigned DstEltSizeInBits,
13761 unsigned NumSrcOps = SrcBitElements.
size();
13762 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
13763 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13764 "Invalid bitcast scale");
13765 assert(NumSrcOps == SrcUndefElements.
size() &&
13766 "Vector size mismatch");
13768 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
13769 DstUndefElements.
clear();
13770 DstUndefElements.
resize(NumDstOps,
false);
13774 if (SrcEltSizeInBits <= DstEltSizeInBits) {
13775 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
13776 for (
unsigned I = 0;
I != NumDstOps; ++
I) {
13777 DstUndefElements.
set(
I);
13778 APInt &DstBits = DstBitElements[
I];
13779 for (
unsigned J = 0; J != Scale; ++J) {
13780 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13781 if (SrcUndefElements[
Idx])
13783 DstUndefElements.
reset(
I);
13784 const APInt &SrcBits = SrcBitElements[
Idx];
13786 "Illegal constant bitwidths");
13787 DstBits.
insertBits(SrcBits, J * SrcEltSizeInBits);
13794 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
13795 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13796 if (SrcUndefElements[
I]) {
13797 DstUndefElements.
set(
I * Scale, (
I + 1) * Scale);
13800 const APInt &SrcBits = SrcBitElements[
I];
13801 for (
unsigned J = 0; J != Scale; ++J) {
13802 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
13803 APInt &DstBits = DstBitElements[
Idx];
13804 DstBits = SrcBits.
extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
13811 unsigned Opc =
Op.getOpcode();
13818std::optional<std::pair<APInt, APInt>>
13822 return std::nullopt;
13826 return std::nullopt;
13833 return std::nullopt;
13835 for (
unsigned i = 2; i < NumOps; ++i) {
13837 return std::nullopt;
13840 if (Val != (Start + (Stride * i)))
13841 return std::nullopt;
13844 return std::make_pair(Start, Stride);
13850 for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
13860 for (
int Idx = Mask[i]; i != e; ++i)
13861 if (Mask[i] >= 0 && Mask[i] !=
Idx)
13869 SDValue N,
bool AllowOpaques)
const {
13872 if (
auto *
C = dyn_cast<ConstantSDNode>(
N))
13873 return AllowOpaques || !
C->isOpaque();
13880 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
N))
13886 isa<ConstantSDNode>(
N.getOperand(0)))
13893 if (isa<ConstantFPSDNode>(
N))
13900 isa<ConstantFPSDNode>(
N.getOperand(0)))
13910 return std::nullopt;
13912 EVT VT =
N->getValueType(0);
13920 return std::nullopt;
13926 return std::nullopt;
13934 assert(!Node->OperandList &&
"Node already has operands");
13936 "too many operands to fit into SDNode");
13937 SDUse *Ops = OperandRecycler.allocate(
13940 bool IsDivergent =
false;
13941 for (
unsigned I = 0;
I != Vals.
size(); ++
I) {
13942 Ops[
I].setUser(Node);
13943 Ops[
I].setInitial(Vals[
I]);
13947 if (VT != MVT::Other &&
13949 Ops[
I].
getNode()->isDivergent()) {
13950 IsDivergent =
true;
13954 Node->OperandList = Ops;
13957 Node->SDNodeBits.IsDivergent = IsDivergent;
13965 while (Vals.
size() > Limit) {
13966 unsigned SliceIdx = Vals.
size() - Limit;
14042 const SDLoc &DLoc) {
14046 RTLIB::Libcall LC =
static_cast<RTLIB::Libcall
>(
LibFunc);
14057 assert(
From && To &&
"Invalid SDNode; empty source SDValue?");
14058 auto I = SDEI.find(
From);
14059 if (
I == SDEI.end())
14064 NodeExtraInfo NEI =
I->second;
14073 SDEI[To] = std::move(NEI);
14090 auto VisitFrom = [&](
auto &&Self,
const SDNode *
N,
int MaxDepth) {
14091 if (MaxDepth == 0) {
14094 Leafs.emplace_back(
N);
14097 if (!FromReach.
insert(
N).second)
14100 Self(Self,
Op.getNode(), MaxDepth - 1);
14105 auto DeepCopyTo = [&](
auto &&Self,
const SDNode *
N) {
14108 if (!Visited.
insert(
N).second)
14113 if (
N == To &&
Op.getNode() == EntrySDN) {
14118 if (!Self(Self,
Op.getNode()))
14132 for (
int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
14133 PrevDepth = MaxDepth, MaxDepth *= 2, Visited.
clear()) {
14138 for (
const SDNode *
N : StartFrom)
14139 VisitFrom(VisitFrom,
N, MaxDepth - PrevDepth);
14143 LLVM_DEBUG(
dbgs() << __func__ <<
": MaxDepth=" << MaxDepth <<
" too low\n");
14151 errs() <<
"warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
14152 assert(
false &&
"From subgraph too complex - increase max. MaxDepth?");
14154 SDEI[To] = std::move(NEI);
14168 if (!Visited.
insert(
N).second) {
14169 errs() <<
"Detected cycle in SelectionDAG\n";
14170 dbgs() <<
"Offending node:\n";
14171 N->dumprFull(DAG);
dbgs() <<
"\n";
14187 bool check = force;
14188#ifdef EXPENSIVE_CHECKS
14192 assert(
N &&
"Checking nonexistent SDNode");
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is MaybeLiveUses might be modified but its content should be ignored(since it might not be complete). DeadArgumentEliminationPass
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines a hash set that can be used to remove duplication of nodes in a graph.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
mir Rename Register Operands
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly not equal, like -0.0 and 0.0.
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt usub_sat(const APInt &RHS) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
LLVM_ABI APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
LLVM_ABI APInt byteSwap() const
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static bool isSameValue(const APInt &I1, const APInt &I2)
Determine if two APInts have the same value, after zero-extending one of them (if needed!) to ensure that the bit-widths match.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
Recycle small arrays allocated from a BumpPtrAllocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR changes inbetween.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n, a+3n, ...>" where a is an integer and n is a non-zero integer, the value "<a,n>" is returned.
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power of 2, return the log base 2 integer value.
LLVM_ABI bool isConstant() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly not equal, like -0.0 and 0.0.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
LLVM_ABI Type * getType() const
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_arg, ArgNo.
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e. one that does not include DW_OP_LLVM_arg), returns Expr converted to variadic form.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multiple Values.
Base class for variables.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string, and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
MachineBasicBlock * MBB
MBB - The current block.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invocations of replaceAllUsesWith on its operand.
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext.
const MDOperand & getOperand(unsigned I) const
static MVT getIntegerVT(unsigned BitWidth)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
bool isNonTemporal() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
bool isDereferenceable() const
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Pass interface - Implemented by all 'passes'.
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
BumpPtrAllocator & getAlloc()
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node) const
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode, a register or a frame index.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
Represents a use of a SDNode.
EVT getValueType() const
Convenience function for get().getValueType().
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing any side-effecting instructions.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual bool isTargetMemoryOpcode(unsigned Opcode) const
Returns true if a node with the given target-specific opcode has a memory operand.
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
SDNodeFlags getFlags() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector types where the alignment exceeds that of the stack.
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI std::optional< uint64_t > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI void dump() const
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
const TargetLibraryInfo & getLibInfo() const
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVM_ABI std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
LLVM_ABI bool expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={})
Expands a node with multiple results to an FP or vector libcall.
LLVM_ABI std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
const VecDesc * getVectorMappingInfo(StringRef F, const ElementCount &VF, bool Masked) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
const char * getMemcpyName() const
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const
True if target has some particular form of dealing with pointer arithmetic semantics for pointers wit...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
virtual unsigned getVectorIdxWidth(const DataLayout &DL) const
Returns the type to be used for the index operand vector operations.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
LLVM_ABI void set(Value *Val)
User * getUser() const
Returns the User that contains this Use.
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
StringRef getVectorFnName() const
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ POISON
POISON - A poison node.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns underlying integer value of an enum.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
static LLVM_ABI const fltSemantics & IEEEquad() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE
static constexpr roundingMode rmTowardPositive
static LLVM_ABI const fltSemantics & BFloat() LLVM_READNONE
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)