void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

    cl::desc("Gang up loads and stores generated by inlining of memcpy"));

    cl::desc("Number limit for gluing ld/st of memcpy."),

    cl::desc("DAG combiner limit number of steps when searching DAG "
             "for predecessor nodes"));
  return getValueAPF().bitwiseIsEqual(V);

  unsigned EltSize =
      N->getValueType(0).getVectorElementType().getSizeInBits();
  if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    SplatVal = Op0->getAPIntValue().trunc(EltSize);

  if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
    SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize);

  auto *BV = dyn_cast<BuildVectorSDNode>(N);

  unsigned SplatBitSize;

  unsigned EltSize =
      N->getValueType(0).getVectorElementType().getSizeInBits();

  const bool IsBigEndian = false;
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize, IsBigEndian) &&
         EltSize == SplatBitSize;
    N = N->getOperand(0).getNode();

  unsigned i = 0, e = N->getNumOperands();

  while (i != e && N->getOperand(i).isUndef())

  if (i == e)
    return false;

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();

    if (CN->getAPIntValue().countr_one() < EltSize)

    if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize)

  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())

    N = N->getOperand(0).getNode();

  bool IsAllUndef = true;

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();

      if (CN->getAPIntValue().countr_zero() < EltSize)

      if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize)
    if (!isa<ConstantSDNode>(Op))

    if (!isa<ConstantFPSDNode>(Op))

  assert(N->getValueType(0).isVector() && "Expected a vector!");

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (EltSize <= NewEltSize)

    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=

    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=

    if (!isa<ConstantSDNode>(Op))

    APInt C = Op->getAsAPIntVal().trunc(EltSize);
    if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)

    if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)

  if (N->getNumOperands() == 0)

  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
template <typename ConstNodeType>
bool ISD::matchUnaryPredicateImpl(SDValue Op,
                                  std::function<bool(ConstNodeType *)> Match,
                                  bool AllowUndefs, bool AllowTruncation) {
  if (auto *C = dyn_cast<ConstNodeType>(Op))

  EVT SVT = Op.getValueType().getScalarType();

    if (AllowUndefs && Op.getOperand(i).isUndef()) {

    auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
    if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||

template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(

template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
                              bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())

  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  if (LHS.getOpcode() != RHS.getOpcode() ||

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {

    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))

    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||

    if (!Match(LHSCst, RHSCst))
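
// Maps a VECREDUCE_* / VP_REDUCE_* opcode to the base arithmetic opcode
// (ADD, FADD, SMAX, ...) used to combine two elements of the reduction.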
  switch (VecReduceOpcode) {
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"

  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_SEQ_FMUL:

#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"

#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
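
// The switch bodies above are built with the X-macro pattern: each include
// of llvm/IR/VPIntrinsics.def re-expands BEGIN_REGISTER_VP_SDNODE and the
// VP_PROPERTY_* macros, stamping out one case per registered VP node, so a
// single table drives every VP query (binary-op test, mask/EVL operand
// positions, functional opcode mapping).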
                                       bool isIntegerLike) {

  bool IsInteger = Type.isInteger();

  unsigned Op = Op1 | Op2;

  bool IsInteger = Type.isInteger();
  ID.AddPointer(VTList.VTs);

  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());

  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
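
// Operands are hashed as (node pointer, result number) pairs; two nodes get
// the same FoldingSet profile only when they reference the same values,
// which is what lets the CSEMap share structurally identical nodes.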
  switch (N->getOpcode()) {

    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());

    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());

    ID.AddInteger(cast<RegisterSDNode>(N)->getReg().id());

    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());

    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());

    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());

    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());

    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());

    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());

    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());

    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());

    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    ID.AddInteger(ST->getMemOperand()->getFlags());

  case ISD::VP_STORE: {

  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {

  case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {

  case ISD::VP_GATHER: {

  case ISD::VP_SCATTER: {

  if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
    ID.AddInteger(MN->getRawSubclassData());
    ID.AddInteger(MN->getPointerInfo().getAddrSpace());
    ID.AddInteger(MN->getMemOperand()->getFlags());
    ID.AddInteger(MN->getMemoryVT().getRawBits());
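
// Memory-touching nodes fold their MachineMemOperand state (raw subclass
// data, address space, flags, memory VT) into the CSE profile, so two loads
// or stores unify only when their memory semantics match exactly.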
  if (N->getValueType(0) == MVT::Glue)

  switch (N->getOpcode()) {

  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)

    if (Node.use_empty())

  while (!DeadNodes.empty()) {

      DUL->NodeDeleted(N, nullptr);

    RemoveNodeFromCSEMaps(N);

  RemoveNodeFromCSEMaps(N);

  DeleteNodeNotInCSEMaps(N);
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  assert(!(V->isVariadic() && isParameter));

    ByvalParmDbgValues.push_back(V);

  DbgValues.push_back(V);
  for (const SDNode *Node : V->getSDNodes())
      DbgValMap[Node].push_back(V);

  if (I == DbgValMap.end())

  for (auto &Val : I->second)
    Val->setIsInvalidated();
void SelectionDAG::DeallocateNode(SDNode *N) {

  switch (N->getOpcode()) {

    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() ==
               N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
           "Wrong operand type!");
           "Wrong return type size");

    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() ==
               N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();

      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);

  N->PersistentId = NextPersistentId++;

  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {

    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;

    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());

    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(

    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());

    EVT VT = cast<VTSDNode>(N)->getVT();

      Erased = ExtendedValueTypeNodes.erase(VT);

    Erased = CSEMap.RemoveNode(N);

  if (!Erased && N->getValueType(N->getNumValues() - 1) != MVT::Glue &&
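
// Nodes held in side tables (condition codes, external symbols, MCSymbols,
// extended value types) are erased from those tables; every other node is
// removed from the general CSEMap.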
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {

  SDNode *Existing = CSEMap.GetOrInsertNode(N);
  if (Existing != N) {

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, Existing);
    DeleteNodeNotInCSEMaps(N);

  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);

    Node->intersectFlagsWith(N->getFlags());

    Node->intersectFlagsWith(N->getFlags());

    Node->intersectFlagsWith(N->getFlags());
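
// When CSE merges a rebuilt node into an existing one, the survivor keeps
// only the flags common to both: intersectFlagsWith drops any nsw/nuw/
// exact-style flag that the other node could not also guarantee.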
    : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),

  InsertNode(&EntryNode);

  SDAGISelPass = PassPtr;

  LibInfo = LibraryInfo;

  FnVarLocs = VarLocs;

  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");

  OperandRecycler.clear(OperandAllocator);

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());

  NextPersistentId = 0;
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);

    switch (N->getOpcode()) {

           "debug location. Use another overload.");

                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);

    switch (N->getOpcode()) {

      if (N->getDebugLoc() != DL.getDebugLoc())

    if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
      N->setDebugLoc(DL.getDebugLoc());
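
// On a CSE hit the existing node is reused; its debug location is replaced
// only when the incoming SDLoc has a smaller (earlier) IR order, keeping
// the chosen source location deterministic across visitation orders.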
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();

  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), nullptr);
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), nullptr);

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);

  return VT.bitsGT(Op.getValueType())

std::pair<SDValue, SDValue>
         "Strict no-op FP extend/round not allowed.");
  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));

  return VT.bitsGT(Op.getValueType()) ?

  return VT.bitsGT(Op.getValueType()) ?

  return VT.bitsGT(Op.getValueType()) ?
  auto Type = Op.getValueType();

  auto Size = Op.getValueSizeInBits();

  auto Type = Op.getValueType();

  auto Size = Op.getValueSizeInBits();

  auto Type = Op.getValueType();

  auto Size = Op.getValueSizeInBits();

  EVT OpVT = Op.getValueType();
         "Cannot getZeroExtendInReg FP types");
         "getZeroExtendInReg type should be vector iff the operand "
         "Vector element counts must match in getZeroExtendInReg");

  EVT OpVT = Op.getValueType();
         "Cannot getVPZeroExtendInReg FP types");
         "getVPZeroExtendInReg type and operand type should be vector!");
         "Vector element counts must match in getZeroExtendInReg");

  return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);

    return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL);

    return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL);
                                      bool isT, bool isO) {

                                      bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);

                                      EVT VT, bool isT, bool isO) {

  if (isa<VectorType>(Elt->getType()))

      Elt = ConstantInt::get(*getContext(), NewVal);

           "Can only handle an even split!");

      for (unsigned i = 0; i != Parts; ++i)
            NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
            ViaEltVT, isT, isO));

      unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;

            NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
            ViaEltVT, isT, isO));

        std::reverse(EltParts.begin(), EltParts.end());

         "APInt size does not match type size!");

  if ((N = FindNodeOrInsertPos(ID, DL, IP)))

  N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
  CSEMap.InsertNode(N, IP);
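
// getConstant uniques every integer constant through the CSEMap, so each
// (value, type, target/opaque) combination is materialized once per DAG;
// constants wider than the legal element type are first split into
// ViaEltVT-sized parts with extractBits.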
                                        bool isT, bool isO) {
                       IsTarget, IsOpaque);

                                        EVT VT, bool isTarget) {

  if (isa<VectorType>(Elt->getType()))

  if ((N = FindNodeOrInsertPos(ID, DL, IP)))

  N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
  CSEMap.InsertNode(N, IP);

  if (EltVT == MVT::f32)

  if (EltVT == MVT::f64)

  if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
      EltVT == MVT::f16 || EltVT == MVT::bf16) {
                                      EVT VT, int64_t Offset, bool isTargetGA,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  ID.AddInteger(TargetFlags);

  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VTs, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
  CSEMap.InsertNode(N, IP);
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");

  ID.AddInteger(TargetFlags);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);

                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");

  ID.AddInteger(Alignment->value());

  ID.AddInteger(TargetFlags);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
  CSEMap.InsertNode(N, IP);

                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");

  ID.AddInteger(Alignment->value());

  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
  CSEMap.InsertNode(N, IP);
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);

      ValueTypeNodes.size())

  N = newSDNode<VTSDNode>(VT);

  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, getVTList(VT));

                                              unsigned TargetFlags) {
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];

  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, getVTList(VT));

  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond + 1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
                                       bool ConstantFold) {
         "APInt size does not match type size!");

                                       bool ConstantFold) {
  if (EC.isScalable())

                                  const APInt &StepVal) {

         "Must have the same number of vector elements as mask elements!");
         "Invalid VECTOR_SHUFFLE");

  int NElts = Mask.size();
                     [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  for (int i = 0; i != NElts; ++i)
    if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))

      if (UndefElements[MaskVec[i] - Offset]) {

      if (!UndefElements[i])

  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);
  bool AllLHS = true, AllRHS = true;

  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
    } else if (MaskVec[i] >= 0) {

  if (AllLHS && AllRHS)

  if (AllLHS && !N2Undef)

  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;

  if (Identity && NElts)

      V = V->getOperand(0);

    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {

      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);

  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))

  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);

  auto *N = newSDNode<ShuffleVectorSDNode>(VTs, dl.getIROrder(),

  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
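
// getVectorShuffle canonicalizes before uniquing: mask entries addressing
// an undef input become -1, masks that read only one source are folded onto
// a single operand, identity masks return the input unchanged, and the
// final mask is hashed into the node ID alongside the operands.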
  ID.AddInteger(Reg.id());

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<RegisterSDNode>(Reg, VTs);
  CSEMap.InsertNode(N, IP);

  ID.AddPointer(RegMask);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);

  ID.AddPointer(Label);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);

                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {

  ID.AddInteger(TargetFlags);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);

  if (SDNode *E = FindNodeOrInsertPos(ID, IP))

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  if (VT == V.getValueType())

                                       unsigned SrcAS, unsigned DestAS) {

  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))

                                           VTs, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);

  EVT OpTy = Op.getValueType();

  if (OpTy == ShTy || OpTy.isVector())
    return Op;

  std::deque<SDValue> Subvectors = {Op1};
  for (unsigned I = 0; I < ScaleFactor; I++) {
    Subvectors.push_back(

  while (Subvectors.size() > 1) {
    Subvectors.push_back(
    Subvectors.pop_front();
    Subvectors.pop_front();

  assert(Subvectors.size() == 1 &&
         "There should only be one subvector after tree flattening");

  return Subvectors[0];
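
// The deque acts as a FIFO work queue: two subvectors are popped from the
// front and their sum is pushed on the back, so the adds form a balanced
// reduction tree rather than a linear chain.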
    if (Op.getNode() != FPNode)

  while (!Worklist.empty()) {
                                        std::optional<unsigned> CallRetResNo) {

  EVT VT = Node->getValueType(0);
  unsigned NumResults = Node->getNumValues();

  auto getVecDesc = [&]() -> VecDesc const * {
    for (bool Masked : {false, true}) {

  if (VT.isVector() && !(VD = getVecDesc()))

    auto *ST = cast<StoreSDNode>(User);
    SDValue StoreValue = ST->getValue();
    unsigned ResNo = StoreValue.getResNo();

    if (CallRetResNo == ResNo)

    if (!ST->isSimple() || ST->getAddressSpace() != 0)

    if (StoresInChain && ST->getChain() != StoresInChain)

    if (ST->getAlign() <

    ResultStores[ResNo] = ST;
    StoresInChain = ST->getChain();

  auto AddArgListEntry = [&](SDValue Node, Type *Ty) {

    Args.push_back(Entry);

  for (const SDValue &Op : Node->op_values()) {
    EVT ArgVT = Op.getValueType();

    AddArgListEntry(Op, ArgTy);

    if (ResNo == CallRetResNo)

    EVT ResVT = Node->getValueType(ResNo);

    ResultPtrs[ResNo] = ResultPtr;

  Type *RetType = CallRetResNo.has_value()
                      ? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)

    if (ResNo == CallRetResNo) {

      PtrInfo = ST->getPointerInfo();

        getLoad(Node->getValueType(ResNo), DL, CallChain, ResultPtr, PtrInfo);

    Results.push_back(LoadResult);

  if (CallRetResNo && !Node->hasAnyUseOfValue(*CallRetResNo)) {
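
// Expands a multi-result FP libcall (a sincos-style routine returning extra
// results through out-pointers, possibly via a vector-function variant from
// getVecDesc). Stores fed directly by a result may donate their destination
// as the out-pointer; every other result gets a stack temporary that is
// loaded back after the call, and an unused direct return is dropped.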
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();

  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (RedAlign > StackAlign) {

    unsigned NumIntermediates;
                           NumIntermediates, RegisterVT);

    Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;

    RedAlign = std::min(RedAlign, StackAlign);

                                 false, nullptr, StackID);

         "Don't know how to choose the maximum size when creating a stack "

  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  auto GetUndefBooleanConstant = [&]() {

        return GetUndefBooleanConstant();

        return GetUndefBooleanConstant();

    const APInt &C2 = N2C->getAPIntValue();

      const APInt &C1 = N1C->getAPIntValue();

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {

      return GetUndefBooleanConstant();

      return GetUndefBooleanConstant();

      return GetUndefBooleanConstant();

      return GetUndefBooleanConstant();

      return GetUndefBooleanConstant();

      return GetUndefBooleanConstant();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||

    return GetUndefBooleanConstant();
  unsigned BitWidth = Op.getScalarValueSizeInBits();

                                     unsigned Depth) const {

                                     const APInt &DemandedElts,
                                     unsigned Depth) const {

                                    unsigned Depth) const {

                                    unsigned Depth) const {

                                          const APInt &DemandedElts,
                                          unsigned Depth) const {
  EVT VT = Op.getValueType();

  for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
    if (!DemandedElts[EltIdx])

      KnownZeroElements.setBit(EltIdx);

  return KnownZeroElements;
  unsigned Opcode = V.getOpcode();
  EVT VT = V.getValueType();
         "scalable demanded bits are ignored");

    UndefElts = V.getOperand(0).isUndef()

    APInt UndefLHS, UndefRHS;

    UndefElts = UndefLHS | UndefRHS;

    for (unsigned i = 0; i != NumElts; ++i) {

      if (!DemandedElts[i])

      if (Scl && Scl != Op)

    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {

      if (!DemandedElts[i])

      if (M < (int)NumElts)

        DemandedRHS.setBit(M - NumElts);

    auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) {

      return (SrcElts.popcount() == 1) ||
             (SrcElts & SrcUndefs).isZero());

    if (!DemandedLHS.isZero())
      return CheckSplatSrc(V.getOperand(0), DemandedLHS);
    return CheckSplatSrc(V.getOperand(1), DemandedRHS);

    SDValue Src = V.getOperand(0);

    if (Src.getValueType().isScalableVector())

    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    SDValue Src = V.getOperand(0);

    if (Src.getValueType().isScalableVector())

    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);

      UndefElts = UndefSrcElts.trunc(NumElts);

    SDValue Src = V.getOperand(0);
    EVT SrcVT = Src.getValueType();

    if ((BitWidth % SrcBitWidth) == 0) {

      unsigned Scale = BitWidth / SrcBitWidth;

      APInt ScaledDemandedElts =

      for (unsigned I = 0; I != Scale; ++I) {

        SubDemandedElts &= ScaledDemandedElts;

        if (!SubUndefElts.isZero())

  EVT VT = V.getValueType();

         (AllowUndefs || !UndefElts);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

    SplatIdx = (UndefElts & DemandedElts).countr_one();

    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())

    int Idx = SVN->getSplatIndex();

    int NumElts = V.getValueType().getVectorNumElements();

    SplatIdx = Idx % NumElts;

    return V.getOperand(Idx / NumElts);

  if (LegalSVT.bitsLT(SVT))
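
// A shuffle splat index addresses the concatenation of both operands:
// Idx / NumElts picks the source operand and Idx % NumElts is the lane
// within it.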
std::optional<ConstantRange>
                                           unsigned Depth) const {
         "Unknown shift node");

  unsigned BitWidth = V.getScalarValueSizeInBits();

  if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
    const APInt &ShAmt = Cst->getAPIntValue();

      return std::nullopt;

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
    const APInt *MinAmt = nullptr, *MaxAmt = nullptr;

    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {

      if (!DemandedElts[i])

      auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));

        MinAmt = MaxAmt = nullptr;

      const APInt &ShAmt = SA->getAPIntValue();

        return std::nullopt;

      if (!MinAmt || MinAmt->ugt(ShAmt))

      if (!MaxAmt || MaxAmt->ult(ShAmt))

    assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
           "Failed to find matching min/max shift amounts");

    if (MinAmt && MaxAmt)

  return std::nullopt;

std::optional<uint64_t>
                                            unsigned Depth) const {
         "Unknown shift node");

  if (std::optional<ConstantRange> AmtRange =

    if (const APInt *ShAmt = AmtRange->getSingleElement())
      return ShAmt->getZExtValue();

  return std::nullopt;

std::optional<uint64_t>
  EVT VT = V.getValueType();

std::optional<uint64_t>
                                                   unsigned Depth) const {
         "Unknown shift node");

  if (std::optional<ConstantRange> AmtRange =

    return AmtRange->getUnsignedMin().getZExtValue();

  return std::nullopt;

std::optional<uint64_t>
  EVT VT = V.getValueType();

std::optional<uint64_t>
                                                   unsigned Depth) const {
         "Unknown shift node");

  if (std::optional<ConstantRange> AmtRange =

    return AmtRange->getUnsignedMax().getZExtValue();

  return std::nullopt;

std::optional<uint64_t>
  EVT VT = V.getValueType();
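
// The getValidShiftAmount / Minimum / Maximum helpers all reduce to the
// ConstantRange query above: a single-element range yields an exact shift
// amount, otherwise only the range's unsigned minimum or maximum is used.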
  EVT VT = Op.getValueType();

                                          unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {

  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {

  assert((!Op.getValueType().isFixedLengthVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  unsigned Opcode = Op.getOpcode();

           "Expected SPLAT_VECTOR implicit truncation");

    unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits();

           "Expected SPLAT_VECTOR_PARTS scalars to cover element width");

    const APInt &Step = Op.getConstantOperandAPInt(0);

    const APInt MinNumElts =

            .umul_ov(MinNumElts, Overflow);

    const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow);

    assert(!Op.getValueType().isScalableVector());

      if (!DemandedElts[i])

             "Expected BUILD_VECTOR implicit truncation");
    assert(!Op.getValueType().isScalableVector());

    APInt DemandedLHS, DemandedRHS;
                                DemandedLHS, DemandedRHS))

    if (!!DemandedLHS) {

    if (!!DemandedRHS) {

    const APInt &Multiplier = Op.getConstantOperandAPInt(0);

    if (Op.getValueType().isScalableVector())

    EVT SubVectorVT = Op.getOperand(0).getValueType();

    for (unsigned i = 0; i != NumSubVectors; ++i) {

          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);

      if (!!DemandedSub) {

    if (Op.getValueType().isScalableVector())

    APInt DemandedSrcElts = DemandedElts;

    if (!!DemandedSubElts) {

    if (!!DemandedSrcElts) {

    if (Op.getValueType().isScalableVector() ||
        Src.getValueType().isScalableVector())

    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    if (Op.getValueType().isScalableVector())

    if (DemandedElts != 1)

    if (Op.getValueType().isScalableVector())

    if ((BitWidth % SubBitWidth) == 0) {

      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {

        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.insertBits(Known2, SubBitWidth * Shifts);

    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts =

      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
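
// For bitcasts that change the element size, known bits are stitched
// together one sub-element at a time; the IsLE computations mirror how
// little- versus big-endian targets lay narrow sub-elements out inside the
// wider value.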
    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
                            Op.getOperand(0), DemandedElts, false, Depth + 1);

    if (Op->getFlags().hasNoSignedWrap() &&
        Op.getOperand(0) == Op.getOperand(1) &&

    unsigned SignBits1 =

    unsigned SignBits0 =

    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");

    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
    if (Op.getResNo() == 0)

    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");

    bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
    if (Op.getResNo() == 0)

    if (Op.getResNo() != 1)

    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;

    bool NUW = Op->getFlags().hasNoUnsignedWrap();
    bool NSW = Op->getFlags().hasNoSignedWrap();

    if (std::optional<uint64_t> ShMinAmt =

                                  Op->getFlags().hasExact());

    if (std::optional<uint64_t> ShMinAmt =

                                  Op->getFlags().hasExact());

      unsigned Amt = C->getAPIntValue().urem(BitWidth);

                         DemandedElts, Depth + 1);
    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");

    unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits();

    Known = Known2.concat(Known);

    if (Op.getResNo() == 0)

    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

                   ? cast<MaskedGatherSDNode>(Op)->getExtensionType()
                   : cast<MaskedLoadSDNode>(Op)->getExtensionType();
    EVT MemVT = cast<MemSDNode>(Op)->getMemoryVT();

        !Op.getValueType().isScalableVector()) {

      for (unsigned i = 0; i != NumElts; ++i) {
        if (!DemandedElts[i])

        if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {

        if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
          APInt Value = CFP->getValueAPF().bitcastToAPInt();

      if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
      } else if (Op.getResNo() == 0) {
      KnownBits Known0(!LD->getMemoryVT().isScalableVT()
                           ? LD->getMemoryVT().getFixedSizeInBits()

      EVT VT = Op.getValueType();

      if (const MDNode *MD = LD->getRanges()) {

      if (LD->getMemoryVT().isVector())
        Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits());

    if (Op.getValueType().isScalableVector())

    EVT InVT = Op.getOperand(0).getValueType();

    if (Op.getValueType().isScalableVector())

    EVT InVT = Op.getOperand(0).getValueType();

    if (Op.getValueType().isScalableVector())

    EVT InVT = Op.getOperand(0).getValueType();

    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);

    unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());

                        Op.getOpcode() == ISD::ADD, Flags.hasNoSignedWrap(),
                        Flags.hasNoUnsignedWrap(), Known, Known2);

    if (Op.getResNo() == 1) {

             "We only compute knownbits for the difference here.");

      Borrow = Borrow.trunc(1);

    if (Op.getResNo() == 1) {

    assert(Op.getResNo() == 0 &&
           "We only compute knownbits for the sum here.");

      Carry = Carry.trunc(1);

    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned EltBitWidth = Op.getValueSizeInBits();

      Known = Known.trunc(EltBitWidth);

      Known = Known.trunc(EltBitWidth);
    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))

    if (Op.getValueType().isScalableVector())

    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];

    if (!!DemandedVecElts) {

    Known = Known2.abs();

    if (CstLow && CstHigh) {

      const APInt &ValueHigh = CstHigh->getAPIntValue();
      if (ValueLow.sle(ValueHigh)) {

        unsigned MinSignBits = std::min(LowSignBits, HighSignBits);

    if (IsMax && CstLow) {

    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    if (Op.getResNo() == 1) {

        cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();

    if (Op.getResNo() == 0) {

    if (Op.getValueType().isScalableVector())

    return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  if (C && C->getAPIntValue() == 1)

  if (C && C->getAPIntValue().isSignMask())

    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
      return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

    if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())

    return C1->getValueAPF().getExactLog2Abs() >= 0;
  EVT VT = Op.getValueType();

                                          unsigned Depth) const {
  EVT VT = Op.getValueType();

  unsigned FirstAnswer = 1;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    const APInt &Val = C->getAPIntValue();

  unsigned Opcode = Op.getOpcode();

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp + 1;

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();

    unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();

    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);

      if (!DemandedElts[i])

        APInt T = C->getAPIntValue().trunc(VTBits);
        Tmp2 = T.getNumSignBits();

      if (SrcOp.getValueSizeInBits() != VTBits) {
               "Expected BUILD_VECTOR implicit truncation");
        unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
        Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);

      Tmp = std::min(Tmp, Tmp2);

    APInt DemandedLHS, DemandedRHS;
                                DemandedLHS, DemandedRHS))

    Tmp = std::numeric_limits<unsigned>::max();

    if (!!DemandedRHS) {

      Tmp = std::min(Tmp, Tmp2);

    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    if (VTBits == SrcBits)

    if ((SrcBits % VTBits) == 0) {

      unsigned Scale = SrcBits / VTBits;
      APInt SrcDemandedElts =

      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    return VTBits - Tmp + 1;

    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();

    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();

    return std::max(Tmp, Tmp2);

    EVT SrcVT = Src.getValueType();

    if (std::optional<uint64_t> ShAmt =

      Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);

    if (std::optional<ConstantRange> ShAmtRange =

      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();

      EVT ExtVT = Ext.getValueType();
      SDValue Extendee = Ext.getOperand(0);

      if (SizeDifference <= MinShAmt) {
        Tmp = SizeDifference +

          return Tmp - MaxShAmt;

        return Tmp - MaxShAmt;

    FirstAnswer = std::min(Tmp, Tmp2);

    if (Tmp == 1)
      return 1;

    return std::min(Tmp, Tmp2);
    if (Tmp == 1)
      return 1;

    return std::min(Tmp, Tmp2);

    if (CstLow && CstHigh) {

      Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
      return std::min(Tmp, Tmp2);

    return std::min(Tmp, Tmp2);

    return std::min(Tmp, Tmp2);

    if (Op.getResNo() == 0 && Op.getOperand(0) == Op.getOperand(1))

    if (Op.getResNo() != 1)

    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;

      unsigned RotAmt = C->getAPIntValue().urem(VTBits);

        RotAmt = (VTBits - RotAmt) % VTBits;

      if (Tmp > (RotAmt + 1))
        return (Tmp - RotAmt);

    if (Tmp == 1)
      return 1;

      if (CRHS->isAllOnes()) {

        if ((Known.Zero | 1).isAllOnes())

    if (Tmp2 == 1)
      return 1;
    return std::min(Tmp, Tmp2) - 1;

    if (Tmp2 == 1)
      return 1;

      if (CLHS->isZero()) {

        if ((Known.Zero | 1).isAllOnes())

    if (Tmp == 1)
      return 1;
    return std::min(Tmp, Tmp2) - 1;

    if (SignBitsOp0 == 1)

    if (SignBitsOp1 == 1)

    unsigned OutValidBits =
        (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
    return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
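
// Sign-bit count for a multiply: each operand occupies
// (VTBits - SignBits + 1) significant bits, and when the two together still
// fit in the result type every bit above them is a copy of the sign bit;
// otherwise only the sign bit itself (1) can be guaranteed.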
    return std::min(Tmp, Tmp2);

    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();

    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);

    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];

    Tmp = std::numeric_limits<unsigned>::max();

      Tmp = std::min(Tmp, Tmp2);

    if (!!DemandedVecElts) {

      Tmp = std::min(Tmp, Tmp2);

    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();

    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))

    if (Src.getValueType().isScalableVector())

    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

    Tmp = std::numeric_limits<unsigned>::max();
    EVT SubVectorVT = Op.getOperand(0).getValueType();

    for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {

          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);

      Tmp = std::min(Tmp, Tmp2);

    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

    APInt DemandedSrcElts = DemandedElts;

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedSubElts) {

    if (!!DemandedSrcElts) {

      Tmp = std::min(Tmp, Tmp2);

    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");

      if (const MDNode *Ranges = LD->getRanges()) {

        if (DemandedElts != 1)

        switch (LD->getExtensionType()) {

    Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();

    if (Op.getResNo() == 0) {

        return VTBits - Tmp + 1;

        return VTBits - Tmp;

        return VTBits - Tmp + 1;

        return VTBits - Tmp;

    if (Op.getResNo() == 0) {

      unsigned ExtType = LD->getExtensionType();

        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp + 1;

        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp;

      Type *CstTy = Cst->getType();

        for (unsigned i = 0; i != NumElts; ++i) {

          if (!DemandedElts[i])

          if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {

            Tmp = std::min(Tmp, Value.getNumSignBits());

          if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Tmp = std::min(Tmp, Value.getNumSignBits());

    FirstAnswer = std::max(FirstAnswer, NumBits);
                                                 unsigned Depth) const {
  return Op.getScalarValueSizeInBits() - SignBits + 1;

                                                 const APInt &DemandedElts,
                                                 unsigned Depth) const {
  return Op.getScalarValueSizeInBits() - SignBits + 1;

                                                    unsigned Depth) const {
  EVT VT = Op.getValueType();

                                                    const APInt &DemandedElts,
                                                    unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();

      if (!DemandedElts[i])

    APInt DemandedLHS, DemandedRHS;
    auto *SVN = cast<ShuffleVectorSDNode>(Op);
                                DemandedElts, DemandedLHS, DemandedRHS,

    if (!DemandedLHS.isZero() &&

    if (!DemandedRHS.isZero() &&

  return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);

                                       unsigned Depth) const {
  EVT VT = Op.getValueType();

                                       unsigned Depth) const {
  if (ConsiderFlags && Op->hasPoisonGeneratingFlags())

  unsigned Opcode = Op.getOpcode();

    if (Op.getOperand(0).getValueType().isInteger())

    unsigned CCOp = Opcode == ISD::SETCC ? 2 : 4;
    ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get();
    if (((unsigned)CCCode & 0x10U))

    EVT VecVT = Op.getOperand(0).getValueType();

    auto *SVN = cast<ShuffleVectorSDNode>(Op);

      if (Elt < 0 && DemandedElts[Idx])
  unsigned Opcode = Op.getOpcode();

    return Op->getFlags().hasDisjoint() ||

    return !C->getValueAPF().isNaN() ||
           (SNaN && !C->getValueAPF().isSignaling());

  unsigned Opcode = Op.getOpcode();

  assert(Op.getValueType().isFloatingPoint() &&
         "Floating point type expected");

  assert(!Op.getValueType().isFloatingPoint() &&
         "Floating point types unsupported - use isKnownNeverZeroFloat");

  switch (Op.getOpcode()) {

    if (Op->getFlags().hasNoSignedWrap() ||
        Op->getFlags().hasNoUnsignedWrap())

    if (ValKnown.One[0])

    if (Op->getFlags().hasExact())

    if (Op->getFlags().hasExact())

    if (Op->getFlags().hasNoUnsignedWrap())

    std::optional<bool> ne =

    if (Op->getFlags().hasNoSignedWrap() ||
        Op->getFlags().hasNoUnsignedWrap())

    const APInt &Multiplier = Op.getConstantOperandAPInt(0);

    return !C1->isNegative();

  if (A == B)
    return true;

    if (CA->isZero() && CB->isZero())
      return true;
    return V.getOperand(0);

    SDValue ExtArg = V.getOperand(0);

      NotOperand = NotOperand->getOperand(0);

    if (Other == NotOperand)

    return NotOperand == Other->getOperand(0) ||
           NotOperand == Other->getOperand(1);

    A = A->getOperand(0);

    B = B->getOperand(0);

    return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) ||
           MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B);

  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");

    if (cast<ConstantSDNode>(Step)->isZero())
  int NumOps = Ops.size();
  assert(NumOps != 0 && "Can't build an empty vector!");
         "BUILD_VECTOR cannot be used with scalable types");
         "Incorrect element count in BUILD_VECTOR!");

  bool IsIdentity = true;
  for (int i = 0; i != NumOps; ++i) {
        (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
        Ops[i].getConstantOperandAPInt(1) != i) {
    IdentitySrc = Ops[i].getOperand(0);

  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
           return Ops[0].getValueType() == Op.getValueType();
         "Concatenation of vectors with inconsistent value types!");
         "Incorrect element count in vector concatenation!");

  if (Ops.size() == 1)

  bool IsIdentity = true;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
        Op.getOperand(0).getValueType() != VT ||
        (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
        Op.getConstantOperandVal(1) != IdentityIndex) {
    assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
           "Unexpected identity source vector for concat of extracts");
    IdentitySrc = Op.getOperand(0);

  assert(IdentitySrc && "Failed to set source vector of extracts");
  EVT OpVT = Op.getValueType();

    SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);

  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  CSEMap.InsertNode(N, IP);

  return getNode(Opcode, DL, VT, N1, Flags);

           "STEP_VECTOR can only be used with scalable types");
           "Unexpected step operand");

           "Invalid FP cast!");
           "Vector element count mismatch!");

           "Invalid SIGN_EXTEND!");
           "SIGN_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

           "Invalid ZERO_EXTEND!");
           "ZERO_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

           "Invalid ANY_EXTEND!");
           "ANY_EXTEND result type should be vector iff the operand "
           "Vector element count mismatch!");

           "Invalid TRUNCATE!");
           "TRUNCATE result type should be vector iff the operand "
           "Vector element count mismatch!");

    assert(VT.isVector() && "This DAG node is restricted to vector types.");
           "The input must be the same size or smaller than the result.");
           "The destination vector type must have fewer lanes than the input.");

           "BSWAP types must be a multiple of 16 bits!");

           "Cannot BITCAST between types of different sizes!");

           "Illegal SCALAR_TO_VECTOR node!");

           "Wrong operand type!");

  if (VT != MVT::Glue) {

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
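
// Common tail of the getNode overloads: any result type other than
// MVT::Glue is uniqued through the CSEMap, where a hit intersects flags and
// returns the existing node; glue results are deliberately never CSE'd.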
    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

    if (!C2.getBoolValue())

  return std::nullopt;

                                          bool IsUndef1, const APInt &C2,
  if (!(IsUndef1 || IsUndef2))

  return std::nullopt;

  auto *C2 = dyn_cast<ConstantSDNode>(N2);

  int64_t Offset = C2->getSExtValue();

  assert(Ops.size() == 2 && "Div/rem should have 2 operands");
                  [](SDValue V) { return V.isUndef() || isNullConstant(V); });

  unsigned NumOps = Ops.size();

  if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
    const APInt &Val = C->getAPIntValue();

                         C->isTargetOpcode(), C->isOpaque());

                         C->isTargetOpcode(), C->isOpaque());

                         C->isTargetOpcode(), C->isOpaque());
                         C->isTargetOpcode(), C->isOpaque());

      if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)

      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)

      if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)

      if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)

  if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) {

      return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);

      if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)

      if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)

      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)

      if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
  if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
    if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
      if (C1->isOpaque() || C2->isOpaque())

      std::optional<APInt> FoldAttempt =
          FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());

             "Can't fold vectors ops with scalar operands");

    EVT EVT = cast<VTSDNode>(Ops[1])->getVT();

    if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
      const APInt &Val = C1->getAPIntValue();
      return SignExtendInReg(Val, VT);

      llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();

        const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
        ScalarOps.push_back(SignExtendInReg(Val, OpVT));

        isa<ConstantSDNode>(Ops[0].getOperand(0)))
                       SignExtendInReg(Ops[0].getConstantOperandAPInt(0),

      Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&

    auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
    auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);

      if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
          BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {

              Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);

          BVEltVT = BV1->getOperand(0).getValueType();

          BVEltVT = BV2->getOperand(0).getValueType();

                                    DstBits, RawBits, DstUndefs,

        for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {

            ? Ops[0].getConstantOperandAPInt(0) * RHSVal
            : Ops[0].getConstantOperandAPInt(0) << RHSVal;

  auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorElementCount() == NumElts;

  auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {

  if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||

  for (unsigned I = 0; I != NumVectorElts; I++) {

      EVT InSVT = Op.getValueType().getScalarType();

        !isa<ConstantSDNode>(ScalarOp) &&

    if (LegalSVT != SVT)
      ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult);
  if (Ops.size() != 2)

  if (N1CFP && N2CFP) {

    if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())

  ID.AddInteger(A.value());

  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))

      newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, A);
  createOperands(N, {Val});

  CSEMap.InsertNode(N, IP);

  return getNode(Opcode, DL, VT, N1, N2, Flags);
  if ((N1C && !N2C) || (N1CFP && !N2CFP))

         "Operand is DELETED_NODE!");

  auto *N1C = dyn_cast<ConstantSDNode>(N1);
  auto *N2C = dyn_cast<ConstantSDNode>(N2);

           N2.getValueType() == MVT::Other && "Invalid token factor!");

    if (N1 == N2)
      return N1;

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    if (N2CV && N2CV->isZero())

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    if (N2CV && N2CV->isZero())

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

      const APInt &N2CImm = N2C->getAPIntValue();

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

           "Types of operands of UCMP/SCMP must match");
           "Operands and return type must both be scalars or vectors");
           "Result and operands must have the same number of elements");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

    assert(VT.isInteger() && "This operator does not apply to FP types!");
           N1.getValueType() == VT && "Binary operator types must match!");

           N1.getValueType() == VT && "Binary operator types must match!");

           "Invalid FCOPYSIGN!");

      const APInt &ShiftImm = N2C->getAPIntValue();

           "Shift operators return type must be the same as their first arg");
           "Shifts only work on integers");
           "Vector shift amounts must be in the same type as their first arg");
           "Invalid use of small shift amount with oversized value!");

    if (N2CV && N2CV->isZero())

        (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&

    EVT EVT = cast<VTSDNode>(N2)->getVT();
           "Cannot *_EXTEND_INREG FP types");
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");

    EVT EVT = cast<VTSDNode>(N2)->getVT();
           "Cannot *_EXTEND_INREG FP types");
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "Vector element counts must match in SIGN_EXTEND_INREG");

    if (EVT == VT)
      return N1;

           "FP_TO_*INT_SAT type should be vector iff the operand type is "
           "Vector element counts must match in FP_TO_*INT_SAT");
    assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
           "Type to saturate to must be a scalar.");

           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
           "element type of the vector.");

           "BUILD_VECTOR used for scalable vectors");

    if (N1Op2C && N2C) {

    assert(N2C && (unsigned)N2C->getZExtValue() < 2 &&
           "Bad EXTRACT_ELEMENT!");
           "Wrong types for EXTRACT_ELEMENT!");

      unsigned Shift = ElementSize * N2C->getZExtValue();
      const APInt &Val = N1C->getAPIntValue();

           "Extract subvector VTs must be vectors!");
           "Extract subvector VTs must have the same element type!");
           "Cannot extract a scalable vector from a fixed length vector!");
           "Extract subvector must be from larger vector to smaller vector!");
    assert(N2C && "Extract subvector index must be a constant");
           "Extract subvector overflow!");
    assert(N2C->getAPIntValue().getBitWidth() ==
           "Constant index for EXTRACT_SUBVECTOR has an invalid size");

      return N1.getOperand(N2C->getZExtValue() / Factor);
7694 if (VT != MVT::Glue) {
7698 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7699 E->intersectFlagsWith(Flags);
7703 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7705 createOperands(
N, Ops);
7706 CSEMap.InsertNode(
N, IP);
7708 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7709 createOperands(
N, Ops);
7723 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
assert(/* ... */ && "Operand is DELETED_NODE!");
// ...
if (N1CFP && N2CFP && N3CFP) {
  // ...
}
// ...
assert(/* ... */ && "SETCC operands must have the same type!");
assert(/* ... */ &&
       "SETCC type should be vector iff the operand type is vector!");
assert(/* ... */ && "SETCC vector element counts must match!");
// ...
if (cast<ConstantSDNode>(N3)->isZero())
  // ...
assert(/* ... */ && "Dest and insert subvector source types must match!");
assert(/* ... */ && "Insert subvector VTs must be vectors!");
assert(/* ... */ && "Insert subvector VTs must have the same element type!");
assert(/* ... */ &&
       "Cannot insert a scalable vector into a fixed length vector!");
assert(/* ... */ &&
       "Insert subvector must be from smaller vector to larger vector!");
assert(isa<ConstantSDNode>(N3) && "Insert subvector index must be constant");
assert(/* ... */ && "Insert subvector overflow!");
assert(/* ... */ &&
       "Constant index for INSERT_SUBVECTOR has an invalid size");
// ...
case ISD::VP_TRUNCATE:
case ISD::VP_SIGN_EXTEND:
case ISD::VP_ZERO_EXTEND:
  // ...
assert(VT == VecVT && "Vector and result type don't match.");
assert(/* ... */ && "All inputs must be vectors.");
assert(VecVT == PassthruVT && "Vector and passthru types don't match.");
assert(/* ... */ && "Vector and mask must have same number of elements.");
if (VT != MVT::Glue) {
  // ...
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
    // ...
  }
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
} else {
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
}
// ...
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VT, Ops, Flags);
// ...
return getNode(Opcode, DL, VT, N1, N2, N3, N4, Flags);
// ...
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VT, Ops, Flags);
// ...
return getNode(Opcode, DL, VT, N1, N2, N3, N4, N5, Flags);
if (FI->getIndex() < 0)
  // ...
assert(C->getAPIntValue().getBitWidth() == 8);
// ...
return DAG.getConstant(Val, dl, VT, false, IsOpaque);
// ...
assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
// ...
if (VT != Value.getValueType())
  // ...
if (Slice.Array == nullptr) {
  // ...
  if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
    // ...
}
// ...
unsigned NumVTBytes = NumVTBits / 8;
unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
APInt Val(NumVTBits, 0);
// ...
for (unsigned i = 0; i != NumBytes; ++i)
  // ... (little-endian byte placement, elided)
// ...
for (unsigned i = 0; i != NumBytes; ++i)
  Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
// ...
          APInt(Base.getValueSizeInBits().getFixedValue(),
                Offset.getKnownMinValue()));
// ...
EVT BasePtrVT = Ptr.getValueType();
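// Worked example for the byte-packing loops above (assuming they belong to
// the usual memset-string-value helper, as the Slice accesses suggest):
// copying "abcd" into an i32 with NumVTBytes == 4, the surviving big-endian
// loop places Slice[0] at the highest byte:
//   Val = 'a'<<24 | 'b'<<16 | 'c'<<8 | 'd'   ->   0x61626364
// The elided little-endian arm would shift Slice[i] by i*8 instead,
// yielding 0x64636261 for the same input.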
// ...
  G = cast<GlobalAddressSDNode>(Src);
else if (Src.getOpcode() == ISD::ADD &&
         /* ... */) {
  G = cast<GlobalAddressSDNode>(Src.getOperand(0));
  SrcDelta = Src.getConstantOperandVal(1);
}
// ...
                       SrcDelta + G->getOffset());
// ...
assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
// ...
for (unsigned i = From; i < To; ++i) {
  // ...
  GluedLoadChains.push_back(OutLoadChains[i]);
}
// ...
for (unsigned i = From; i < To; ++i) {
  StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
  // ...
                     ST->getBasePtr(), ST->getMemoryVT(),
                     ST->getMemOperand());
}
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
// ...
  DstAlignCanChange = true;
if (!SrcAlign || Alignment > *SrcAlign)
  SrcAlign = Alignment;
assert(SrcAlign && "SrcAlign must be set");
// ...
bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
// ...
const MemOp Op = isZeroConstant
                     ? /* ... */
                     : /* ... */ *SrcAlign, isVol, CopyFromConstant);
// ...
if (DstAlignCanChange) {
  Type *Ty = MemOps[0].getTypeForEVT(C);
  Align NewAlign = DL.getABITypeAlign(Ty);
  // ...
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    // ...
    Alignment = NewAlign;
  }
}
// ...
const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V);
// ... BatchAA && SrcVal && /* ... */
unsigned NumMemOps = MemOps.size();
// ...
for (unsigned i = 0; i != NumMemOps; ++i) {
  // ...
  if (VTSize > Size) {
    // ...
    assert(i == NumMemOps-1 && i != 0);
    SrcOff -= VTSize - Size;
    DstOff -= VTSize - Size;
  }
  // ...
  if (CopyFromConstant &&
      /* ... */) {
    // ...
    if (SrcOff < Slice.Length) {
      SubSlice.move(SrcOff);
      // ...
    } else {
      SubSlice.Array = nullptr;
      // ...
      SubSlice.Length = VTSize;
    }
    // ...
    if (Value.getNode()) {
      // ...
          DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
      // ...
    }
  }
  if (!Store.getNode()) {
    // ...
    bool isDereferenceable = /* ... */;
    if (isDereferenceable)
      // ...
        DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
    // ...
  }
}
// ...
unsigned NumLdStInMemcpy = OutStoreChains.size();
if (NumLdStInMemcpy) {
  // ...
  for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
    // ...
  }
  if (NumLdStInMemcpy <= GluedLdStLimit) {
    // ... NumLdStInMemcpy, OutLoadChains, /* ... */
  } else {
    unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
    unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
    unsigned GlueIter = 0;
    for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
      unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
      unsigned IndexTo = NumLdStInMemcpy - GlueIter;
      // ... OutLoadChains, OutStoreChains);
      GlueIter += GluedLdStLimit;
    }
    if (RemainingLdStInMemcpy) {
      // ... RemainingLdStInMemcpy, OutLoadChains, /* ... */
    }
  }
}
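// Numeric sketch of the chunking above: with NumLdStInMemcpy == 70 and
// GluedLdStLimit == 32 (the actual limit is flag/target dependent; see the
// cl::opt declared near the top of this file),
//   NumberLdChain         = 70 / 32 = 2 full glued chains,
//   RemainingLdStInMemcpy = 70 % 32 = 6 pairs glued in a final chunk.
// Each iteration glues the highest-indexed unprocessed window
// [NumLdStInMemcpy - GlueIter - Limit, NumLdStInMemcpy - GlueIter), so the
// stores nearest the end of the copy are chained first.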
                    bool isVol, bool AlwaysInline,
// ...
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
// ...
  DstAlignCanChange = true;
if (!SrcAlign || Alignment > *SrcAlign)
  SrcAlign = Alignment;
assert(SrcAlign && "SrcAlign must be set");
// ...
if (DstAlignCanChange) {
  Type *Ty = MemOps[0].getTypeForEVT(C);
  Align NewAlign = DL.getABITypeAlign(Ty);
  // ...
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    // ...
    Alignment = NewAlign;
  }
}
// ...
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
  // ...
  bool isDereferenceable = /* ... */;
  if (isDereferenceable)
    // ...
      SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
  // ...
}
for (unsigned i = 0; i < NumMemOps; i++) {
  // ...
      Chain, dl, LoadValues[i], /* ... */
      DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
  // ...
}
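// Note the two-pass structure above: the memmove expansion issues *all* of
// its loads before *any* store, because source and destination ranges may
// overlap. The memcpy expansion earlier in the file is free to interleave
// load/store pairs (and glue them into chains), since non-overlap is part
// of memcpy's contract.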
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
// ...
  DstAlignCanChange = true;
// ...
    MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
// ...
if (DstAlignCanChange) {
  // ...
  Align NewAlign = DL.getABITypeAlign(Ty);
  // ...
  if (!TRI->hasStackRealignment(MF))
    NewAlign = std::min(NewAlign, *StackAlign);
  if (NewAlign > Alignment) {
    // ...
    Alignment = NewAlign;
  }
}
// ...
unsigned NumMemOps = MemOps.size();
// ...
EVT LargestVT = MemOps[0];
for (unsigned i = 1; i < NumMemOps; i++)
  if (MemOps[i].bitsGT(LargestVT))
    LargestVT = MemOps[i];
// ...
for (unsigned i = 0; i < NumMemOps; i++) {
  // ...
  if (VTSize > Size) {
    // ...
    assert(i == NumMemOps-1 && i != 0);
    DstOff -= VTSize - Size;
  }
  // ...
  if (VT.bitsLT(LargestVT)) {
    // ...
  }
  // ...
  assert(Value.getValueType() == VT && "Value with wrong type.");
  // ...
}
                    Align Alignment, bool isVol, bool AlwaysInline,
                    const CallInst *CI,
// ...
  if (ConstantSize->isZero())
    // ...
      *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
      isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
  if (Result.getNode())
    // ...
// ...
      *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
      DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    // ...
// ...
  assert(ConstantSize && "AlwaysInline requires a constant size!");
  // ...
      *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
      isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
// ...
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
// ...
Entry.Node = Size; Args.push_back(Entry);
// ...
bool IsTailCall = false;
if (OverrideTailCall.has_value()) {
  IsTailCall = *OverrideTailCall;
} else {
  bool LowersToMemcpy = /* ... */;
  // ...
                        ReturnsFirstArg && LowersToMemcpy);
}
// ...
    Dst.getValueType().getTypeForEVT(*getContext()),
// ...
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
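// The overall decision ladder visible in the fragments above:
//   1. Constant zero length       -> return the incoming chain unchanged.
//   2. Constant, small length     -> inline load/store expansion
//                                    (the loads-and-stores helper above).
//   3. Target hook                -> target-specific memcpy lowering.
//   4. AlwaysInline + constant    -> forced inline expansion (asserted).
//   5. Otherwise                  -> build a CallLoweringInfo and emit a
//                                    libcall via TLI->LowerCallTo, possibly
//                                    as a tail call when permitted.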
                          Type *SizeTy, unsigned ElemSz,
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
  // ...
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
                    std::optional<bool> OverrideTailCall,
// ...
  if (ConstantSize->isZero())
    // ...
      *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
      isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
  if (Result.getNode())
    // ...
// ...
      Alignment, isVol, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    // ...
// ...
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
// ...
Entry.Node = Size; Args.push_back(Entry);
// ...
bool IsTailCall = false;
if (OverrideTailCall.has_value()) {
  IsTailCall = *OverrideTailCall;
} else {
  bool LowersToMemmove = /* ... */;
  // ...
                        ReturnsFirstArg && LowersToMemmove);
}
// ...
    Dst.getValueType().getTypeForEVT(*getContext()),
// ...
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
// ...
                           Type *SizeTy, unsigned ElemSz,
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
  // ...
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
                    bool isVol, bool AlwaysInline,
// ...
  if (ConstantSize->isZero())
    // ...
      isVol, false, DstPtrInfo, AAInfo);
  if (Result.getNode())
    // ...
// ...
    *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
    DstPtrInfo);
  if (Result.getNode())
    // ...
// ...
  assert(ConstantSize && "AlwaysInline requires a constant size!");
  // ...
      isVol, true, DstPtrInfo, AAInfo);
  assert(/* ... */ &&
         "getMemsetStores must return a valid sequence when AlwaysInline");
// ...
const auto CreateEntry = [](SDValue Node, Type *Ty) {
  // ...
};
// ...
Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx)));
// ...
Args.push_back(CreateEntry(Src, Src.getValueType().getTypeForEVT(Ctx)));
Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx)));
// ...
    Dst.getValueType().getTypeForEVT(Ctx),
// ...
bool LowersToMemset = /* ... */;
// ...
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
// ...
                          Type *SizeTy, unsigned ElemSz,
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
Args.push_back(Entry);
// ...
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
  // ...
std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<AtomicSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate the atomic node with) VTList, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
// ...
assert(/* ... */ && "Invalid Atomic Op");
// ...
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
// ...
return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
// ...
if (Ops.size() == 1)
  // ...
if (Size.hasValue() && !Size.getValue())
  // ...
assert((Opcode <= (unsigned)std::numeric_limits<int>::max() &&
        /* ... */) &&
       "Opcode is not a memory-accessing opcode!");
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
  // ...
  ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
      Opcode, dl.getIROrder(), VTList, MemVT, MMO));
  // ...
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
    // ...
  }
  // ... (allocate with) VTList, MemVT, MMO);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
} else {
  // ... (allocate with) VTList, MemVT, MMO);
  createOperands(N, Ops);
}
                         SDValue Chain, int FrameIndex,
// ...
ID.AddInteger(FrameIndex);
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
  // ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
ID.AddInteger(Index);
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
  // ...
auto *N = newSDNode<PseudoProbeSDNode>(
    /* ... */);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
if (/* ... */ ||
    !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
    !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
  // ...
int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
// ...
    Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
  // ...
assert(/* ... */ && "Invalid chain type");
// ...
    Alignment, AAInfo, Ranges);
// ...
  assert(VT == MemVT && "Non-extending load from different memory type!");
// ...
  assert(/* ... */ && "Should only be an extending load, not truncating!");
  assert(/* ... */ && "Cannot convert from FP to Int or Int -> FP!");
  assert(/* ... */ &&
         "Cannot use an ext load to convert to or from a vector!");
  assert(/* ... */ &&
         "Cannot use an ext load to change the number of vector elements!");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
    dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<LoadSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) ExtType, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
               PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
// ...
               MemVT, Alignment, MMOFlags, AAInfo);
// ...
assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
// ...
    LD->getMemOperand()->getFlags() &
    // ...
    LD->getChain(), Base, Offset, LD->getPointerInfo(),
    LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
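// Reminder of the invariant the load asserts above enforce: for a
// non-extending load the in-memory type must equal the result type, while
// the extending variants (any-, sign- and zero-extend) require MemVT to be
// strictly narrower than VT, with matching FP/integer-ness and, for
// vectors, matching element counts. E.g. forming an i32 result from an i16
// sextload is fine; an f32 result from an i16 memory type is not.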
assert(/* ... */ && "Invalid chain type");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<StoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
assert(/* ... */ && "Invalid chain type");
// ...
assert(/* ... */ && "Invalid chain type");
// ...
assert(/* ... */ && "Should only be a truncating store, not extending!");
assert(/* ... */ && "Can't do FP-INT conversion!");
assert(/* ... */ &&
       "Cannot use trunc store to convert to or from a vector!");
assert(/* ... */ &&
       "Cannot use trunc store to change the number of vector elements!");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<StoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
// ...
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
ID.AddInteger(ST->getMemOperand()->getFlags());
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
  // ...
// ... (allocate with) ST->isTruncatingStore(), ST->getMemoryVT(),
//     ST->getMemOperand());
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
                       const MDNode *Ranges, bool IsExpanding) {
// ...
    Alignment, AAInfo, Ranges);
return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
                 /* ... */);
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
    dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPLoadSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) ExtType, IsExpanding, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
                 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
                 /* ... */);
// ...
                 Mask, EVL, VT, MMO, IsExpanding);
// ...
                       const AAMDNodes &AAInfo, bool IsExpanding) {
// ...
                   EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
                   /* ... */);
// ...
                   EVL, MemVT, MMO, IsExpanding);
// ...
auto *LD = cast<VPLoadSDNode>(OrigLoad);
assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
// ...
    LD->getMemOperand()->getFlags() &
    // ...
    LD->getVectorLength(), LD->getPointerInfo(),
    LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
    nullptr, LD->isExpandingLoad());
                        bool IsCompressing) {
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
    dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPStoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) IsTruncating, IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
                             bool IsCompressing) {
// ...
                             bool IsCompressing) {
  // ...
                    /*IsTruncating*/ false, IsCompressing);
// ...
assert(/* ... */ && "Should only be a truncating store, not extending!");
assert(/* ... */ &&
       "Cannot use trunc store to convert to or from a vector!");
assert(/* ... */ &&
       "Cannot use trunc store to change the number of vector elements!");
// ...
SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPStoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
auto *ST = cast<VPStoreSDNode>(OrigStore);
assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
// ...
SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
                 Offset, ST->getMask(), ST->getVectorLength()};
// ...
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
ID.AddInteger(ST->getMemOperand()->getFlags());
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
  // ...
auto *N = newSDNode<VPStoreSDNode>(
    /* ... */,
    ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
    DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
  // ...
}
N = newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM,
                                   ExtType, IsExpanding, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
                        Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
// ...
                        Stride, Mask, EVL, MemVT, MMO, IsExpanding);
// ...
                               bool IsTruncating, bool IsCompressing) {
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
    DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
                                          VTs, AM, IsTruncating,
                                          IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
                                    bool IsCompressing) {
  // ...
                           /*IsTruncating*/ false, IsCompressing);
// ...
assert(/* ... */ && "Should only be a truncating store, not extending!");
assert(/* ... */ &&
       "Cannot use trunc store to convert to or from a vector!");
assert(/* ... */ &&
       "Cannot use trunc store to change the number of vector elements!");
// ...
SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
                                          /* ... */,
                                          IsCompressing, SVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
assert(Ops.size() == 6 && "Incompatible number of operands");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPGatherSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) VT, MMO, IndexType);
createOperands(N, Ops);
// ...
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValueType(0).getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValueType(0).getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
assert(/* ElementCount::isKnownGE( */
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValueType(0).getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
// ...
CSEMap.InsertNode(N, IP);
// ...
assert(Ops.size() == 7 && "Incompatible number of operands");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
    /* ... */));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<VPScatterSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) VT, MMO, IndexType);
createOperands(N, Ops);
// ...
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValue().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValue().getValueType().getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
assert(/* ElementCount::isKnownGE( */
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValue().getValueType().getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
// ...
CSEMap.InsertNode(N, IP);
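// The "Scale should be a constant power of 2" checks mirror the addressing
// form Base + Index * Scale used by gather/scatter nodes: Scale encodes the
// element stride in bytes (e.g. 4 for i32 elements), and keeping it a
// constant power of two lets targets fold it straight into their
// scaled-index addressing modes instead of materializing a multiply.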
assert(/* ... */ && "Unindexed masked load with an offset!");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
    dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) AM, ExtTy, isExpanding, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
assert(LD->getOffset().isUndef() &&
       "Masked load is already a indexed load!");
// ...
                     Offset, LD->getMask(), LD->getPassThru(),
                     LD->getMemoryVT(), LD->getMemOperand(), AM,
                     LD->getExtensionType(), LD->isExpandingLoad());
// ...
                          bool IsCompressing) {
assert(/* ... */ && "Invalid chain type");
assert(/* ... */ && "Unindexed masked store with an offset!");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
    dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) IsTruncating, IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
assert(ST->getOffset().isUndef() &&
       "Masked store is already a indexed store!");
// ...
                      ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
                      AM, ST->isTruncatingStore(), ST->isCompressingStore());
assert(Ops.size() == 6 && "Incompatible number of operands");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
    dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) VTs, MemVT, MMO, IndexType, ExtTy);
createOperands(N, Ops);
// ...
assert(N->getPassThru().getValueType() == N->getValueType(0) &&
       "Incompatible type of the PassThru value in MaskedGatherSDNode");
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValueType(0).getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValueType(0).getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
assert(/* ElementCount::isKnownGE( */
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValueType(0).getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
// ...
CSEMap.InsertNode(N, IP);
// ...
assert(Ops.size() == 6 && "Incompatible number of operands");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
    dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) VTs, MemVT, MMO, IndexType, IsTrunc);
createOperands(N, Ops);
// ...
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getValue().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
           N->getValue().getValueType().getVectorElementCount().isScalable() &&
       "Scalable flags of index and data do not match");
assert(/* ElementCount::isKnownGE( */
           N->getIndex().getValueType().getVectorElementCount(),
           N->getValue().getValueType().getVectorElementCount()) &&
       "Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
// ...
CSEMap.InsertNode(N, IP);
assert(Ops.size() == 7 && "Incompatible number of operands");
// ...
ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
    dl.getIROrder(), VTs, MemVT, MMO, IndexType));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
  cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
  // ...
}
// ... (allocate with) VTs, MemVT, MMO, IndexType);
createOperands(N, Ops);
// ...
assert(N->getMask().getValueType().getVectorElementCount() ==
           N->getIndex().getValueType().getVectorElementCount() &&
       "Vector width mismatch between mask and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
       N->getScale()->getAsAPIntVal().isPowerOf2() &&
       "Scale should be a constant power of 2");
assert(N->getInc().getValueType().isInteger() && "Non integer update value");
// ...
CSEMap.InsertNode(N, IP);
ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
    /* ... */));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
  // ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
// ...
ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
    /* ... */));
// ...
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
  // ...
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
if (Cond.isUndef())
  // ...
return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
// ...
if (X.getValueType().getScalarType() == MVT::i1)
  // ...
bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
              /* ... */;
bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
              /* ... */;
if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
  // ...
if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
  // ...
if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
  // ...
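// Example of the shift simplification above: any shift whose (splatted)
// constant amount is known to be >= the value's bit width, e.g.
// (srl i32 %x, 35), satisfies
//   Val->getAPIntValue().uge(X.getScalarValueSizeInBits())
// and is folded away early rather than emitted, since such shifts have no
// defined result in the DAG's semantics.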
switch (Ops.size()) {
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
// ...
}
// ...
return getNode(Opcode, DL, VT, NewOps);
// ...
return getNode(Opcode, DL, VT, Ops, Flags);
unsigned NumOps = Ops.size();
switch (NumOps) {
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
// ...
}
// ...
for (const auto &Op : Ops)
  assert(/* ... */ && "Operand is DELETED_NODE!");
// ...
assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
assert(/* ... */ && "LHS and RHS of condition must have same type!");
assert(/* ... */ && "True and False arms of SelectCC must have same type!");
assert(/* ... */ &&
       "select_cc node must be of same type as true and false value!");
assert(/* ... */ &&
       "Expected select_cc with vector result to have the same sized "
       "comparison type!");
// ...
assert(NumOps == 5 && "BR_CC takes 5 operands!");
assert(/* ... */ && "LHS/RHS of comparison should match types!");
// ...
  Opcode = ISD::VP_XOR;
// ...
  Opcode = ISD::VP_AND;
// ...
case ISD::VP_REDUCE_MUL:
  // ...
  Opcode = ISD::VP_REDUCE_AND;
  // ...
case ISD::VP_REDUCE_ADD:
  // ...
  Opcode = ISD::VP_REDUCE_XOR;
  // ...
case ISD::VP_REDUCE_SMAX:
case ISD::VP_REDUCE_UMIN:
  // ...
  Opcode = ISD::VP_REDUCE_AND;
  // ...
case ISD::VP_REDUCE_SMIN:
case ISD::VP_REDUCE_UMAX:
  // ...
  Opcode = ISD::VP_REDUCE_OR;
  // ...
// ...
if (VT != MVT::Glue) {
  // ...
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
    // ...
  }
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
} else {
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
}
N->setFlags(Flags);
// ...
return getNode(Opcode, DL, VTList, Ops, Flags);
// ...
return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
// ...
for (const auto &Op : Ops)
  assert(/* ... */ && "Operand is DELETED_NODE!");
assert(/* ... */ && "Invalid add/sub overflow op!");
assert(/* ... */ &&
       Ops[0].getValueType() == Ops[1].getValueType() &&
       Ops[0].getValueType() == VTList.VTs[0] &&
       "Binary operator types must match!");
SDValue N1 = Ops[0], N2 = Ops[1];
// ...
if (N2CV && N2CV->isZero()) {
  // ...
}
// ...
assert(/* ... */ && "Invalid add/sub overflow op!");
assert(/* ... */ &&
       Ops[0].getValueType() == Ops[1].getValueType() &&
       Ops[0].getValueType() == VTList.VTs[0] &&
       Ops[2].getValueType() == VTList.VTs[1] &&
       "Binary operator types must match!");
// ...
assert(/* ... */ &&
       VTList.VTs[0] == Ops[0].getValueType() &&
       VTList.VTs[0] == Ops[1].getValueType() &&
       "Binary operator types must match!");
// ...
unsigned OutWidth = Width * 2;
// ...
if (/* signed variant */) {
  Val = Val.sext(OutWidth);
  Mul = Mul.sext(OutWidth);
} else {
  Val = Val.zext(OutWidth);
  Mul = Mul.zext(OutWidth);
}
// ...
assert(/* ... */ &&
       VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch");
// ...
assert(/* ... */ && "Invalid STRICT_FP_EXTEND!");
assert(/* ... */ &&
       Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
assert(/* ... */ &&
       "STRICT_FP_EXTEND result type should be vector iff the operand "
       "type is vector!");
assert(/* ... */
           Ops[1].getValueType().getVectorElementCount()) &&
       "Vector element count mismatch!");
assert(/* ... */ && "Invalid fpext node, dst <= src!");
// ...
assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
assert(/* ... */ &&
       "STRICT_FP_ROUND result type should be vector iff the operand "
       "type is vector!");
assert(/* ... */
           Ops[1].getValueType().getVectorElementCount()) &&
       "Vector element count mismatch!");
assert(/* ... */ &&
       Ops[1].getValueType().isFloatingPoint() &&
       VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
       (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
       "Invalid STRICT_FP_ROUND!");
    cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
  return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
else if (N3.getOpcode() == ISD::AND)
  if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
    // ...
    if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
  }
// ...
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
  // ...
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
    E->intersectFlagsWith(Flags);
    // ...
  }
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
} else {
  N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
  createOperands(N, Ops);
}
N->setFlags(Flags);
// ...
return getNode(Opcode, DL, VTList, Ops);
// ...
return getNode(Opcode, DL, VTList, Ops);
// ...
SDValue Ops[] = { N1, N2, N3 };
return getNode(Opcode, DL, VTList, Ops);
// ...
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VTList, Ops);
// ...
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VTList, Ops);
return makeVTList(&(*EVTs.insert(VT).first), 1);
// ...
void *IP = nullptr;
// ...
  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
  VTListMap.InsertNode(Result, IP);
// ...
return Result->getSDVTList();
// ...
void *IP = nullptr;
// ...
  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
  VTListMap.InsertNode(Result, IP);
// ...
return Result->getSDVTList();
// ...
void *IP = nullptr;
// ...
  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
  VTListMap.InsertNode(Result, IP);
// ...
return Result->getSDVTList();
// ...
unsigned NumVTs = VTs.size();
// ...
ID.AddInteger(NumVTs);
for (unsigned index = 0; index < NumVTs; index++) {
  ID.AddInteger(VTs[index].getRawBits());
}
// ...
void *IP = nullptr;
// ...
  Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
  VTListMap.InsertNode(Result, IP);
// ...
return Result->getSDVTList();
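// All of the getVTList overloads above intern their EVT arrays in
// VTListMap, so two SDVTList values describing the same type list compare
// equal by pointer. The dedicated 1/2/3/4-element paths merely avoid
// building a general array; the variadic overload fingerprints every raw
// VT bit pattern before allocating a fresh SDVTListNode.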
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
// ...
if (Op == N->getOperand(0))
  return N;
// ...
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
  // ...
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
// ...
N->OperandList[0].set(Op);
// ...
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// ...
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
// ...
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
  // ...
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
  // ...
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
// ...
if (N->OperandList[0] != Op1)
  N->OperandList[0].set(Op1);
if (N->OperandList[1] != Op2)
  N->OperandList[1].set(Op2);
// ...
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// ...
SDValue Ops[] = { Op1, Op2, Op3 };
// ...
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
// ...
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
// ...
unsigned NumOps = Ops.size();
assert(N->getNumOperands() == NumOps &&
       "Update with wrong number of operands");
// ...
if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
  // ...
void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
  // ...
if (!RemoveNodeFromCSEMaps(N))
  InsertPos = nullptr;
// ...
for (unsigned i = 0; i != NumOps; ++i)
  if (N->OperandList[i] != Ops[i])
    N->OperandList[i].set(Ops[i]);
// ...
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// ...
if (NewMemRefs.empty()) {
  // ...
}
// ...
if (NewMemRefs.size() == 1) {
  N->MemRefs = NewMemRefs[0];
  // ...
}
// ...
    Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
// ...
N->MemRefs = MemRefsBuffer;
N->NumMemRefs = static_cast<int>(NewMemRefs.size());
SDValue Ops[] = { Op1, Op2 };
// ...
SDValue Ops[] = { Op1, Op2, Op3 };
// ...
SDValue Ops[] = { Op1, Op2 };
// ...
New->setNodeId(-1);
// ...
unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
N->setIROrder(Order);
// ...
void *IP = nullptr;
if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
  // ...
    return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
}
if (!RemoveNodeFromCSEMaps(N))
  // ...
N->ValueList = VTs.VTs;
// ...
  if (Used->use_empty())
    DeadNodeSet.insert(Used);
// ...
  MN->clearMemRefs();
// ...
createOperands(N, Ops);
// ...
if (!DeadNodeSet.empty()) {
  // ...
  for (SDNode *N : DeadNodeSet)
    if (N->use_empty())
      // ...
}
// ...
CSEMap.InsertNode(N, IP);
unsigned OrigOpc = Node->getOpcode();
// ...
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
// ...
assert(Node->getNumValues() == 2 && "Unexpected number of results!");
// ...
SDValue InputChain = Node->getOperand(0);
// ...
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
  // ...
// ...
SDValue Ops[] = { Op1, Op2 };
// ...
SDValue Ops[] = { Op1, Op2, Op3 };
// ...
SDValue Ops[] = { Op1, Op2 };
// ...
SDValue Ops[] = { Op1, Op2, Op3 };
// ...
SDValue Ops[] = { Op1, Op2 };
// ...
SDValue Ops[] = { Op1, Op2, Op3 };
// ...
bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
// ...
void *IP = nullptr;
// ...
if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
  return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
}
// ...
N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
createOperands(N, Ops);
// ...
CSEMap.InsertNode(N, IP);
                   VT, Operand, SRIdxVal);
// ...
                   VT, Operand, Subreg, SRIdxVal);
// ...
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
  // ...
  void *IP = nullptr;
  // ...
    E->intersectFlagsWith(Flags);
  // ...
}
// ...
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
  // ...
  void *IP = nullptr;
  if (FindNodeOrInsertPos(ID, SDLoc(), IP))
    // ...
}
// ...
                      SDNode *N, unsigned R, bool IsIndirect,
// ...
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
    {}, IsIndirect, DL, O,
// ...
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
    Dependencies, IsIndirect, DL, O,
// ...
                      unsigned VReg, bool IsIndirect,
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
    {}, IsIndirect, DL, O,
// ...
                      unsigned O, bool IsVariadic) {
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
    DL, O, IsVariadic);
                       unsigned OffsetInBits, unsigned SizeInBits,
                       bool InvalidateDbg) {
// ...
assert(FromNode && ToNode && "Can't modify dbg values");
// ...
if (From == To || FromNode == ToNode)
  // ...
  if (Dbg->isInvalidated())
    // ...
  bool Changed = false;
  auto NewLocOps = Dbg->copyLocationOps();
  // ...
      NewLocOps.begin(), NewLocOps.end(),
      // ...
        bool Match = Op == FromLocOp;
        // ...
  auto *Expr = Dbg->getExpression();
  // ...
  if (auto FI = Expr->getFragmentInfo())
    if (OffsetInBits + SizeInBits > FI->SizeInBits)
      // ...
  auto AdditionalDependencies = Dbg->getAdditionalDependencies();
  // ...
      Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
      Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
      Dbg->isVariadic());
  // ...
  if (InvalidateDbg) {
    // ...
    Dbg->setIsInvalidated();
    Dbg->setIsEmitted();
  }
// ...
assert(/* ... */ &&
       "Transferred DbgValues should depend on the new SDNode");
if (!N.getHasDebugValue())
  // ...
auto GetLocationOperand = [](SDNode *Node, unsigned ResNo) {
  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
    // ...
};
// ...
  if (DV->isInvalidated())
    // ...
  switch (N.getOpcode()) {
  // ...
    if (!isa<ConstantSDNode>(N0)) {
      bool RHSConstant = isa<ConstantSDNode>(N1);
      // ...
        Offset = N.getConstantOperandVal(1);
      // ...
      if (!RHSConstant && DV->isIndirect())
        // ...
      auto *DIExpr = DV->getExpression();
      auto NewLocOps = DV->copyLocationOps();
      bool Changed = false;
      size_t OrigLocOpsSize = NewLocOps.size();
      for (size_t i = 0; i < OrigLocOpsSize; ++i) {
        // ...
        if (/* ... */ ||
            NewLocOps[i].getSDNode() != &N)
          // ...
        // ...
        const auto *TmpDIExpr =
            // ...
        NewLocOps.push_back(RHS);
        // ...
      }
      assert(Changed && "Salvage target doesn't use N");
      // ...
          DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
      auto AdditionalDependencies = DV->getAdditionalDependencies();
      // ...
          DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
          DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
      // ...
      DV->setIsInvalidated();
      DV->setIsEmitted();
      LLVM_DEBUG(N0.getNode()->dumprFull(this);
                 dbgs() << " into " << *DIExpr << '\n');
    }
    // ...
    TypeSize ToSize = N.getValueSizeInBits(0);
    // ...
    auto NewLocOps = DV->copyLocationOps();
    bool Changed = false;
    for (size_t i = 0; i < NewLocOps.size(); ++i) {
      if (/* ... */ ||
          NewLocOps[i].getSDNode() != &N)
        // ...
    }
    assert(Changed && "Salvage target doesn't use N");
    // ...
        DV->getAdditionalDependencies(), DV->isIndirect(),
        DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
    // ...
    DV->setIsInvalidated();
    DV->setIsEmitted();
    LLVM_DEBUG(dbgs() << " into " << *DbgExpression << '\n');
  }
// ...
assert((!Dbg->getSDNodes().empty() ||
        /* any location operand is a frame index: */
        // ... return Op.getKind() == SDDbgOperand::FRAMEIX;
        /* ... */) &&
       "Salvaged DbgValue should depend on a new SDNode");
assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
       "Expected inlined-at fields to agree");
// ...
while (UI != UE && N == UI->getUser())
  // ...
// ...
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
// ...
assert(/* ... */ && "Cannot replace with this method!");
// ...
RAUWUpdateListener Listener(*this, UI, UE);
// ...
  RemoveNodeFromCSEMaps(User);
// ...
  AddModifiedNodeToCSEMaps(User);
11765 if (
From->getNumValues() == 1)
11768 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i) {
11778 RAUWUpdateListener Listener(*
this, UI, UE);
11783 RemoveNodeFromCSEMaps(
User);
11789 bool To_IsDivergent =
false;
11798 if (To_IsDivergent !=
From->isDivergent())
11803 AddModifiedNodeToCSEMaps(
User);
11816 if (
From == To)
return;
11819 if (
From.getNode()->getNumValues() == 1) {
11831 UE =
From.getNode()->use_end();
11832 RAUWUpdateListener Listener(*
this, UI, UE);
11835 bool UserRemovedFromCSEMaps =
false;
11845 if (
Use.getResNo() !=
From.getResNo()) {
11852 if (!UserRemovedFromCSEMaps) {
11853 RemoveNodeFromCSEMaps(
User);
11854 UserRemovedFromCSEMaps =
true;
11864 if (!UserRemovedFromCSEMaps)
11869 AddModifiedNodeToCSEMaps(
User);
11888bool operator<(
const UseMemo &L,
const UseMemo &R) {
11889 return (intptr_t)L.User < (intptr_t)R.User;
11899 for (UseMemo &Memo :
Uses)
11900 if (Memo.User ==
N)
11901 Memo.User =
nullptr;
switch (Node->getOpcode()) {
// ...
assert(/* ... */ && "Conflicting divergence information!");
// ...
for (const auto &Op : N->ops()) {
  EVT VT = Op.getValueType();
  // ...
  if (VT != MVT::Other && Op.getNode()->isDivergent() &&
      /* ... */)
    // ...
}
// ...
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    // ...
  }
} while (!Worklist.empty());

void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  // ...
  Order.reserve(AllNodes.size());
  // ...
    unsigned NOps = N.getNumOperands();
    // ...
      Order.push_back(&N);
  // ...
  for (size_t I = 0; I != Order.size(); ++I) {
    // ...
    for (auto *U : N->users()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}

#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
void SelectionDAG::VerifyDAGDivergence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  for (auto *N : TopoOrder) {
    assert(/* ... */ &&
           "Divergence bit inconsistency detected");
    // ...
  }
}
for (unsigned i = 0; i != Num; ++i) {
  unsigned FromResNo = From[i].getResNo();
  // ...
    if (Use.getResNo() == FromResNo) {
      // ...
      Uses.push_back(Memo);
    }
}
// ...
RAUOVWUpdateListener Listener(*this, Uses);
// ...
for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
     UseIndex != UseIndexEnd; ) {
  // ...
  if (User == nullptr) {
    // ...
  }
  // ...
  RemoveNodeFromCSEMaps(User);
  // ...
  do {
    unsigned i = Uses[UseIndex].Index;
    // ...
  } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
  // ...
  AddModifiedNodeToCSEMaps(User);
}
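// The topological-sort routine that follows is effectively Kahn's
// algorithm run in-place over the AllNodes list: each node's NodeId
// temporarily holds its count of not-yet-sorted operands (its in-degree),
// nodes whose count reaches zero are spliced to SortedPos, and the
// trailing asserts verify that the entry token sorts first with id 0 and
// the last node receives id DAGSize-1 with no remaining users.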
unsigned DAGSize = 0;
// ...
  unsigned Degree = N.getNumOperands();
  // ...
    N.setNodeId(DAGSize++);
    // ...
    if (Q != SortedPos)
      SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
    assert(SortedPos != AllNodes.end() && "Overran node list");
    // ...
    N.setNodeId(Degree);
// ...
    unsigned Degree = P->getNodeId();
    assert(Degree != 0 && "Invalid node degree");
    // ...
      P->setNodeId(DAGSize++);
      if (P->getIterator() != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      // ...
      P->setNodeId(Degree);
// ...
  if (Node.getIterator() == SortedPos) {
    // ...
    dbgs() << "Overran sorted position:\n";
    // ...
    dbgs() << "Checking if this is due to cycles\n";
    // ...
  }
// ...
assert(SortedPos == AllNodes.end() &&
       "Topological sort incomplete!");
assert(/* ... */ &&
       "First node in topological sort is not the entry token!");
assert(AllNodes.front().getNodeId() == 0 &&
       "First node in topological sort has non-zero id!");
assert(AllNodes.front().getNumOperands() == 0 &&
       "First node in topological sort has operands!");
assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
       "Last node in topologic sort has unexpected id!");
assert(AllNodes.back().use_empty() &&
       "Last node in topologic sort has users!");
for (SDNode *SD : DB->getSDNodes()) {
  // ...
  SD->setHasDebugValue(true);
}
// ...
DbgInfo->add(DB, isParameter);
// ...
assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
// ...
if (OldChain == NewMemOpChain || OldChain.use_empty())
  return NewMemOpChain;
// ...
    OldChain, NewMemOpChain);
// ...
return TokenFactor;
// ...
assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
// ...
assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
// ...
auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
// ...
if (OutFunction != nullptr)
  // ...
std::string ErrorStr;
// ...
ErrorFormatter << "Undefined external symbol ";
ErrorFormatter << '"' << Symbol << '"';
return Const != nullptr && Const->isZero();
// ...
return Const != nullptr && Const->isZero() && !Const->isNegative();
// ...
return Const != nullptr && Const->isAllOnes();
// ...
return Const != nullptr && Const->isOne();
// ...
return Const != nullptr && Const->isMinSignedValue();
// ...
                       unsigned OperandNo) {
// ...
  APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
  // ...
    return Const.isZero();
  // ...
    return Const.isOne();
  // ...
    return Const.isAllOnes();
  // ...
    return Const.isMinSignedValue();
  // ...
    return Const.isMaxSignedValue();
  // ...
    return OperandNo == 1 && Const.isZero();
  // ...
    return OperandNo == 1 && Const.isOne();
// ...
  return ConstFP->isZero() &&
         (Flags.hasNoSignedZeros() || ConstFP->isNegative());
  // ...
  return OperandNo == 1 && ConstFP->isZero() &&
         (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
  // ...
  return ConstFP->isExactlyValue(1.0);
  // ...
  return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
EVT VT = V.getValueType();
// ...
APFloat NeutralAF = !Flags.hasNoNaNs()
                        ? /* ... */
                        : !Flags.hasNoInfs()
                              ? /* ... */
                              : /* ... */;
// ...
return ConstFP->isExactlyValue(NeutralAF);
// ...
  V = V.getOperand(0);
// ...
while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
  V = V.getOperand(0);
// ...
  V = V.getOperand(0);
// ...
  V = V.getOperand(0);
// ...
unsigned NumBits = V.getScalarValueSizeInBits();
// ...
return C && (C->getAPIntValue().countr_one() >= NumBits);
// ...
                     bool AllowTruncation) {
  EVT VT = N.getValueType();
  // ...
// ...
                     bool AllowTruncation) {
// ...
  EVT VecEltVT = N->getValueType(0).getVectorElementType();
  if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    EVT CVT = CN->getValueType(0);
    assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
    if (AllowTruncation || CVT == VecEltVT)
      // ...
  }
// ...
ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
// ...
if (CN && (UndefElements.none() || AllowUndefs)) {
  // ...
  EVT NSVT = N.getValueType().getScalarType();
  assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
  if (AllowTruncation || (CVT == NSVT))
    // ...
}
// ...
EVT VT = N.getValueType();
// ...
                       const APInt &DemandedElts,
                       bool AllowUndefs) {
// ...
    BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
// ...
if (CN && (UndefElements.none() || AllowUndefs))
  // ...
// ...
return C && C->isZero();
// ...
return C && C->isOne();
// ...
unsigned BitWidth = N.getScalarValueSizeInBits();
// ...
return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
// ...
std::vector<EVT> VTs;
// ...
const EVT *SDNode::getValueTypeList(MVT VT) {
  static EVTArray SimpleVTArray;
  // ...
  return &SimpleVTArray.VTs[VT.SimpleTy];
}
// ...
  if (U.getResNo() == Value) {
    // ...
  }
// ...
  if (U.getResNo() == Value)
    // ...
// ...
return any_of(N->op_values(),
              [this](SDValue Op) { return this == Op.getNode(); });
// ...
                                                unsigned Depth) const {
if (*this == Dest)
  return true;
// ...
if (Depth == 0)
  return false;
// ...
  return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
// ...
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
  if (Ld->isUnordered())
    return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
}
this->Flags &= Flags;
// ...
                           bool AllowPartials) {
// ...
  return Op.getOpcode() == unsigned(BinOp);
// ...
unsigned CandidateBinOp = Op.getOpcode();
if (Op.getValueType().isFloatingPoint()) {
  // ...
  switch (CandidateBinOp) {
  // ...
    if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
      // ...
  }
}
// ...
auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
  if (!AllowPartials || !Op)
    // ...
  EVT OpVT = Op.getValueType();
  // ...
};
// ...
unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
// ...
for (unsigned i = 0; i < Stages; ++i) {
  unsigned MaskEnd = (1 << i);
  // ...
  if (Op.getOpcode() != CandidateBinOp)
    return PartialReduction(PrevOp, MaskEnd);
  // ...
  Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
  // ...
    return PartialReduction(PrevOp, MaskEnd);
  // ...
  for (int Index = 0; Index < (int)MaskEnd; ++Index)
    if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
      return PartialReduction(PrevOp, MaskEnd);
  // ...
}
// ...
while (Op.getOpcode() == CandidateBinOp) {
  unsigned NumElts = Op.getValueType().getVectorNumElements();
  // ...
  unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
  if (NumSrcElts != (2 * NumElts))
    // ...
}
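// Rough shape of the shuffle ladder being matched above, for a v8i32 add
// reduction (built stage by stage; the matcher walks it from the final
// stage back up, so MaskEnd runs 1, 2, 4):
//   v1 = v  + shuffle(v,  <4,5,6,7,u,u,u,u>)
//   v2 = v1 + shuffle(v1, <2,3,u,u,u,u,u,u>)
//   v3 = v2 + shuffle(v2, <1,u,u,u,u,u,u,u>)
// Element 0 of v3 holds the reduction. At each stage the first MaskEnd
// mask entries must equal MaskEnd + Index, which is exactly the
// getMaskElt check in the loop.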
EVT VT = N->getValueType(0);
// ...
else if (NE > ResNE)
  // ...
if (N->getNumValues() == 2) {
  // ...
  EVT VT1 = N->getValueType(1);
  // ...
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      // ...
    }
    // ...
  }
  // ...
  for (; i < ResNE; ++i) {
    // ...
  }
  // ...
}
// ...
assert(N->getNumValues() == 1 &&
       "Can't unroll a vector with multiple results!");
// ...
for (i= 0; i != NE; ++i) {
  for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
    SDValue Operand = N->getOperand(j);
    // ...
  }
  switch (N->getOpcode()) {
  // ...
    const auto *ASC = cast<AddrSpaceCastSDNode>(N);
    // ... ASC->getSrcAddressSpace(), ASC->getDestAddressSpace()));
  // ...
  }
}
for (; i < ResNE; ++i)
  // ...
// ...
unsigned Opcode = N->getOpcode();
assert(/* ... */ && "Expected an overflow opcode");
// ...
EVT ResVT = N->getValueType(0);
EVT OvVT = N->getValueType(1);
// ...
else if (NE > ResNE)
  // ...
for (unsigned i = 0; i < NE; ++i) {
  SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
  // ...
}
if (LD->isVolatile() || Base->isVolatile())
  // ...
if (!LD->isSimple())
  // ...
if (LD->isIndexed() || Base->isIndexed())
  // ...
if (LD->getChain() != Base->getChain())
  // ...
EVT VT = LD->getMemoryVT();
// ...
if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
  return (Dist * (int64_t)Bytes == Offset);
// ...
int64_t GVOffset = 0;
// ...
int FrameIdx = INT_MIN;
int64_t FrameOffset = 0;
// ...
  FrameIdx = FI->getIndex();
else if (/* ... */ &&
         isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
  // ...
  FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  FrameOffset = Ptr.getConstantOperandVal(1);
}
if (FrameIdx != INT_MIN) {
  // ...
}
return std::nullopt;
assert(/* ... */ && "Split node must be a scalar type");
// ...
return std::make_pair(Lo, Hi);
// ...
return std::make_pair(LoVT, HiVT);
// ...
                              bool *HiIsEmpty) const {
// ...
assert(/* ... */ &&
       "Mixing fixed width and scalable vectors when enveloping a type");
// ...
  *HiIsEmpty = false;
// ...
return std::make_pair(LoVT, HiVT);

std::pair<SDValue, SDValue>
// ...
assert(/* ... */ &&
       "Splitting vector with an invalid mixture of fixed and scalable "
       /* ... */);
assert(/* ... */ >= N.getValueType().getVectorMinNumElements() &&
       "More vector elements requested than available!");
// ...
return std::make_pair(Lo, Hi);
// ...
EVT VT = N.getValueType();
assert(/* ... */ && "Expecting the mask to be an evenly-sized vector");
// ...
return std::make_pair(Lo, Hi);
// ...
EVT VT = N.getValueType();
// ...
                       unsigned Start, unsigned Count,
// ...
EVT VT = Op.getValueType();
// ...
if (EltVT == EVT())
  // ...
for (unsigned i = Start, e = Start + Count; i != e; ++i) {
  // ...
}
// ...
  return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
                       unsigned &SplatBitSize,
                       bool &HasAnyUndefs,
                       unsigned MinSplatBits,
                       bool IsBigEndian) const {
// ...
if (MinSplatBits > VecWidth)
  // ...
SplatValue = APInt(VecWidth, 0);
SplatUndef = APInt(VecWidth, 0);
// ...
assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
// ...
for (unsigned j = 0; j < NumOps; ++j) {
  unsigned i = IsBigEndian ? NumOps - 1 - j : j;
  // ...
  unsigned BitPos = j * EltWidth;
  // ...
    SplatUndef.setBits(BitPos, BitPos + EltWidth);
  else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
    SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
  else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
    SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
  // ...
}
// ...
HasAnyUndefs = (SplatUndef != 0);
// ...
while (VecWidth > 8) {
  // ...
  unsigned HalfSize = VecWidth / 2;
  // ...
  if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
      MinSplatBits > HalfSize)
    // ...
  SplatValue = HighValue | LowValue;
  SplatUndef = HighUndef & LowUndef;
  VecWidth = HalfSize;
}
// ...
SplatBitSize = VecWidth;
if (UndefElements) {
  UndefElements->clear();
  UndefElements->resize(NumOps);
}
// ...
for (unsigned i = 0; i != NumOps; ++i) {
  if (!DemandedElts[i])
    // ...
  if (Op.isUndef()) {
    // ...
    (*UndefElements)[i] = true;
  } else if (!Splatted) {
    // ...
  } else if (Splatted != Op) {
    // ...
  }
}
// ...
unsigned FirstDemandedIdx = DemandedElts.countr_zero();
assert(/* ... */ &&
       "Can only have a splat without a constant for all undefs.");
// ...
if (UndefElements) {
  UndefElements->clear();
  UndefElements->resize(NumOps);
}
// ...
for (unsigned I = 0; I != NumOps; ++I)
  // ...
    (*UndefElements)[I] = true;
// ...
for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
  Sequence.append(SeqLen, SDValue());
  for (unsigned I = 0; I != NumOps; ++I) {
    if (!DemandedElts[I])
      // ...
    SDValue &SeqOp = Sequence[I % SeqLen];
    // ...
    if (Op.isUndef()) {
      // ...
    }
    if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
      // ...
    }
    // ...
  }
  if (!Sequence.empty())
    // ...
}
assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
return dyn_cast_or_null<ConstantSDNode>(
    /* ... */);
// ...
return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
// ...
return dyn_cast_or_null<ConstantFPSDNode>(
    /* ... */);
// ...
return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
// ...
if (ConstantFPSDNode *CN =
        dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
  // ...
  const APFloat &APF = CN->getValueAPF();
  // ...
  return IntVal.exactLogBase2();
}
13291 bool IsLittleEndian,
unsigned DstEltSizeInBits,
13299 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
13300 "Invalid bitcast scale");
13305 BitVector SrcUndeElements(NumSrcOps,
false);
13307 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
13309 if (
Op.isUndef()) {
13310 SrcUndeElements.
set(
I);
13313 auto *CInt = dyn_cast<ConstantSDNode>(
Op);
13314 auto *CFP = dyn_cast<ConstantFPSDNode>(
Op);
13315 assert((CInt || CFP) &&
"Unknown constant");
13316 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
13317 : CFP->getValueAPF().bitcastToAPInt();
13321 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
13322 SrcBitElements, UndefElements, SrcUndeElements);
void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
                                      unsigned DstEltSizeInBits,
                                      SmallVectorImpl<APInt> &DstBitElements,
                                      ArrayRef<APInt> SrcBitElements,
                                      BitVector &DstUndefElements,
                                      const BitVector &SrcUndefElements) {
  unsigned NumSrcOps = SrcBitElements.size();
  unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
  assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
         "Invalid bitcast scale");
  assert(NumSrcOps == SrcUndefElements.size() &&
         "Vector size mismatch");

  unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
  DstUndefElements.clear();
  DstUndefElements.resize(NumDstOps, false);
  DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits));

  // Concatenate src elements constant bits together into dst element.
  if (SrcEltSizeInBits <= DstEltSizeInBits) {
    unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
    for (unsigned I = 0; I != NumDstOps; ++I) {
      DstUndefElements.set(I);
      APInt &DstBits = DstBitElements[I];
      for (unsigned J = 0; J != Scale; ++J) {
        unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
        if (SrcUndefElements[Idx])
          continue;
        DstUndefElements.reset(I);
        const APInt &SrcBits = SrcBitElements[Idx];
        assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
               "Illegal constant bitwidths");
        DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
      }
    }
    return;
  }

  // Split src element constant bits into dst elements.
  unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
  for (unsigned I = 0; I != NumSrcOps; ++I) {
    if (SrcUndefElements[I]) {
      DstUndefElements.set(I * Scale, (I + 1) * Scale);
      continue;
    }
    const APInt &SrcBits = SrcBitElements[I];
    for (unsigned J = 0; J != Scale; ++J) {
      unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
      APInt &DstBits = DstBitElements[Idx];
      DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
    }
  }
}
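// Illustrative note (not in the original source): recasting two i32 elements
// {0x11111111, 0x22222222} to one 64-bit element concatenates them in endian
// order; little-endian places element 0 in the low half:
//
//   SmallVector<APInt> Dst;
//   BitVector DstUndefs;
//   BuildVectorSDNode::recastRawBits(/*IsLittleEndian=*/true, 64, Dst,
//                                    Src, DstUndefs, SrcUndefs);
//   // Dst[0] == 0x2222222211111111 for the example above.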
bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
std::optional<std::pair<APInt, APInt>>
BuildVectorSDNode::isConstantSequence() const {
  unsigned NumOps = getNumOperands();
  if (NumOps < 2)
    return std::nullopt;

  if (!isa<ConstantSDNode>(getOperand(0)) ||
      !isa<ConstantSDNode>(getOperand(1)))
    return std::nullopt;

  unsigned EltSize = getValueType(0).getScalarSizeInBits();
  APInt Start = getConstantOperandAPInt(0).trunc(EltSize);
  APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start;
  if (Stride.isZero())
    return std::nullopt;

  for (unsigned i = 2; i < NumOps; ++i) {
    if (!isa<ConstantSDNode>(getOperand(i)))
      return std::nullopt;

    APInt Val = getConstantOperandAPInt(i).trunc(EltSize);
    if (Val != (Start + (Stride * i)))
      return std::nullopt;
  }

  return std::make_pair(Start, Stride);
}
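// Illustrative note (not in the original source): for <4 x i32> <0, 2, 4, 6>
// the call yields Start = 0 and Stride = 2, since every element i equals
// Start + Stride * i; <0, 2, 5, 6> fails at i = 2 and yields std::nullopt.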
  // Tail of ShuffleVectorSDNode::isSplatMask: all remaining elements must be
  // either undef or the same as the first non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
bool SelectionDAG::isConstantIntBuildVectorOrConstantInt(
    SDValue N, bool AllowOpaques) const {
  // If this is a ConstantSDNode, check whether it is opaque.
  if (auto *C = dyn_cast<ConstantSDNode>(N))
    return AllowOpaques || !C->isOpaque();

  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return true;

  // Treat a GlobalAddress supporting constant offset folding as a constant
  // integer.
  if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return true;

  if (N.getOpcode() == ISD::SPLAT_VECTOR &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return true;

  return false;
}

bool SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  if (isa<ConstantFPSDNode>(N))
    return true;

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return true;

  if (N.getOpcode() == ISD::SPLAT_VECTOR &&
      isa<ConstantFPSDNode>(N.getOperand(0)))
    return true;

  return false;
}
std::optional<bool> SelectionDAG::isBoolConstant(SDValue N,
                                                 bool AllowTruncation) const {
  ConstantSDNode *Const = isConstOrConstSplat(N, false, AllowTruncation);
  if (!Const)
    return std::nullopt;

  const APInt &CVal = Const->getAPIntValue();
  switch (TLI->getBooleanContents(N.getValueType())) {
  case TargetLowering::ZeroOrOneBooleanContent:
    if (CVal.isOne())
      return true;
    if (CVal.isZero())
      return false;
    return std::nullopt;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    if (CVal.isAllOnes())
      return true;
    if (CVal.isZero())
      return false;
    return std::nullopt;
  case TargetLowering::UndefinedBooleanContent:
    return CVal[0];
  }
  llvm_unreachable("Unknown BooleanContent enum");
}
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    EVT VT = Ops[I].getValueType();

    // Skip Chain. It does not carry divergence.
    if (VT != MVT::Other &&
        (VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
        Ops[I].getNode()->isDivergent()) {
      IsDivergent = true;
    }
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;

  if (!TLI->isSDNodeAlwaysUniform(Node)) {
    IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
    Node->SDNodeBits.IsDivergent = IsDivergent;
  }
  checkForCycles(Node);
}
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
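// Illustrative note (not in the original source): each iteration folds the
// trailing Limit chain values into one TokenFactor and re-appends it, so a
// worklist of Limit + 10 values shrinks to 11 before the final TokenFactor
// is built, keeping every node within getMaxNumOperands().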
SDValue SelectionDAG::makeStateFunctionCall(unsigned LibFunc, SDValue Ptr,
                                            SDValue InChain,
                                            const SDLoc &DLoc) {
  assert(InChain.getValueType() == MVT::Other && "Expected token chain");
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Ptr;
  Entry.Ty = Ptr.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  // ... (build the libcall and lower it via TLI->LowerCallTo) ...
}
void SelectionDAG::copyExtraInfo(SDNode *From, SDNode *To) {
  assert(From && To && "Invalid SDNode; empty source SDValue?");
  auto I = SDEI.find(From);
  if (I == SDEI.end())
    return;

  // Use of operator[] on the DenseMap may cause an insertion, which
  // invalidates the iterator, so take a copy to prevent a use-after-free.
  NodeExtraInfo NEI = I->second;
  if (LLVM_LIKELY(!NEI.PCSections)) {
    // No deep copy required for the kinds of extra info set.
    SDEI[To] = std::move(NEI);
    return;
  }

  // We need to copy NodeExtraInfo to all _new_ nodes that are being
  // introduced through the replacement of From with To. First collect the
  // subgraph reachable from From, remembering the frontier in Leafs so a
  // deeper retry can resume where the previous pass stopped.
  SmallDenseSet<const SDNode *, 32> FromReach;
  SmallVector<const SDNode *> Leafs;

  auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) {
    if (MaxDepth == 0) {
      // Remember this node in case we need to increase MaxDepth and continue.
      Leafs.emplace_back(N);
      return;
    }
    if (!FromReach.insert(N).second)
      return;
    for (const SDValue &Op : N->op_values())
      Self(Self, Op.getNode(), MaxDepth - 1);
  };

  // Copy extra info to To and all its transitive operands that are new,
  // i.e. not reachable from From.
  SmallPtrSet<const SDNode *, 8> Visited;
  auto DeepCopyTo = [&](auto &&Self, const SDNode *N) {
    if (FromReach.contains(N))
      return true;
    if (!Visited.insert(N).second)
      return true;
    if (getEntryNode().getNode() == N)
      return false;
    for (const SDValue &Op : N->op_values()) {
      if (!Self(Self, Op.getNode()))
        return false;
    }
    SDEI[N] = NEI;
    return true;
  };

  // Start with a low MaxDepth, assuming the path to the common operands of
  // From and To is short, and retry with a deeper search only when needed.
  for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
       PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) {
    // StartFrom is the previous (or initial) set of leafs reachable at the
    // previous maximum depth.
    SmallVector<const SDNode *> StartFrom;
    std::swap(StartFrom, Leafs);
    for (const SDNode *N : StartFrom)
      VisitFrom(VisitFrom, N, MaxDepth - PrevDepth);
    if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To)))
      return;
    LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth
                      << " too low\n");
  }

  // The subgraph reachable from From has depth greater than the maximum
  // MaxDepth, so VisitFrom() could not visit all reachable common operands.
  errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
  assert(false && "From subgraph too complex - increase max. MaxDepth?");
  // Best-effort fallback if assertions are disabled.
  SDEI[To] = std::move(NEI);
}
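// Illustrative note (not in the original source): the retry loop re-walks
// only from the previous leaf frontier (MaxDepth - PrevDepth extra levels),
// so the total traversal work across all retries stays proportional to the
// nodes within the final depth rather than restarting from scratch at each
// doubling of MaxDepth.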
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode *> &Visited,
                                 SmallPtrSetImpl<const SDNode *> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it
  // as a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG);
    dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
void llvm::checkForCycles(const llvm::SDNode *N, const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallPtrSet<const SDNode *, 32> Checked;
    checkForCyclesHelper(N, Visited, Checked, DAG);
  }
#endif // !NDEBUG
}
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is MaybeLiveUses might be modified but its content should be ignored(since it might not be complete). DeadArgumentEliminationPass
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines a hash set that can be used to remove duplication of nodes in a graph.
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
mir Rename Register Operands
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
Contains matchers for matching SelectionDAG nodes and values.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void VerifySDNode(SDNode *N, const TargetLowering *TLI)
VerifySDNode - Check the given SDNode. Aborts if it is invalid.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static unsigned getSize(unsigned Kind)
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sshl_sat(const APInt &RHS) const
APInt ushl_sat(const APInt &RHS) const
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
Recycle small arrays allocated from a BumpPtrAllocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
This is an SDNode representing atomic operations.
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n,...
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
This class represents a function call, abstracting a target machine's calling convention.
static bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
This class represents a range of values.
ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
MachineBasicBlock * MBB
MBB - The current block.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate the offet and size that ar...
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MVT getIntegerVT(unsigned BitWidth)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
bool isNonTemporal() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
bool isDereferenceable() const
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Pass interface - Implemented by all 'passes'.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
BumpPtrAllocator & getAlloc()
void add(SDDbgValue *V, bool isParameter)
void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node) const
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Return true if the type of the node type undefined.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
void DropOperands()
Release the operands and set this node to have zero operands.
Represents a use of a SDNode.
EVT getValueType() const
Convenience function for get().getValueType().
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual bool isTargetMemoryOpcode(unsigned Opcode) const
Returns true if a node with the given target-specific opcode has a memory operand.
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
SDNodeFlags getFlags() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
void updateDivergence(SDNode *N)
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static unsigned getHasPredecessorMaxSteps()
bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
SDValue getRegister(Register Reg, EVT VT)
bool calculateDivergence(SDNode *N)
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
std::optional< uint64_t > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
SDValue getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, SDValue Op2)
Create the DAG equivalent of vector_partial_reduce where Op1 and Op2 are its operands and ReducedTy is the result type.
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate for the target's BooleanContent for type OpVT or truncating it.
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending or rounding (by truncation).
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or performing either zero or any extension as appropriate for the target.
SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask, EVL).
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
iterator_range< allnodes_iterator > allnodes()
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on their topological order.
ilist< SDNode >::size_type allnodes_size() const
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
const TargetLibraryInfo & getLibInfo() const
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
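For illustration, a minimal sketch of a typical query, assuming a SelectionDAG DAG and an i32 SDValue Op are in scope (as in a DAG combine):
unsigned SignBits = DAG.ComputeNumSignBits(Op);
// 25 replicated sign bits in an i32 mean the value fits in i8 as a
// signed integer (32 - 8 + 1 = 25).
bool FitsInI8 = SignBits >= 25;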
bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that is guaranteed to have the same semantics as an Add.
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT, by either truncating it or performing either zero or any extension as appropriate for the target.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy value.
bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift node, return the maximum possible shift amount.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite order.
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
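A minimal sketch of a typical use, assuming a SelectionDAG DAG and an SDValue Op are in scope:
KnownBits Known = DAG.computeKnownBits(Op);
// Known.Zero / Known.One hold the proven bits; e.g. two known-zero
// trailing bits prove Op is a multiple of 4.
bool MultipleOf4 = Known.countMinTrailingZeros() >= 2;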
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the portion of FrameIndex described by Offset and Size.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
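As an illustrative use (X and the APInt C are hypothetical names): (and X, C) can be folded to X whenever the bits the AND would clear are already known zero:
// ~C selects the bits the AND clears; if those are known zero,
// the AND is a no-op.
if (DAG.MaskedValueIsZero(X, ~C))
  return X;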
bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
bool expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={})
Expands a node with multiple results to an FP or vector libcall.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shift node, return it.
LLVMContext * getContext() const
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been split into two identical pieces.
SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift node, return the range of possible shift amounts.
SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
std::optional< bool > isBoolConstant(SDValue N, bool AllowTruncation=false) const
Check if a value N is a constant using the target's BooleanContent for its type.
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector starting from the EXTRACT_VECTOR_ELT node Extract.
bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::ADD node.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for split-up values.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
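A hedged sketch of building such a node (DAG, DL, and the boolean Cond are assumed to be in scope):
// Invert a boolean; for a ZeroOrOneBooleanContent target this is
// simply (xor Cond, 1).
SDValue NotCond = DAG.getLogicalNOT(DL, Cond, Cond.getValueType());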
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction.
static bool isSplatMask(const int *Mask, EVT VT)
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
const VecDesc * getVectorMappingInfo(StringRef F, const ElementCount &VF, bool Masked) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with this index.
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is known to be splat of constant.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in types wider than i1.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
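A sketch of how these conventions are typically consumed when widening an i1 boolean; getExtendForContent maps ZeroOrOneBooleanContent to ZERO_EXTEND, ZeroOrNegativeOneBooleanContent to SIGN_EXTEND, and UndefinedBooleanContent to ANY_EXTEND (TLI, DAG, DL, VT, and Bool are assumed to be in scope):
// Pick the extension that preserves the target's boolean encoding.
ISD::NodeType ExtOp = TargetLowering::getExtendForContent(
    TLI.getBooleanContents(VT.isVector(), VT.isFloatingPoint()));
SDValue WideBool = DAG.getNode(ExtOp, DL, VT, Bool);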
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through the data layout.
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits to the DAG Combiner.
virtual void verifyTargetSDNode(const SDNode *N) const
Check the given SDNode. Aborts if it is invalid.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in Known.
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which may be undef in UndefElts.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, does not have undef bits.
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
StringRef getVectorFnName() const
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) is definitely even.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
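A worked example at 8 bits: 200 * 200 = 40000 = 0x9C40, so the returned high half is 0x9C (156):
APInt Hi = APIntOps::mulhu(APInt(8, 200), APInt(8, 200)); // == 156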
const APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
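The unsigned averages match the classic overflow-free identities, shown here on plain unsigned values to illustrate the semantics (not the APIntOps implementation):
unsigned AvgFloor = (A & B) + ((A ^ B) >> 1); // floor((A + B) / 2)
unsigned AvgCeil  = (A | B) - ((A ^ B) >> 1); // ceil((A + B) / 2)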
const APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth.
APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
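For example, the logical negation of signed less-than is signed greater-or-equal:
ISD::CondCode Inv =
    ISD::getSetCCInverse(ISD::SETLT, /*isIntegerLike=*/true);
// Inv == ISD::SETGE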
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 or undef.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand that prevents memory accesses to the masked-off lanes.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 value.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm.maximum semantics.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16 bit) floating numbers.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the LLVM IR.
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and exception handling tables.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2], add 1, and halve the result by shifting it right one bit.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the resultant vector type.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating if there is an incoming carry.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsigned values starting from 0 with step IMM.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high half of the (2*N)-bit product.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values, following the IEEE-754 2008 definition.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing ops #0 and #1 with the condition code in op #4.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a scalable vector.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi, swapLo, swapHi).
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.start and llvm.lifetime.end intrinsics.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additional mask operand that prevents memory accesses to the masked-off lanes.
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1], and halve the result by shifting it right one bit.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together and then duplicated in all lanes.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF), otherwise returns VAL.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), selected according to IMM.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt).
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask, e.g. active elements are packed into the lowest lanes and the remaining lanes are taken from Passthru.
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer with the saturation width given in operand 1, saturating out-of-range values.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence, and carry arbitrary information that the target might want to know.
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that behave the same as FMINNUM_IEEE and FMAXNUM_IEEE except when either operand is an sNaN.
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as signed/unsigned integers.
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
bool isExtOpcode(unsigned Opcode)
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 or undef.
bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified element size without a loss in meaning.
bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition is always true if an operand is a NaN, and 2 if the condition is undefined if an operand is a NaN.
std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is an EXTLOAD.
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDEF.
bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calculating addresses.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUILD_VECTOR.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN, respectively.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pair of constant BUILD_VECTORs.
bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or UNKNOWN_LIBCALL if there is none.
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or UNKNOWN_LIBCALL if there is none.
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or UNKNOWN_LIBCALL if there is none.
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition code Pred.
SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with no undefs).
SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
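For example:
unsigned N = llvm::countr_one(0b0111u); // == 3 trailing ones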
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
MaybeAlign getAlign(const Function &F, unsigned Index)
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with no undefs).
bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
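A few worked values, together with Log2_32_Ceil listed above:
Log2_32(32);      // == 5
Log2_32(33);      // == 5 (floor)
Log2_32_Ceil(33); // == 6 (ceil)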
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with no undefs).
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
@ Mul
Product of integers.
bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
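For example, an address 8 bytes past a 16-byte-aligned base is only guaranteed to be 8-byte aligned:
Align A = commonAlignment(Align(16), /*Offset=*/8); // A == Align(8)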
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
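For example, treating a 12-bit immediate as signed:
int64_t Imm = SignExtend64<12>(0xFFF); // Imm == -1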
unsigned Log2(Align A)
Returns the log2 of the alignment.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
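Note the "strictly greater", so a power of two maps to the next one up:
NextPowerOf2(7); // == 8
NextPowerOf2(8); // == 16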
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
static const fltSemantics & IEEEsingle() LLVM_READNONE
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
static const fltSemantics & IEEEquad() LLVM_READNONE
static const fltSemantics & IEEEdouble() LLVM_READNONE
static const fltSemantics & IEEEhalf() LLVM_READNONE
static constexpr roundingMode rmTowardPositive
static const fltSemantics & BFloat() LLVM_READNONE
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no fewer bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
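A short sketch exercising several of the EVT queries above (header locations assumed; i37 is used precisely because it has no simple MVT equivalent):

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  // i37 has no MVT counterpart, so it is "extended" rather than "simple".
  llvm::EVT Odd = llvm::EVT::getIntegerVT(Ctx, 37);
  assert(Odd.isExtended() && !Odd.isSimple() && Odd.isInteger());
  // <4 x f32>: element type, element count, and bit-size queries.
  llvm::EVT V4F32 = llvm::EVT::getVectorVT(Ctx, llvm::MVT::f32, 4);
  assert(V4F32.isVector() && V4F32.isFloatingPoint());
  assert(V4F32.getVectorElementType() == llvm::MVT::f32);
  assert(V4F32.getVectorNumElements() == 4);
  assert(V4F32.getSizeInBits() == 128 && V4F32.bitsGT(Odd));
  return 0;
}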
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for an in-register sign extension of the value we're tracking.
static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if the value is all zero.
void makeNonNegative()
Make this value non-negative.
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from llvm.usub.sat(LHS, RHS).
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [BitPosition, BitPosition+NumBits).
static KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits resulting from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding or subtracting LHS and RHS, as selected by Add.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller KnownBits value starting at BitPosition.
static KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgCeilU.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
static KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from APIntOps::avgCeilS.
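A compact, hedged example of the KnownBits workflow — build fully-known constants, push them through an add, and query the result (llvm/Support/KnownBits.h):

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>

int main() {
  using llvm::APInt;
  using llvm::KnownBits;
  // Fully-known 8-bit constants: 12 (0b00001100) and 4 (0b00000100).
  KnownBits L = KnownBits::makeConstant(APInt(8, 12));
  KnownBits R = KnownBits::makeConstant(APInt(8, 4));
  // Adding two known constants yields a known constant: 16.
  KnownBits Sum = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
                                              /*NUW=*/false, L, R);
  assert(Sum.isConstant() && Sum.getConstant() == 16);
  // Trailing-zero bounds fall out of the known bits: 16 == 0b00010000.
  assert(Sum.countMinTrailingZeros() == 4);
  // intersectWith keeps only the facts true for both operands.
  assert(!L.intersectWith(R).isUnknown());
  return 0;
}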
This class contains a discriminated union of information about pointers in memory operands,...
bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
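A brief hypothetical helper showing how these pieces typically combine when describing a stack access (the function name and the +8 offset are illustrative):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// Hypothetical: a pointer into fixed stack slot FI, 8 bytes in.
// getFixedStack pins the info to a frame index; getWithOffset layers a
// constant byte offset on top of it.
static llvm::MachinePointerInfo
stackSlotPlus8(llvm::MachineFunction &MF, int FI) {
  return llvm::MachinePointerInfo::getFixedStack(MF, FI).getWithOffset(8);
}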
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
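MemOp packages the parameters a target consults when choosing a wide type for memcpy/memset lowering. A hedged sketch of constructing one (the size and alignments are illustrative; MemOp's assumed home is llvm/CodeGen/TargetLowering.h):

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/Alignment.h"

// Hypothetical: a 32-byte, non-volatile copy whose destination alignment is
// fixed at 8 while the source is only 4-byte aligned.
static llvm::MemOp describeCopy() {
  return llvm::MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
                           /*DstAlign=*/llvm::Align(8),
                           /*SrcAlign=*/llvm::Align(4),
                           /*IsVolatile=*/false);
}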
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueTypes that has been interned by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
Called when node N has been deleted; if E is not null, E is an equivalent node that replaced it.
virtual void NodeInserted(SDNode *N)
Called when node N has been inserted.
virtual void NodeUpdated(SDNode *N)
Called when node N has been updated.
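A hedged sketch of a listener built on these hooks; the subclass and its counter are illustrative, but registration does happen through the base-class constructor, and listeners chain via Next:

#include "llvm/CodeGen/SelectionDAG.h"

// Hypothetical: count node deletions while this object is in scope.
struct CountingListener : llvm::SelectionDAG::DAGUpdateListener {
  unsigned NumDeleted = 0;
  explicit CountingListener(llvm::SelectionDAG &DAG)
      : DAGUpdateListener(DAG) {}
  void NodeDeleted(llvm::SDNode *N, llvm::SDNode *E) override {
    // E, when non-null, is the equivalent node that replaced N.
    (void)N; (void)E;
    ++NumDeleted;
  }
};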
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
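These setters chain, which is how lowering code conventionally fills in one of these before calling LowerCallTo. A hedged sketch modeled on makeLibCall-style usage (the helper name and parameters are hypothetical; Chain, Callee, RetTy, Args, and dl would come from the surrounding lowering context):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"

static std::pair<llvm::SDValue, llvm::SDValue>
emitLibCall(llvm::SelectionDAG &DAG, const llvm::SDLoc &dl,
            llvm::SDValue Chain, llvm::Type *RetTy, llvm::SDValue Callee,
            llvm::TargetLowering::ArgListTy &&Args) {
  const llvm::TargetLowering &TLI = DAG.getTargetLoweringInfo();
  llvm::TargetLowering::CallLoweringInfo CLI(DAG);
  // Each setter returns *this, so the whole description reads as one chain.
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(llvm::CallingConv::C, RetTy, Callee, std::move(Args))
      .setTailCall(false)
      .setDiscardResult(true);
  return TLI.LowerCallTo(CLI);
}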